1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2016, Joyent, Inc. All rights reserved.
25 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
26 */
27
28 /*
29 * DTrace - Dynamic Tracing for Solaris
30 *
31 * This is the implementation of the Solaris Dynamic Tracing framework
32 * (DTrace). The user-visible interface to DTrace is described at length in
33 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
34 * library, the in-kernel DTrace framework, and the DTrace providers are
35 * described in the block comments in the <sys/dtrace.h> header file. The
36 * internal architecture of DTrace is described in the block comments in the
37 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
38 * implementation very much assume mastery of all of these sources; if one has
39 * an unanswered question about the implementation, one should consult them
40 * first.
41 *
42 * The functions here are ordered roughly as follows:
43 *
44 * - Probe context functions
45 * - Probe hashing functions
46 * - Non-probe context utility functions
47 * - Matching functions
48 * - Provider-to-Framework API functions
49 * - Probe management functions
50 * - DIF object functions
51 * - Format functions
52 * - Predicate functions
53 * - ECB functions
54 * - Buffer functions
55 * - Enabling functions
56 * - DOF functions
57 * - Anonymous enabling functions
58 * - Consumer state functions
59 * - Helper functions
60 * - Hook functions
61 * - Driver cookbook functions
62 *
63 * Each group of functions begins with a block comment labelled the "DTrace
64 * [Group] Functions", allowing one to find each block by searching forward
65 * on capital-f functions.
66 */
67 #include <sys/errno.h>
68 #include <sys/param.h>
69 #include <sys/types.h>
70 #ifndef illumos
71 #include <sys/time.h>
72 #endif
73 #include <sys/stat.h>
74 #include <sys/conf.h>
75 #include <sys/systm.h>
76 #include <sys/endian.h>
77 #ifdef illumos
78 #include <sys/ddi.h>
79 #include <sys/sunddi.h>
80 #endif
81 #include <sys/cpuvar.h>
82 #include <sys/kmem.h>
83 #ifdef illumos
84 #include <sys/strsubr.h>
85 #endif
86 #include <sys/sysmacros.h>
87 #include <sys/dtrace_impl.h>
88 #include <sys/atomic.h>
89 #include <sys/cmn_err.h>
90 #ifdef illumos
91 #include <sys/mutex_impl.h>
92 #include <sys/rwlock_impl.h>
93 #endif
94 #include <sys/ctf_api.h>
95 #ifdef illumos
96 #include <sys/panic.h>
97 #include <sys/priv_impl.h>
98 #endif
99 #ifdef illumos
100 #include <sys/cred_impl.h>
101 #include <sys/procfs_isa.h>
102 #endif
103 #include <sys/taskq.h>
104 #ifdef illumos
105 #include <sys/mkdev.h>
106 #include <sys/kdi.h>
107 #endif
108 #include <sys/zone.h>
109 #include <sys/socket.h>
110 #include <netinet/in.h>
111 #include "strtolctype.h"
112
113 /* FreeBSD includes: */
114 #ifndef illumos
115 #include <sys/callout.h>
116 #include <sys/ctype.h>
117 #include <sys/eventhandler.h>
118 #include <sys/limits.h>
119 #include <sys/linker.h>
120 #include <sys/kdb.h>
121 #include <sys/jail.h>
122 #include <sys/kernel.h>
123 #include <sys/malloc.h>
124 #include <sys/lock.h>
125 #include <sys/mutex.h>
126 #include <sys/ptrace.h>
127 #include <sys/random.h>
128 #include <sys/rwlock.h>
129 #include <sys/sx.h>
130 #include <sys/sysctl.h>
131
132
133 #include <sys/mount.h>
134 #undef AT_UID
135 #undef AT_GID
136 #include <sys/vnode.h>
137 #include <sys/cred.h>
138
139 #include <sys/dtrace_bsd.h>
140
141 #include <netinet/in.h>
142
143 #include "dtrace_cddl.h"
144 #include "dtrace_debug.c"
145 #endif
146
147 #include "dtrace_xoroshiro128_plus.h"
148
149 /*
150 * DTrace Tunable Variables
151 *
152 * The following variables may be tuned by adding a line to /etc/system that
153 * includes both the name of the DTrace module ("dtrace") and the name of the
154 * variable. For example:
155 *
156 * set dtrace:dtrace_destructive_disallow = 1
157 *
158 * In general, the only variables that one should be tuning this way are those
159 * that affect system-wide DTrace behavior, and for which the default behavior
160 * is undesirable. Most of these variables are tunable on a per-consumer
161 * basis using DTrace options, and need not be tuned on a system-wide basis.
162 * When tuning these variables, avoid pathological values; while some attempt
163 * is made to verify the integrity of these variables, they are not considered
164 * part of the supported interface to DTrace, and they are therefore not
165 * checked comprehensively. Further, these variables should not be tuned
166 * dynamically via "mdb -kw" or other means; they should only be tuned via
167 * /etc/system.
168 */
169 int dtrace_destructive_disallow = 0;
170 #ifndef illumos
171 /* Positive logic version of dtrace_destructive_disallow for loader tunable */
172 int dtrace_allow_destructive = 1;
173 #endif
174 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
175 size_t dtrace_difo_maxsize = (256 * 1024);
176 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024);
177 size_t dtrace_statvar_maxsize = (16 * 1024);
178 size_t dtrace_actions_max = (16 * 1024);
179 size_t dtrace_retain_max = 1024;
180 dtrace_optval_t dtrace_helper_actions_max = 128;
181 dtrace_optval_t dtrace_helper_providers_max = 32;
182 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
183 size_t dtrace_strsize_default = 256;
184 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
185 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
186 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
187 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
188 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
189 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
190 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
191 dtrace_optval_t dtrace_nspec_default = 1;
192 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
193 dtrace_optval_t dtrace_stackframes_default = 20;
194 dtrace_optval_t dtrace_ustackframes_default = 20;
195 dtrace_optval_t dtrace_jstackframes_default = 50;
196 dtrace_optval_t dtrace_jstackstrsize_default = 512;
197 int dtrace_msgdsize_max = 128;
198 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */
199 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
200 int dtrace_devdepth_max = 32;
201 int dtrace_err_verbose;
202 hrtime_t dtrace_deadman_interval = NANOSEC;
203 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
204 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
205 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
206 #ifndef illumos
207 int dtrace_memstr_max = 4096;
208 int dtrace_bufsize_max_frac = 128;
209 #endif
210
211 /*
212 * DTrace External Variables
213 *
214 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
215 * available to DTrace consumers via the backtick (`) syntax. One of these,
216 * dtrace_zero, is made deliberately so: it is provided as a source of
217 * well-known, zero-filled memory. While this variable is not documented,
218 * it is used by some translators as an implementation detail.
219 */
220 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
221
222 /*
223 * DTrace Internal Variables
224 */
225 #ifdef illumos
226 static dev_info_t *dtrace_devi; /* device info */
227 #endif
228 #ifdef illumos
229 static vmem_t *dtrace_arena; /* probe ID arena */
230 static vmem_t *dtrace_minor; /* minor number arena */
231 #else
232 static taskq_t *dtrace_taskq; /* task queue */
233 static struct unrhdr *dtrace_arena; /* Probe ID number. */
234 #endif
235 static dtrace_probe_t **dtrace_probes; /* array of all probes */
236 static int dtrace_nprobes; /* number of probes */
237 static dtrace_provider_t *dtrace_provider; /* provider list */
238 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
239 static int dtrace_opens; /* number of opens */
240 static int dtrace_helpers; /* number of helpers */
241 static int dtrace_getf; /* number of unpriv getf()s */
242 #ifdef illumos
243 static void *dtrace_softstate; /* softstate pointer */
244 #endif
245 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
246 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
247 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
248 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
249 static int dtrace_toxranges; /* number of toxic ranges */
250 static int dtrace_toxranges_max; /* size of toxic range array */
251 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
252 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
253 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
254 static kthread_t *dtrace_panicked; /* panicking thread */
255 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
256 static dtrace_genid_t dtrace_probegen; /* current probe generation */
257 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
258 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
259 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
260 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
261 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
262 #ifndef illumos
263 static struct mtx dtrace_unr_mtx;
264 MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
265 static eventhandler_tag dtrace_kld_load_tag;
266 static eventhandler_tag dtrace_kld_unload_try_tag;
267 #endif
268
269 /*
270 * DTrace Locking
271 * DTrace is protected by three (relatively coarse-grained) locks:
272 *
273 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
274 * including enabling state, probes, ECBs, consumer state, helper state,
275 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
276 * probe context is lock-free -- synchronization is handled via the
277 * dtrace_sync() cross call mechanism.
278 *
279 * (2) dtrace_provider_lock is required when manipulating provider state, or
280 * when provider state must be held constant.
281 *
282 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
283 * when meta provider state must be held constant.
284 *
285 * The lock ordering between these three locks is dtrace_meta_lock before
286 * dtrace_provider_lock before dtrace_lock. (In particular, there are
287 * several places where dtrace_provider_lock is held by the framework as it
288 * calls into the providers -- which then call back into the framework,
289 * grabbing dtrace_lock.)
290 *
291 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
292 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
293 * role as a coarse-grained lock; it is acquired before both of these locks.
294 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
295 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
296 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
297 * acquired _between_ dtrace_provider_lock and dtrace_lock.
298 */
299 static kmutex_t dtrace_lock; /* probe state lock */
300 static kmutex_t dtrace_provider_lock; /* provider state lock */
301 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
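/*
 * Illustrative sketch only (not code from this file): a hypothetical path
 * that takes every lock named above would, per the ordering rules in the
 * block comment, acquire and release them as follows (error handling
 * elided):
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&mod_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&cpu_lock);
 *	mutex_exit(&dtrace_meta_lock);
 */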
302
303 #ifndef illumos
304 /* XXX FreeBSD hacks. */
305 #define cr_suid cr_svuid
306 #define cr_sgid cr_svgid
307 #define ipaddr_t in_addr_t
308 #define mod_modname pathname
309 #define vuprintf vprintf
310 #ifndef crgetzoneid
311 #define crgetzoneid(_a) 0
312 #endif
313 #define ttoproc(_a) ((_a)->td_proc)
314 #define SNOCD 0
315 #define CPU_ON_INTR(_a) 0
316
317 #define PRIV_EFFECTIVE (1 << 0)
318 #define PRIV_DTRACE_KERNEL (1 << 1)
319 #define PRIV_DTRACE_PROC (1 << 2)
320 #define PRIV_DTRACE_USER (1 << 3)
321 #define PRIV_PROC_OWNER (1 << 4)
322 #define PRIV_PROC_ZONE (1 << 5)
323 #define PRIV_ALL ~0
324
325 SYSCTL_DECL(_debug_dtrace);
326 SYSCTL_DECL(_kern_dtrace);
327 #endif
328
329 #ifdef illumos
330 #define curcpu CPU->cpu_id
331 #endif
332
333
334 /*
335 * DTrace Provider Variables
336 *
337 * These are the variables relating to DTrace as a provider (that is, the
338 * provider of the BEGIN, END, and ERROR probes).
339 */
340 static dtrace_pattr_t dtrace_provider_attr = {
341 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
342 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
343 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
344 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
345 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
346 };
347
348 static void
349 dtrace_nullop(void)
350 {}
351
352 static dtrace_pops_t dtrace_provider_ops = {
353 .dtps_provide = (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
354 .dtps_provide_module = (void (*)(void *, modctl_t *))dtrace_nullop,
355 .dtps_enable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
356 .dtps_disable = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
357 .dtps_suspend = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
358 .dtps_resume = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
359 .dtps_getargdesc = NULL,
360 .dtps_getargval = NULL,
361 .dtps_usermode = NULL,
362 .dtps_destroy = (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
363 };
364
365 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
366 static dtrace_id_t dtrace_probeid_end; /* special END probe */
367 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
368
369 /*
370 * DTrace Helper Tracing Variables
371 *
372 * These variables should be set dynamically to enable helper tracing. The
373 * only variables that should be set are dtrace_helptrace_enable (which should
374 * be set to a non-zero value to allocate helper tracing buffers on the next
375 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
376 * non-zero value to deallocate helper tracing buffers on the next close of
377 * /dev/dtrace). When (and only when) helper tracing is disabled, the
378 * buffer size may also be set via dtrace_helptrace_bufsize.
379 */
380 int dtrace_helptrace_enable = 0;
381 int dtrace_helptrace_disable = 0;
382 int dtrace_helptrace_bufsize = 16 * 1024 * 1024;
383 uint32_t dtrace_helptrace_nlocals;
384 static dtrace_helptrace_t *dtrace_helptrace_buffer;
385 static uint32_t dtrace_helptrace_next = 0;
386 static int dtrace_helptrace_wrapped = 0;
387
388 /*
389 * DTrace Error Hashing
390 *
391  * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
392 * table. This is very useful for checking coverage of tests that are
393 * expected to induce DIF or DOF processing errors, and may be useful for
394  * debugging problems in the DIF code generator or in DOF generation. The
395 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
396 */
397 #ifdef DEBUG
398 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
399 static const char *dtrace_errlast;
400 static kthread_t *dtrace_errthread;
401 static kmutex_t dtrace_errlock;
402 #endif
403
404 /*
405 * DTrace Macros and Constants
406 *
407 * These are various macros that are useful in various spots in the
408 * implementation, along with a few random constants that have no meaning
409 * outside of the implementation. There is no real structure to this cpp
410 * mishmash -- but is there ever?
411 */
412 #define DTRACE_HASHSTR(hash, probe) \
413 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
414
415 #define DTRACE_HASHNEXT(hash, probe) \
416 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
417
418 #define DTRACE_HASHPREV(hash, probe) \
419 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
420
421 #define DTRACE_HASHEQ(hash, lhs, rhs) \
422 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
423 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
424
425 #define DTRACE_AGGHASHSIZE_SLEW 17
426
427 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
428
429 /*
430 * The key for a thread-local variable consists of the lower 61 bits of the
431 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
432 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
433 * equal to a variable identifier. This is necessary (but not sufficient) to
434 * assure that global associative arrays never collide with thread-local
435 * variables. To guarantee that they cannot collide, we must also define the
436 * order for keying dynamic variables. That order is:
437 *
438 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
439 *
440 * Because the variable-key and the tls-key are in orthogonal spaces, there is
441 * no way for a global variable key signature to match a thread-local key
442 * signature.
443 */
444 #ifdef illumos
445 #define DTRACE_TLS_THRKEY(where) { \
446 uint_t intr = 0; \
447 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
448 for (; actv; actv >>= 1) \
449 intr++; \
450 ASSERT(intr < (1 << 3)); \
451 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
452 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
453 }
454 #else
455 #define DTRACE_TLS_THRKEY(where) { \
456 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
457 uint_t intr = 0; \
458 uint_t actv = _c->cpu_intr_actv; \
459 for (; actv; actv >>= 1) \
460 intr++; \
461 ASSERT(intr < (1 << 3)); \
462 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
463 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
464 }
465 #endif
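/*
 * Worked example (illustrative only): for a thread with no interrupt active
 * above LOCK_LEVEL, the loop above leaves intr == 0, so the key is simply
 * (t_did + DIF_VARIABLE_MAX) masked to its low 61 bits; were an interrupt
 * above LOCK_LEVEL active, its non-zero 3-bit level would be folded into
 * bits 61-63 of the key.
 */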
466
467 #define DT_BSWAP_8(x) ((x) & 0xff)
468 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
469 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
470 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
471
472 #define DT_MASK_LO 0x00000000FFFFFFFFULL
473
474 #define DTRACE_STORE(type, tomax, offset, what) \
475 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
476
477 #if !defined(__x86) && !defined(__aarch64__)
478 #define DTRACE_ALIGNCHECK(addr, size, flags) \
479 if (addr & (size - 1)) { \
480 *flags |= CPU_DTRACE_BADALIGN; \
481 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
482 return (0); \
483 }
484 #else
485 #define DTRACE_ALIGNCHECK(addr, size, flags)
486 #endif
487
488 /*
489 * Test whether a range of memory starting at testaddr of size testsz falls
490 * within the range of memory described by addr, sz. We take care to avoid
491 * problems with overflow and underflow of the unsigned quantities, and
492 * disallow all negative sizes. Ranges of size 0 are allowed.
493 */
494 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
495 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
496 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
497 (testaddr) + (testsz) >= (testaddr))
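/*
 * For example (illustrative only): with baseaddr = B and basesz = 8, a
 * request of testaddr = B + 4 and testsz = (size_t)-4 wraps around so that
 * testaddr + testsz == B, which would satisfy the first two clauses; the
 * final clause (testaddr + testsz >= testaddr) catches the wraparound and
 * rejects the range.
 */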
498
499 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \
500 do { \
501 if ((remp) != NULL) { \
502 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \
503 } \
504 } while (0)
505
506
507 /*
508 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
509 * alloc_sz on the righthand side of the comparison in order to avoid overflow
510 * or underflow in the comparison with it. This is simpler than the INRANGE
511 * check above, because we know that the dtms_scratch_ptr is valid in the
512 * range. Allocations of size zero are allowed.
513 */
514 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
515 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
516 (mstate)->dtms_scratch_ptr >= (alloc_sz))
517
518 #define DTRACE_INSCRATCHPTR(mstate, ptr, howmany) \
519 ((ptr) >= (mstate)->dtms_scratch_base && \
520 (ptr) <= \
521 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - (howmany)))
522
523 #define DTRACE_LOADFUNC(bits) \
524 /*CSTYLED*/ \
525 uint##bits##_t \
526 dtrace_load##bits(uintptr_t addr) \
527 { \
528 size_t size = bits / NBBY; \
529 /*CSTYLED*/ \
530 uint##bits##_t rval; \
531 int i; \
532 volatile uint16_t *flags = (volatile uint16_t *) \
533 &cpu_core[curcpu].cpuc_dtrace_flags; \
534 \
535 DTRACE_ALIGNCHECK(addr, size, flags); \
536 \
537 for (i = 0; i < dtrace_toxranges; i++) { \
538 if (addr >= dtrace_toxrange[i].dtt_limit) \
539 continue; \
540 \
541 if (addr + size <= dtrace_toxrange[i].dtt_base) \
542 continue; \
543 \
544 /* \
545 * This address falls within a toxic region; return 0. \
546 */ \
547 *flags |= CPU_DTRACE_BADADDR; \
548 cpu_core[curcpu].cpuc_dtrace_illval = addr; \
549 return (0); \
550 } \
551 \
552 *flags |= CPU_DTRACE_NOFAULT; \
553 /*CSTYLED*/ \
554 rval = *((volatile uint##bits##_t *)addr); \
555 *flags &= ~CPU_DTRACE_NOFAULT; \
556 \
557 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
558 }
559
560 #ifdef _LP64
561 #define dtrace_loadptr dtrace_load64
562 #else
563 #define dtrace_loadptr dtrace_load32
564 #endif
565
566 #define DTRACE_DYNHASH_FREE 0
567 #define DTRACE_DYNHASH_SINK 1
568 #define DTRACE_DYNHASH_VALID 2
569
570 #define DTRACE_MATCH_NEXT 0
571 #define DTRACE_MATCH_DONE 1
572 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
573 #define DTRACE_STATE_ALIGN 64
574
575 #define DTRACE_FLAGS2FLT(flags) \
576 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
577 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
578 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
579 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
580 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
581 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
582 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
583 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
584 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
585 DTRACEFLT_UNKNOWN)
586
587 #define DTRACEACT_ISSTRING(act) \
588 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
589 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
590
591 /* Function prototype definitions: */
592 static size_t dtrace_strlen(const char *, size_t);
593 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
594 static void dtrace_enabling_provide(dtrace_provider_t *);
595 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
596 static void dtrace_enabling_matchall(void);
597 static void dtrace_enabling_reap(void);
598 static dtrace_state_t *dtrace_anon_grab(void);
599 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
600 dtrace_state_t *, uint64_t, uint64_t);
601 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
602 static void dtrace_buffer_drop(dtrace_buffer_t *);
603 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
604 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
605 dtrace_state_t *, dtrace_mstate_t *);
606 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
607 dtrace_optval_t);
608 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
609 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
610 uint16_t dtrace_load16(uintptr_t);
611 uint32_t dtrace_load32(uintptr_t);
612 uint64_t dtrace_load64(uintptr_t);
613 uint8_t dtrace_load8(uintptr_t);
614 void dtrace_dynvar_clean(dtrace_dstate_t *);
615 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
616 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
617 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
618 static int dtrace_priv_proc(dtrace_state_t *);
619 static void dtrace_getf_barrier(void);
620 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
621 dtrace_mstate_t *, dtrace_vstate_t *);
622 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
623 dtrace_mstate_t *, dtrace_vstate_t *);
624
625 /*
626 * DTrace Probe Context Functions
627 *
628 * These functions are called from probe context. Because probe context is
629  * any context in which C may be called, arbitrary locks may be held,
630 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
631 * As a result, functions called from probe context may only call other DTrace
632 * support functions -- they may not interact at all with the system at large.
633 * (Note that the ASSERT macro is made probe-context safe by redefining it in
634 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
635 * loads are to be performed from probe context, they _must_ be in terms of
636 * the safe dtrace_load*() variants.
637 *
638 * Some functions in this block are not actually called from probe context;
639 * for these functions, there will be a comment above the function reading
640 * "Note: not called from probe context."
641 */
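/*
 * For instance (a hypothetical fragment, not code from this file): a probe
 * context routine that needs a pointer-sized value at kernel address 'addr'
 * would use the safe variant,
 *
 *	uintptr_t val = dtrace_loadptr(addr);
 *
 * rather than dereferencing the address directly, so that faults and toxic
 * ranges are absorbed by the load function instead of bringing down the
 * system.
 */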
642 void
643 dtrace_panic(const char *format, ...)
644 {
645 va_list alist;
646
647 va_start(alist, format);
648 #ifdef __FreeBSD__
649 vpanic(format, alist);
650 #else
651 dtrace_vpanic(format, alist);
652 #endif
653 va_end(alist);
654 }
655
656 int
657 dtrace_assfail(const char *a, const char *f, int l)
658 {
659 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
660
661 /*
662 * We just need something here that even the most clever compiler
663 * cannot optimize away.
664 */
665 return (a[(uintptr_t)f]);
666 }
667
668 /*
669 * Atomically increment a specified error counter from probe context.
670 */
671 static void
672 dtrace_error(uint32_t *counter)
673 {
674 /*
675 * Most counters stored to in probe context are per-CPU counters.
676 * However, there are some error conditions that are sufficiently
677 * arcane that they don't merit per-CPU storage. If these counters
678 * are incremented concurrently on different CPUs, scalability will be
679 * adversely affected -- but we don't expect them to be white-hot in a
680 * correctly constructed enabling...
681 */
682 uint32_t oval, nval;
683
684 do {
685 oval = *counter;
686
687 if ((nval = oval + 1) == 0) {
688 /*
689 * If the counter would wrap, set it to 1 -- assuring
690 * that the counter is never zero when we have seen
691 * errors. (The counter must be 32-bits because we
692 * aren't guaranteed a 64-bit compare&swap operation.)
693 * To save this code both the infamy of being fingered
694 * by a priggish news story and the indignity of being
695 * the target of a neo-puritan witch trial, we're
696 * carefully avoiding any colorful description of the
697 * likelihood of this condition -- but suffice it to
698 * say that it is only slightly more likely than the
699 * overflow of predicate cache IDs, as discussed in
700 * dtrace_predicate_create().
701 */
702 nval = 1;
703 }
704 } while (dtrace_cas32(counter, oval, nval) != oval);
705 }
706
707 /*
708 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
709 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
710 */
711 /* BEGIN CSTYLED */
712 DTRACE_LOADFUNC(8)
713 DTRACE_LOADFUNC(16)
714 DTRACE_LOADFUNC(32)
715 DTRACE_LOADFUNC(64)
716 /* END CSTYLED */
717
718 static int
719 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
720 {
721 if (dest < mstate->dtms_scratch_base)
722 return (0);
723
724 if (dest + size < dest)
725 return (0);
726
727 if (dest + size > mstate->dtms_scratch_ptr)
728 return (0);
729
730 return (1);
731 }
732
733 static int
734 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
735 dtrace_statvar_t **svars, int nsvars)
736 {
737 int i;
738 size_t maxglobalsize, maxlocalsize;
739
740 if (nsvars == 0)
741 return (0);
742
743 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
744 maxlocalsize = maxglobalsize * NCPU;
745
746 for (i = 0; i < nsvars; i++) {
747 dtrace_statvar_t *svar = svars[i];
748 uint8_t scope;
749 size_t size;
750
751 if (svar == NULL || (size = svar->dtsv_size) == 0)
752 continue;
753
754 scope = svar->dtsv_var.dtdv_scope;
755
756 /*
757 * We verify that our size is valid in the spirit of providing
758 * defense in depth: we want to prevent attackers from using
759 * DTrace to escalate an orthogonal kernel heap corruption bug
760 * into the ability to store to arbitrary locations in memory.
761 */
762 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
763 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
764
765 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
766 svar->dtsv_size)) {
767 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
768 svar->dtsv_size);
769 return (1);
770 }
771 }
772
773 return (0);
774 }
775
776 /*
777 * Check to see if the address is within a memory region to which a store may
778 * be issued. This includes the DTrace scratch areas, and any DTrace variable
779 * region. The caller of dtrace_canstore() is responsible for performing any
780 * alignment checks that are needed before stores are actually executed.
781 */
782 static int
783 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
784 dtrace_vstate_t *vstate)
785 {
786 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
787 }
788
789 /*
790 * Implementation of dtrace_canstore which communicates the upper bound of the
791 * allowed memory region.
792 */
793 static int
794 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
795 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
796 {
797 /*
798 * First, check to see if the address is in scratch space...
799 */
800 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
801 mstate->dtms_scratch_size)) {
802 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
803 mstate->dtms_scratch_size);
804 return (1);
805 }
806
807 /*
808 * Now check to see if it's a dynamic variable. This check will pick
809 * up both thread-local variables and any global dynamically-allocated
810 * variables.
811 */
812 if (DTRACE_INRANGE(addr, sz, vstate->dtvs_dynvars.dtds_base,
813 vstate->dtvs_dynvars.dtds_size)) {
814 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
815 uintptr_t base = (uintptr_t)dstate->dtds_base +
816 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
817 uintptr_t chunkoffs;
818 dtrace_dynvar_t *dvar;
819
820 /*
821 * Before we assume that we can store here, we need to make
822 * sure that it isn't in our metadata -- storing to our
823 * dynamic variable metadata would corrupt our state. For
824 * the range to not include any dynamic variable metadata,
825 * it must:
826 *
827 * (1) Start above the hash table that is at the base of
828 * the dynamic variable space
829 *
830 * (2) Have a starting chunk offset that is beyond the
831 * dtrace_dynvar_t that is at the base of every chunk
832 *
833 * (3) Not span a chunk boundary
834 *
835 * (4) Not be in the tuple space of a dynamic variable
836 *
837 */
838 if (addr < base)
839 return (0);
840
841 chunkoffs = (addr - base) % dstate->dtds_chunksize;
842
843 if (chunkoffs < sizeof (dtrace_dynvar_t))
844 return (0);
845
846 if (chunkoffs + sz > dstate->dtds_chunksize)
847 return (0);
848
849 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
850
851 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
852 return (0);
853
854 if (chunkoffs < sizeof (dtrace_dynvar_t) +
855 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
856 return (0);
857
858 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
859 return (1);
860 }
861
862 /*
863 * Finally, check the static local and global variables. These checks
864 * take the longest, so we perform them last.
865 */
866 if (dtrace_canstore_statvar(addr, sz, remain,
867 vstate->dtvs_locals, vstate->dtvs_nlocals))
868 return (1);
869
870 if (dtrace_canstore_statvar(addr, sz, remain,
871 vstate->dtvs_globals, vstate->dtvs_nglobals))
872 return (1);
873
874 return (0);
875 }
876
877
878 /*
879 * Convenience routine to check to see if the address is within a memory
880 * region in which a load may be issued given the user's privilege level;
881 * if not, it sets the appropriate error flags and loads 'addr' into the
882 * illegal value slot.
883 *
884 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
885 * appropriate memory access protection.
886 */
887 static int
888 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
889 dtrace_vstate_t *vstate)
890 {
891 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
892 }
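/*
 * Typical (illustrative) use from a DIF subroutine: before copying from an
 * address supplied by the script, the subroutine bails out of its case
 * unless the range is loadable, e.g.
 *
 *	if (!dtrace_canload(src, size, mstate, vstate))
 *		break;
 */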
893
894 /*
895  * Implementation of dtrace_canload which communicates the upper bound of the
896 * allowed memory region.
897 */
898 static int
899 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
900 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
901 {
902 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
903 file_t *fp;
904
905 /*
906 * If we hold the privilege to read from kernel memory, then
907 * everything is readable.
908 */
909 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
910 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
911 return (1);
912 }
913
914 /*
915 * You can obviously read that which you can store.
916 */
917 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
918 return (1);
919
920 /*
921 * We're allowed to read from our own string table.
922 */
923 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
924 mstate->dtms_difo->dtdo_strlen)) {
925 DTRACE_RANGE_REMAIN(remain, addr,
926 mstate->dtms_difo->dtdo_strtab,
927 mstate->dtms_difo->dtdo_strlen);
928 return (1);
929 }
930
931 if (vstate->dtvs_state != NULL &&
932 dtrace_priv_proc(vstate->dtvs_state)) {
933 proc_t *p;
934
935 /*
936 * When we have privileges to the current process, there are
937 * several context-related kernel structures that are safe to
938 * read, even absent the privilege to read from kernel memory.
939 * These reads are safe because these structures contain only
940 * state that (1) we're permitted to read, (2) is harmless or
941 * (3) contains pointers to additional kernel state that we're
942 * not permitted to read (and as such, do not present an
943 * opportunity for privilege escalation). Finally (and
944 * critically), because of the nature of their relation with
945 * the current thread context, the memory associated with these
946 * structures cannot change over the duration of probe context,
947 * and it is therefore impossible for this memory to be
948 * deallocated and reallocated as something else while it's
949 * being operated upon.
950 */
951 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
952 DTRACE_RANGE_REMAIN(remain, addr, curthread,
953 sizeof (kthread_t));
954 return (1);
955 }
956
957 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
958 sz, curthread->t_procp, sizeof (proc_t))) {
959 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
960 sizeof (proc_t));
961 return (1);
962 }
963
964 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
965 curthread->t_cred, sizeof (cred_t))) {
966 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
967 sizeof (cred_t));
968 return (1);
969 }
970
971 #ifdef illumos
972 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
973 &(p->p_pidp->pid_id), sizeof (pid_t))) {
974 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
975 sizeof (pid_t));
976 return (1);
977 }
978
979 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
980 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
981 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
982 offsetof(cpu_t, cpu_pause_thread));
983 return (1);
984 }
985 #endif
986 }
987
988 if ((fp = mstate->dtms_getf) != NULL) {
989 uintptr_t psz = sizeof (void *);
990 vnode_t *vp;
991 vnodeops_t *op;
992
993 /*
994 * When getf() returns a file_t, the enabling is implicitly
995 * granted the (transient) right to read the returned file_t
996 * as well as the v_path and v_op->vnop_name of the underlying
997 * vnode. These accesses are allowed after a successful
998 * getf() because the members that they refer to cannot change
999 * once set -- and the barrier logic in the kernel's closef()
1000 	 * path assures that the file_t and its referenced vnode_t
1001 	 * cannot themselves be stale (that is, it is impossible for
1002 * either dtms_getf itself or its f_vnode member to reference
1003 * freed memory).
1004 */
1005 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
1006 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
1007 return (1);
1008 }
1009
1010 if ((vp = fp->f_vnode) != NULL) {
1011 size_t slen;
1012 #ifdef illumos
1013 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
1014 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
1015 psz);
1016 return (1);
1017 }
1018 slen = strlen(vp->v_path) + 1;
1019 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
1020 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
1021 slen);
1022 return (1);
1023 }
1024 #endif
1025
1026 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
1027 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
1028 psz);
1029 return (1);
1030 }
1031
1032 #ifdef illumos
1033 if ((op = vp->v_op) != NULL &&
1034 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
1035 DTRACE_RANGE_REMAIN(remain, addr,
1036 &op->vnop_name, psz);
1037 return (1);
1038 }
1039
1040 if (op != NULL && op->vnop_name != NULL &&
1041 DTRACE_INRANGE(addr, sz, op->vnop_name,
1042 (slen = strlen(op->vnop_name) + 1))) {
1043 DTRACE_RANGE_REMAIN(remain, addr,
1044 op->vnop_name, slen);
1045 return (1);
1046 }
1047 #endif
1048 }
1049 }
1050
1051 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
1052 *illval = addr;
1053 return (0);
1054 }
1055
1056 /*
1057 * Convenience routine to check to see if a given string is within a memory
1058 * region in which a load may be issued given the user's privilege level;
1059 * this exists so that we don't need to issue unnecessary dtrace_strlen()
1060 * calls in the event that the user has all privileges.
1061 */
1062 static int
1063 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
1064 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1065 {
1066 size_t rsize;
1067
1068 /*
1069 * If we hold the privilege to read from kernel memory, then
1070 * everything is readable.
1071 */
1072 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1073 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
1074 return (1);
1075 }
1076
1077 /*
1078 * Even if the caller is uninterested in querying the remaining valid
1079 * range, it is required to ensure that the access is allowed.
1080 */
1081 if (remain == NULL) {
1082 remain = &rsize;
1083 }
1084 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
1085 size_t strsz;
1086 /*
1087 * Perform the strlen after determining the length of the
1088 * memory region which is accessible. This prevents timing
1089 * information from being used to find NULs in memory which is
1090 * not accessible to the caller.
1091 */
1092 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
1093 MIN(sz, *remain));
1094 if (strsz <= *remain) {
1095 return (1);
1096 }
1097 }
1098
1099 return (0);
1100 }
1101
1102 /*
1103 * Convenience routine to check to see if a given variable is within a memory
1104 * region in which a load may be issued given the user's privilege level.
1105 */
1106 static int
1107 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
1108 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1109 {
1110 size_t sz;
1111 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1112
1113 /*
1114 * Calculate the max size before performing any checks since even
1115 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
1116 * return the max length via 'remain'.
1117 */
1118 if (type->dtdt_kind == DIF_TYPE_STRING) {
1119 dtrace_state_t *state = vstate->dtvs_state;
1120
1121 if (state != NULL) {
1122 sz = state->dts_options[DTRACEOPT_STRSIZE];
1123 } else {
1124 /*
1125 * In helper context, we have a NULL state; fall back
1126 * to using the system-wide default for the string size
1127 * in this case.
1128 */
1129 sz = dtrace_strsize_default;
1130 }
1131 } else {
1132 sz = type->dtdt_size;
1133 }
1134
1135 /*
1136 * If we hold the privilege to read from kernel memory, then
1137 * everything is readable.
1138 */
1139 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1140 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1141 return (1);
1142 }
1143
1144 if (type->dtdt_kind == DIF_TYPE_STRING) {
1145 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1146 vstate));
1147 }
1148 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1149 vstate));
1150 }
1151
1152 /*
1153 * Convert a string to a signed integer using safe loads.
1154 *
1155 * NOTE: This function uses various macros from strtolctype.h to manipulate
1156 * digit values, etc -- these have all been checked to ensure they make
1157 * no additional function calls.
1158 */
1159 static int64_t
1160 dtrace_strtoll(char *input, int base, size_t limit)
1161 {
1162 uintptr_t pos = (uintptr_t)input;
1163 int64_t val = 0;
1164 int x;
1165 boolean_t neg = B_FALSE;
1166 char c, cc, ccc;
1167 uintptr_t end = pos + limit;
1168
1169 /*
1170 * Consume any whitespace preceding digits.
1171 */
1172 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1173 pos++;
1174
1175 /*
1176 * Handle an explicit sign if one is present.
1177 */
1178 if (c == '-' || c == '+') {
1179 if (c == '-')
1180 neg = B_TRUE;
1181 c = dtrace_load8(++pos);
1182 }
1183
1184 /*
1185 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1186 * if present.
1187 */
1188 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1189 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1190 pos += 2;
1191 c = ccc;
1192 }
1193
1194 /*
1195 * Read in contiguous digits until the first non-digit character.
1196 */
1197 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1198 c = dtrace_load8(++pos))
1199 val = val * base + x;
1200
1201 return (neg ? -val : val);
1202 }
1203
1204 /*
1205 * Compare two strings using safe loads.
1206 */
1207 static int
1208 dtrace_strncmp(char *s1, char *s2, size_t limit)
1209 {
1210 uint8_t c1, c2;
1211 volatile uint16_t *flags;
1212
1213 if (s1 == s2 || limit == 0)
1214 return (0);
1215
1216 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1217
1218 do {
1219 if (s1 == NULL) {
1220 c1 = '\0';
1221 } else {
1222 c1 = dtrace_load8((uintptr_t)s1++);
1223 }
1224
1225 if (s2 == NULL) {
1226 c2 = '\0';
1227 } else {
1228 c2 = dtrace_load8((uintptr_t)s2++);
1229 }
1230
1231 if (c1 != c2)
1232 return (c1 - c2);
1233 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1234
1235 return (0);
1236 }
1237
1238 /*
1239 * Compute strlen(s) for a string using safe memory accesses. The additional
1240 * len parameter is used to specify a maximum length to ensure completion.
1241 */
1242 static size_t
1243 dtrace_strlen(const char *s, size_t lim)
1244 {
1245 uint_t len;
1246
1247 for (len = 0; len != lim; len++) {
1248 if (dtrace_load8((uintptr_t)s++) == '\0')
1249 break;
1250 }
1251
1252 return (len);
1253 }
1254
1255 /*
1256 * Check if an address falls within a toxic region.
1257 */
1258 static int
1259 dtrace_istoxic(uintptr_t kaddr, size_t size)
1260 {
1261 uintptr_t taddr, tsize;
1262 int i;
1263
1264 for (i = 0; i < dtrace_toxranges; i++) {
1265 taddr = dtrace_toxrange[i].dtt_base;
1266 tsize = dtrace_toxrange[i].dtt_limit - taddr;
1267
1268 if (kaddr - taddr < tsize) {
1269 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1270 cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
1271 return (1);
1272 }
1273
1274 if (taddr - kaddr < size) {
1275 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1276 cpu_core[curcpu].cpuc_dtrace_illval = taddr;
1277 return (1);
1278 }
1279 }
1280
1281 return (0);
1282 }
1283
1284 /*
1285 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
1286 * memory specified by the DIF program. The dst is assumed to be safe memory
1287 * that we can store to directly because it is managed by DTrace. As with
1288 * standard bcopy, overlapping copies are handled properly.
1289 */
1290 static void
1291 dtrace_bcopy(const void *src, void *dst, size_t len)
1292 {
1293 if (len != 0) {
1294 uint8_t *s1 = dst;
1295 const uint8_t *s2 = src;
1296
1297 if (s1 <= s2) {
1298 do {
1299 *s1++ = dtrace_load8((uintptr_t)s2++);
1300 } while (--len != 0);
1301 } else {
1302 s2 += len;
1303 s1 += len;
1304
1305 do {
1306 *--s1 = dtrace_load8((uintptr_t)--s2);
1307 } while (--len != 0);
1308 }
1309 }
1310 }
1311
1312 /*
1313 * Copy src to dst using safe memory accesses, up to either the specified
1314 * length, or the point that a nul byte is encountered. The src is assumed to
1315 * be unsafe memory specified by the DIF program. The dst is assumed to be
1316 * safe memory that we can store to directly because it is managed by DTrace.
1317 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1318 */
1319 static void
1320 dtrace_strcpy(const void *src, void *dst, size_t len)
1321 {
1322 if (len != 0) {
1323 uint8_t *s1 = dst, c;
1324 const uint8_t *s2 = src;
1325
1326 do {
1327 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1328 } while (--len != 0 && c != '\0');
1329 }
1330 }
1331
1332 /*
1333 * Copy src to dst, deriving the size and type from the specified (BYREF)
1334 * variable type. The src is assumed to be unsafe memory specified by the DIF
1335 * program. The dst is assumed to be DTrace variable memory that is of the
1336 * specified type; we assume that we can store to directly.
1337 */
1338 static void
1339 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1340 {
1341 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1342
1343 if (type->dtdt_kind == DIF_TYPE_STRING) {
1344 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1345 } else {
1346 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1347 }
1348 }
1349
1350 /*
1351 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1352 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1353 * safe memory that we can access directly because it is managed by DTrace.
1354 */
1355 static int
1356 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1357 {
1358 volatile uint16_t *flags;
1359
1360 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
1361
1362 if (s1 == s2)
1363 return (0);
1364
1365 if (s1 == NULL || s2 == NULL)
1366 return (1);
1367
1368 if (s1 != s2 && len != 0) {
1369 const uint8_t *ps1 = s1;
1370 const uint8_t *ps2 = s2;
1371
1372 do {
1373 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1374 return (1);
1375 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1376 }
1377 return (0);
1378 }
1379
1380 /*
1381 * Zero the specified region using a simple byte-by-byte loop. Note that this
1382 * is for safe DTrace-managed memory only.
1383 */
1384 static void
1385 dtrace_bzero(void *dst, size_t len)
1386 {
1387 uchar_t *cp;
1388
1389 for (cp = dst; len != 0; len--)
1390 *cp++ = 0;
1391 }
1392
1393 static void
1394 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1395 {
1396 uint64_t result[2];
1397
1398 result[0] = addend1[0] + addend2[0];
1399 result[1] = addend1[1] + addend2[1] +
1400 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1401
1402 sum[0] = result[0];
1403 sum[1] = result[1];
1404 }
1405
1406 /*
1407 * Shift the 128-bit value in a by b. If b is positive, shift left.
1408 * If b is negative, shift right.
1409 */
1410 static void
1411 dtrace_shift_128(uint64_t *a, int b)
1412 {
1413 uint64_t mask;
1414
1415 if (b == 0)
1416 return;
1417
1418 if (b < 0) {
1419 b = -b;
1420 if (b >= 64) {
1421 a[0] = a[1] >> (b - 64);
1422 a[1] = 0;
1423 } else {
1424 a[0] >>= b;
1425 mask = 1LL << (64 - b);
1426 mask -= 1;
1427 a[0] |= ((a[1] & mask) << (64 - b));
1428 a[1] >>= b;
1429 }
1430 } else {
1431 if (b >= 64) {
1432 a[1] = a[0] << (b - 64);
1433 a[0] = 0;
1434 } else {
1435 a[1] <<= b;
1436 mask = a[0] >> (64 - b);
1437 a[1] |= mask;
1438 a[0] <<= b;
1439 }
1440 }
1441 }
1442
1443 /*
1444 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1445 * use native multiplication on those, and then re-combine into the
1446 * resulting 128-bit value.
1447 *
1448 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1449 * hi1 * hi2 << 64 +
1450 * hi1 * lo2 << 32 +
1451 * hi2 * lo1 << 32 +
1452 * lo1 * lo2
1453 */
1454 static void
1455 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1456 {
1457 uint64_t hi1, hi2, lo1, lo2;
1458 uint64_t tmp[2];
1459
1460 hi1 = factor1 >> 32;
1461 hi2 = factor2 >> 32;
1462
1463 lo1 = factor1 & DT_MASK_LO;
1464 lo2 = factor2 & DT_MASK_LO;
1465
1466 product[0] = lo1 * lo2;
1467 product[1] = hi1 * hi2;
1468
1469 tmp[0] = hi1 * lo2;
1470 tmp[1] = 0;
1471 dtrace_shift_128(tmp, 32);
1472 dtrace_add_128(product, tmp, product);
1473
1474 tmp[0] = hi2 * lo1;
1475 tmp[1] = 0;
1476 dtrace_shift_128(tmp, 32);
1477 dtrace_add_128(product, tmp, product);
1478 }
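/*
 * Quick sanity check of the decomposition above (illustrative only):
 * multiplying 0xffffffffffffffff by 2 gives hi1 = 0xffffffff, lo1 =
 * 0xffffffff, hi2 = 0, lo2 = 2, and the resulting 128-bit product is
 * { product[1] = 0x1, product[0] = 0xfffffffffffffffe }, i.e. 2^65 - 2,
 * as expected.
 */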
1479
1480 /*
1481 * This privilege check should be used by actions and subroutines to
1482 * verify that the user credentials of the process that enabled the
1483 * invoking ECB match the target credentials
1484 */
1485 static int
1486 dtrace_priv_proc_common_user(dtrace_state_t *state)
1487 {
1488 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1489
1490 /*
1491 * We should always have a non-NULL state cred here, since if cred
1492 * is null (anonymous tracing), we fast-path bypass this routine.
1493 */
1494 ASSERT(s_cr != NULL);
1495
1496 if ((cr = CRED()) != NULL &&
1497 s_cr->cr_uid == cr->cr_uid &&
1498 s_cr->cr_uid == cr->cr_ruid &&
1499 s_cr->cr_uid == cr->cr_suid &&
1500 s_cr->cr_gid == cr->cr_gid &&
1501 s_cr->cr_gid == cr->cr_rgid &&
1502 s_cr->cr_gid == cr->cr_sgid)
1503 return (1);
1504
1505 return (0);
1506 }
1507
1508 /*
1509 * This privilege check should be used by actions and subroutines to
1510 * verify that the zone of the process that enabled the invoking ECB
1511 * matches the target credentials
1512 */
1513 static int
1514 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1515 {
1516 #ifdef illumos
1517 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1518
1519 /*
1520 * We should always have a non-NULL state cred here, since if cred
1521 * is null (anonymous tracing), we fast-path bypass this routine.
1522 */
1523 ASSERT(s_cr != NULL);
1524
1525 if ((cr = CRED()) != NULL && s_cr->cr_zone == cr->cr_zone)
1526 return (1);
1527
1528 return (0);
1529 #else
1530 return (1);
1531 #endif
1532 }
1533
1534 /*
1535 * This privilege check should be used by actions and subroutines to
1536 * verify that the process has not setuid or changed credentials.
1537 */
1538 static int
1539 dtrace_priv_proc_common_nocd(void)
1540 {
1541 proc_t *proc;
1542
1543 if ((proc = ttoproc(curthread)) != NULL &&
1544 !(proc->p_flag & SNOCD))
1545 return (1);
1546
1547 return (0);
1548 }
1549
1550 static int
1551 dtrace_priv_proc_destructive(dtrace_state_t *state)
1552 {
1553 int action = state->dts_cred.dcr_action;
1554
1555 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1556 dtrace_priv_proc_common_zone(state) == 0)
1557 goto bad;
1558
1559 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1560 dtrace_priv_proc_common_user(state) == 0)
1561 goto bad;
1562
1563 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1564 dtrace_priv_proc_common_nocd() == 0)
1565 goto bad;
1566
1567 return (1);
1568
1569 bad:
1570 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1571
1572 return (0);
1573 }
1574
1575 static int
1576 dtrace_priv_proc_control(dtrace_state_t *state)
1577 {
1578 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1579 return (1);
1580
1581 if (dtrace_priv_proc_common_zone(state) &&
1582 dtrace_priv_proc_common_user(state) &&
1583 dtrace_priv_proc_common_nocd())
1584 return (1);
1585
1586 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1587
1588 return (0);
1589 }
1590
1591 static int
1592 dtrace_priv_proc(dtrace_state_t *state)
1593 {
1594 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1595 return (1);
1596
1597 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1598
1599 return (0);
1600 }
1601
1602 static int
1603 dtrace_priv_kernel(dtrace_state_t *state)
1604 {
1605 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1606 return (1);
1607
1608 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1609
1610 return (0);
1611 }
1612
1613 static int
1614 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1615 {
1616 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1617 return (1);
1618
1619 cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1620
1621 return (0);
1622 }
1623
1624 /*
1625 * Determine if the dte_cond of the specified ECB allows for processing of
1626 * the current probe to continue. Note that this routine may allow continued
1627 * processing, but with access(es) stripped from the mstate's dtms_access
1628 * field.
1629 */
1630 static int
1631 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1632 dtrace_ecb_t *ecb)
1633 {
1634 dtrace_probe_t *probe = ecb->dte_probe;
1635 dtrace_provider_t *prov = probe->dtpr_provider;
1636 dtrace_pops_t *pops = &prov->dtpv_pops;
1637 int mode = DTRACE_MODE_NOPRIV_DROP;
1638
1639 ASSERT(ecb->dte_cond);
1640
1641 #ifdef illumos
1642 if (pops->dtps_mode != NULL) {
1643 mode = pops->dtps_mode(prov->dtpv_arg,
1644 probe->dtpr_id, probe->dtpr_arg);
1645
1646 ASSERT((mode & DTRACE_MODE_USER) ||
1647 (mode & DTRACE_MODE_KERNEL));
1648 ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
1649 (mode & DTRACE_MODE_NOPRIV_DROP));
1650 }
1651
1652 /*
1653 * If the dte_cond bits indicate that this consumer is only allowed to
1654 * see user-mode firings of this probe, call the provider's dtps_mode()
1655 * entry point to check that the probe was fired while in a user
1656 * context. If that's not the case, use the policy specified by the
1657 * provider to determine if we drop the probe or merely restrict
1658 * operation.
1659 */
1660 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1661 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1662
1663 if (!(mode & DTRACE_MODE_USER)) {
1664 if (mode & DTRACE_MODE_NOPRIV_DROP)
1665 return (0);
1666
1667 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1668 }
1669 }
1670 #endif
1671
1672 /*
1673 * This is more subtle than it looks. We have to be absolutely certain
1674 * that CRED() isn't going to change out from under us so it's only
1675 * legit to examine that structure if we're in constrained situations.
1676 * Currently, the only time we'll do this check is if a non-super-user
1677 * has enabled the profile or syscall providers -- providers that
1678 * allow visibility of all processes. For the profile case, the check
1679 * above will ensure that we're examining a user context.
1680 */
1681 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1682 cred_t *cr;
1683 cred_t *s_cr = state->dts_cred.dcr_cred;
1684 proc_t *proc;
1685
1686 ASSERT(s_cr != NULL);
1687
1688 if ((cr = CRED()) == NULL ||
1689 s_cr->cr_uid != cr->cr_uid ||
1690 s_cr->cr_uid != cr->cr_ruid ||
1691 s_cr->cr_uid != cr->cr_suid ||
1692 s_cr->cr_gid != cr->cr_gid ||
1693 s_cr->cr_gid != cr->cr_rgid ||
1694 s_cr->cr_gid != cr->cr_sgid ||
1695 (proc = ttoproc(curthread)) == NULL ||
1696 (proc->p_flag & SNOCD)) {
1697 if (mode & DTRACE_MODE_NOPRIV_DROP)
1698 return (0);
1699
1700 #ifdef illumos
1701 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1702 #endif
1703 }
1704 }
1705
1706 #ifdef illumos
1707 /*
1708 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1709 * in our zone, check to see if our mode policy is to restrict rather
1710 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1711 * and DTRACE_ACCESS_ARGS
1712 */
1713 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1714 cred_t *cr;
1715 cred_t *s_cr = state->dts_cred.dcr_cred;
1716
1717 ASSERT(s_cr != NULL);
1718
1719 if ((cr = CRED()) == NULL ||
1720 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1721 if (mode & DTRACE_MODE_NOPRIV_DROP)
1722 return (0);
1723
1724 mstate->dtms_access &=
1725 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1726 }
1727 }
1728 #endif
1729
1730 return (1);
1731 }
1732
1733 /*
1734 * Note: not called from probe context. This function is called
1735 * asynchronously (and at a regular interval) from outside of probe context to
1736 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1737 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1738 */
1739 void
1740 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1741 {
1742 dtrace_dynvar_t *dirty;
1743 dtrace_dstate_percpu_t *dcpu;
1744 dtrace_dynvar_t **rinsep;
1745 int i, j, work = 0;
1746
1747 for (i = 0; i < NCPU; i++) {
1748 dcpu = &dstate->dtds_percpu[i];
1749 rinsep = &dcpu->dtdsc_rinsing;
1750
1751 /*
1752 * If the dirty list is NULL, there is no dirty work to do.
1753 */
1754 if (dcpu->dtdsc_dirty == NULL)
1755 continue;
1756
1757 if (dcpu->dtdsc_rinsing != NULL) {
1758 /*
1759 * If the rinsing list is non-NULL, then it is because
1760 * this CPU was selected to accept another CPU's
1761 * dirty list -- and since that time, dirty buffers
1762 * have accumulated. This is a highly unlikely
1763 * condition, but we choose to ignore the dirty
1764 * buffers -- they'll be picked up by a future cleanse.
1765 */
1766 continue;
1767 }
1768
1769 if (dcpu->dtdsc_clean != NULL) {
1770 /*
1771 * If the clean list is non-NULL, then we're in a
1772 * situation where a CPU has done deallocations (we
1773 * have a non-NULL dirty list) but no allocations (we
1774 * also have a non-NULL clean list). We can't simply
1775 * move the dirty list into the clean list on this
1776 * CPU, yet we also don't want to allow this condition
1777 * to persist, lest a short clean list prevent a
1778 * massive dirty list from being cleaned (which in
1779 * turn could lead to otherwise avoidable dynamic
1780 * drops). To deal with this, we look for some CPU
1781 * with a NULL clean list, NULL dirty list, and NULL
1782 * rinsing list -- and then we borrow this CPU to
1783 * rinse our dirty list.
1784 */
1785 for (j = 0; j < NCPU; j++) {
1786 dtrace_dstate_percpu_t *rinser;
1787
1788 rinser = &dstate->dtds_percpu[j];
1789
1790 if (rinser->dtdsc_rinsing != NULL)
1791 continue;
1792
1793 if (rinser->dtdsc_dirty != NULL)
1794 continue;
1795
1796 if (rinser->dtdsc_clean != NULL)
1797 continue;
1798
1799 rinsep = &rinser->dtdsc_rinsing;
1800 break;
1801 }
1802
1803 if (j == NCPU) {
1804 /*
1805 * We were unable to find another CPU that
1806 * could accept this dirty list -- we are
1807 * therefore unable to clean it now.
1808 */
1809 dtrace_dynvar_failclean++;
1810 continue;
1811 }
1812 }
1813
1814 work = 1;
1815
1816 /*
1817 * Atomically move the dirty list aside.
1818 */
1819 do {
1820 dirty = dcpu->dtdsc_dirty;
1821
1822 /*
1823 * Before we zap the dirty list, set the rinsing list.
1824 * (This allows for a potential assertion in
1825 * dtrace_dynvar(): if a free dynamic variable appears
1826 * on a hash chain, either the dirty list or the
1827 * rinsing list for some CPU must be non-NULL.)
1828 */
1829 *rinsep = dirty;
1830 dtrace_membar_producer();
1831 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1832 dirty, NULL) != dirty);
1833 }
1834
1835 if (!work) {
1836 /*
1837 * We have no work to do; we can simply return.
1838 */
1839 return;
1840 }
1841
1842 dtrace_sync();
1843
1844 for (i = 0; i < NCPU; i++) {
1845 dcpu = &dstate->dtds_percpu[i];
1846
1847 if (dcpu->dtdsc_rinsing == NULL)
1848 continue;
1849
1850 /*
1851 * We are now guaranteed that no hash chain contains a pointer
1852 * into this dirty list; we can make it clean.
1853 */
1854 ASSERT(dcpu->dtdsc_clean == NULL);
1855 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1856 dcpu->dtdsc_rinsing = NULL;
1857 }
1858
1859 /*
1860 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1861 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1862 * This prevents a race whereby a CPU incorrectly decides that
1863 * the state should be something other than DTRACE_DSTATE_CLEAN
1864 * after dtrace_dynvar_clean() has completed.
1865 */
1866 dtrace_sync();
1867
1868 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1869 }
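/*
 * For illustration, a rough sketch of the chunk life cycle implemented by
 * the cleaner above together with dtrace_dynvar() below (see
 * <sys/dtrace_impl.h> for the authoritative description):
 *
 *	free list  -> allocation in dtrace_dynvar()    -> hash chain
 *	hash chain -> deallocation                     -> dirty list
 *	dirty list -> moved aside by the cleaner       -> rinsing list
 *	rinsing    -> after dtrace_sync()              -> clean list
 *	clean list -> dtrace_casptr() in the allocator -> free list
 *
 * The intervening dtrace_sync() is what guarantees that no probe still holds
 * a pointer into a rinsing chunk obtained from a stale hash chain snapshot.
 */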
1870
1871 /*
1872 * Depending on the value of the op parameter, this function looks up,
1873 * allocates, or deallocates an arbitrarily-keyed dynamic variable. If an
1874 * allocation is requested, this function will return a pointer to a
1875 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1876 * variable can be allocated. If NULL is returned, the appropriate counter
1877 * will be incremented.
1878 */
1879 dtrace_dynvar_t *
1880 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1881 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1882 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1883 {
1884 uint64_t hashval = DTRACE_DYNHASH_VALID;
1885 dtrace_dynhash_t *hash = dstate->dtds_hash;
1886 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1887 processorid_t me = curcpu, cpu = me;
1888 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1889 size_t bucket, ksize;
1890 size_t chunksize = dstate->dtds_chunksize;
1891 uintptr_t kdata, lock, nstate;
1892 uint_t i;
1893
1894 ASSERT(nkeys != 0);
1895
1896 /*
1897 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1898 * algorithm. For the by-value portions, we perform the algorithm in
1899 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1900 * bit, and seems to have only a minute effect on distribution. For
1901 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1902 * over each referenced byte. It's painful to do this, but it's much
1903 * better than pathological hash distribution. The efficacy of the
1904 * hashing algorithm (and a comparison with other algorithms) may be
1905 * found by running the ::dtrace_dynstat MDB dcmd.
1906 */
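/*
 * For illustration (an assumed example, not one taken from this file): a D
 * associative array reference such as
 *
 *	a[pid, execname] = ...;
 *
 * arrives here as a two-key tuple. The pid is a by-value key (dttk_size of
 * zero) and is folded into the hash in 16-bit chunks; the execname string is
 * a by-reference key and is folded one loaded byte at a time.
 */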
1907 for (i = 0; i < nkeys; i++) {
1908 if (key[i].dttk_size == 0) {
1909 uint64_t val = key[i].dttk_value;
1910
1911 hashval += (val >> 48) & 0xffff;
1912 hashval += (hashval << 10);
1913 hashval ^= (hashval >> 6);
1914
1915 hashval += (val >> 32) & 0xffff;
1916 hashval += (hashval << 10);
1917 hashval ^= (hashval >> 6);
1918
1919 hashval += (val >> 16) & 0xffff;
1920 hashval += (hashval << 10);
1921 hashval ^= (hashval >> 6);
1922
1923 hashval += val & 0xffff;
1924 hashval += (hashval << 10);
1925 hashval ^= (hashval >> 6);
1926 } else {
1927 /*
1928 * This is incredibly painful, but it beats the hell
1929 * out of the alternative.
1930 */
1931 uint64_t j, size = key[i].dttk_size;
1932 uintptr_t base = (uintptr_t)key[i].dttk_value;
1933
1934 if (!dtrace_canload(base, size, mstate, vstate))
1935 break;
1936
1937 for (j = 0; j < size; j++) {
1938 hashval += dtrace_load8(base + j);
1939 hashval += (hashval << 10);
1940 hashval ^= (hashval >> 6);
1941 }
1942 }
1943 }
1944
1945 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1946 return (NULL);
1947
1948 hashval += (hashval << 3);
1949 hashval ^= (hashval >> 11);
1950 hashval += (hashval << 15);
1951
1952 /*
1953 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1954 * comes out to be one of our two sentinel hash values. If this
1955 * actually happens, we set the hashval to be a value known to be a
1956 * non-sentinel value.
1957 */
1958 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1959 hashval = DTRACE_DYNHASH_VALID;
1960
1961 /*
1962 * Yes, it's painful to do a divide here. If the cycle count becomes
1963 * important here, tricks can be pulled to reduce it. (However, it's
1964 * critical that hash collisions be kept to an absolute minimum;
1965 * they're much more painful than a divide.) It's better to have a
1966 * solution that generates few collisions and still keeps things
1967 * relatively simple.
1968 */
1969 bucket = hashval % dstate->dtds_hashsize;
1970
1971 if (op == DTRACE_DYNVAR_DEALLOC) {
1972 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1973
1974 for (;;) {
1975 while ((lock = *lockp) & 1)
1976 continue;
1977
1978 if (dtrace_casptr((volatile void *)lockp,
1979 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
1980 break;
1981 }
1982
1983 dtrace_membar_producer();
1984 }
1985
1986 top:
1987 prev = NULL;
1988 lock = hash[bucket].dtdh_lock;
1989
1990 dtrace_membar_consumer();
1991
1992 start = hash[bucket].dtdh_chain;
1993 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1994 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1995 op != DTRACE_DYNVAR_DEALLOC));
1996
1997 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1998 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1999 dtrace_key_t *dkey = &dtuple->dtt_key[0];
2000
2001 if (dvar->dtdv_hashval != hashval) {
2002 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
2003 /*
2004 * We've reached the sink, and therefore the
2005 * end of the hash chain; we can kick out of
2006 * the loop knowing that we have seen a valid
2007 * snapshot of state.
2008 */
2009 ASSERT(dvar->dtdv_next == NULL);
2010 ASSERT(dvar == &dtrace_dynhash_sink);
2011 break;
2012 }
2013
2014 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
2015 /*
2016 * We've gone off the rails: somewhere along
2017 * the line, one of the members of this hash
2018 * chain was deleted. Note that we could also
2019 * detect this by simply letting this loop run
2020 * to completion, as we would eventually hit
2021 * the end of the dirty list. However, we
2022 * want to avoid running the length of the
2023 * dirty list unnecessarily (it might be quite
2024 * long), so we catch this as early as
2025 * possible by detecting the hash marker. In
2026 * this case, we simply set dvar to NULL and
2027 * break; the conditional after the loop will
2028 * send us back to top.
2029 */
2030 dvar = NULL;
2031 break;
2032 }
2033
2034 goto next;
2035 }
2036
2037 if (dtuple->dtt_nkeys != nkeys)
2038 goto next;
2039
2040 for (i = 0; i < nkeys; i++, dkey++) {
2041 if (dkey->dttk_size != key[i].dttk_size)
2042 goto next; /* size or type mismatch */
2043
2044 if (dkey->dttk_size != 0) {
2045 if (dtrace_bcmp(
2046 (void *)(uintptr_t)key[i].dttk_value,
2047 (void *)(uintptr_t)dkey->dttk_value,
2048 dkey->dttk_size))
2049 goto next;
2050 } else {
2051 if (dkey->dttk_value != key[i].dttk_value)
2052 goto next;
2053 }
2054 }
2055
2056 if (op != DTRACE_DYNVAR_DEALLOC)
2057 return (dvar);
2058
2059 ASSERT(dvar->dtdv_next == NULL ||
2060 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
2061
2062 if (prev != NULL) {
2063 ASSERT(hash[bucket].dtdh_chain != dvar);
2064 ASSERT(start != dvar);
2065 ASSERT(prev->dtdv_next == dvar);
2066 prev->dtdv_next = dvar->dtdv_next;
2067 } else {
2068 if (dtrace_casptr(&hash[bucket].dtdh_chain,
2069 start, dvar->dtdv_next) != start) {
2070 /*
2071 * We have failed to atomically swing the
2072 * hash table head pointer, presumably because
2073 * of a conflicting allocation on another CPU.
2074 * We need to reread the hash chain and try
2075 * again.
2076 */
2077 goto top;
2078 }
2079 }
2080
2081 dtrace_membar_producer();
2082
2083 /*
2084 * Now set the hash value to indicate that it's free.
2085 */
2086 ASSERT(hash[bucket].dtdh_chain != dvar);
2087 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2088
2089 dtrace_membar_producer();
2090
2091 /*
2092 * Set the next pointer to point at the dirty list, and
2093 * atomically swing the dirty pointer to the newly freed dvar.
2094 */
2095 do {
2096 next = dcpu->dtdsc_dirty;
2097 dvar->dtdv_next = next;
2098 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
2099
2100 /*
2101 * Finally, unlock this hash bucket.
2102 */
2103 ASSERT(hash[bucket].dtdh_lock == lock);
2104 ASSERT(lock & 1);
2105 hash[bucket].dtdh_lock++;
2106
2107 return (NULL);
2108 next:
2109 prev = dvar;
2110 continue;
2111 }
2112
2113 if (dvar == NULL) {
2114 /*
2115 * If dvar is NULL, it is because we went off the rails:
2116 * one of the elements that we traversed in the hash chain
2117 * was deleted while we were traversing it. In this case,
2118 * we assert that we aren't doing a dealloc (deallocs lock
2119 * the hash bucket to prevent themselves from racing with
2120 * one another), and retry the hash chain traversal.
2121 */
2122 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
2123 goto top;
2124 }
2125
2126 if (op != DTRACE_DYNVAR_ALLOC) {
2127 /*
2128 * If we are not to allocate a new variable, we want to
2129 * return NULL now. Before we return, check that the value
2130 * of the lock word hasn't changed. If it has, we may have
2131 * seen an inconsistent snapshot.
2132 */
2133 if (op == DTRACE_DYNVAR_NOALLOC) {
2134 if (hash[bucket].dtdh_lock != lock)
2135 goto top;
2136 } else {
2137 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2138 ASSERT(hash[bucket].dtdh_lock == lock);
2139 ASSERT(lock & 1);
2140 hash[bucket].dtdh_lock++;
2141 }
2142
2143 return (NULL);
2144 }
2145
2146 /*
2147 * We need to allocate a new dynamic variable. The size we need is the
2148 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2149 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2150 * the size of any referred-to data (dsize). We then round the final
2151 * size up to the chunksize for allocation.
2152 */
2153 for (ksize = 0, i = 0; i < nkeys; i++)
2154 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2155
2156 /*
2157 * This should be pretty much impossible, but could happen if, say,
2158 * strange DIF specified the tuple. Ideally, this should be an
2159 * assertion and not an error condition -- but that requires that the
2160 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2161 * bullet-proof. (That is, it must not be able to be fooled by
2162 * malicious DIF.) Given the lack of backwards branches in DIF,
2163 * solving this would presumably not amount to solving the Halting
2164 * Problem -- but it still seems awfully hard.
2165 */
2166 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2167 ksize + dsize > chunksize) {
2168 dcpu->dtdsc_drops++;
2169 return (NULL);
2170 }
2171
2172 nstate = DTRACE_DSTATE_EMPTY;
2173
2174 do {
2175 retry:
2176 free = dcpu->dtdsc_free;
2177
2178 if (free == NULL) {
2179 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2180 void *rval;
2181
2182 if (clean == NULL) {
2183 /*
2184 * We're out of dynamic variable space on
2185 * this CPU. Unless we have tried all CPUs,
2186 * we'll try to allocate from a different
2187 * CPU.
2188 */
2189 switch (dstate->dtds_state) {
2190 case DTRACE_DSTATE_CLEAN: {
2191 void *sp = &dstate->dtds_state;
2192
2193 if (++cpu >= NCPU)
2194 cpu = 0;
2195
2196 if (dcpu->dtdsc_dirty != NULL &&
2197 nstate == DTRACE_DSTATE_EMPTY)
2198 nstate = DTRACE_DSTATE_DIRTY;
2199
2200 if (dcpu->dtdsc_rinsing != NULL)
2201 nstate = DTRACE_DSTATE_RINSING;
2202
2203 dcpu = &dstate->dtds_percpu[cpu];
2204
2205 if (cpu != me)
2206 goto retry;
2207
2208 (void) dtrace_cas32(sp,
2209 DTRACE_DSTATE_CLEAN, nstate);
2210
2211 /*
2212 * To increment the correct bean
2213 * counter, take another lap.
2214 */
2215 goto retry;
2216 }
2217
2218 case DTRACE_DSTATE_DIRTY:
2219 dcpu->dtdsc_dirty_drops++;
2220 break;
2221
2222 case DTRACE_DSTATE_RINSING:
2223 dcpu->dtdsc_rinsing_drops++;
2224 break;
2225
2226 case DTRACE_DSTATE_EMPTY:
2227 dcpu->dtdsc_drops++;
2228 break;
2229 }
2230
2231 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2232 return (NULL);
2233 }
2234
2235 /*
2236 * The clean list appears to be non-empty. We want to
2237 * move the clean list to the free list; we start by
2238 * moving the clean pointer aside.
2239 */
2240 if (dtrace_casptr(&dcpu->dtdsc_clean,
2241 clean, NULL) != clean) {
2242 /*
2243 * We are in one of two situations:
2244 *
2245 * (a) The clean list was switched to the
2246 * free list by another CPU.
2247 *
2248 * (b) The clean list was added to by the
2249 * cleansing cyclic.
2250 *
2251 * In either of these situations, we can
2252 * just reattempt the free list allocation.
2253 */
2254 goto retry;
2255 }
2256
2257 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2258
2259 /*
2260 * Now we'll move the clean list to our free list.
2261 * It's impossible for this to fail: the only way
2262 * the free list can be updated is through this
2263 * code path, and only one CPU can own the clean list.
2264 * Thus, it would only be possible for this to fail if
2265 * this code were racing with dtrace_dynvar_clean().
2266 * (That is, if dtrace_dynvar_clean() updated the clean
2267 * list, and we ended up racing to update the free
2268 * list.) This race is prevented by the dtrace_sync()
2269 * in dtrace_dynvar_clean() -- which flushes the
2270 * owners of the clean lists out before resetting
2271 * the clean lists.
2272 */
2273 dcpu = &dstate->dtds_percpu[me];
2274 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2275 ASSERT(rval == NULL);
2276 goto retry;
2277 }
2278
2279 dvar = free;
2280 new_free = dvar->dtdv_next;
2281 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2282
2283 /*
2284 * We have now allocated a new chunk. We copy the tuple keys into the
2285 * tuple array and copy any referenced key data into the data space
2286 * following the tuple array. As we do this, we relocate dttk_value
2287 * in the final tuple to point to the key data address in the chunk.
2288 */
2289 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2290 dvar->dtdv_data = (void *)(kdata + ksize);
2291 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2292
2293 for (i = 0; i < nkeys; i++) {
2294 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2295 size_t kesize = key[i].dttk_size;
2296
2297 if (kesize != 0) {
2298 dtrace_bcopy(
2299 (const void *)(uintptr_t)key[i].dttk_value,
2300 (void *)kdata, kesize);
2301 dkey->dttk_value = kdata;
2302 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2303 } else {
2304 dkey->dttk_value = key[i].dttk_value;
2305 }
2306
2307 dkey->dttk_size = kesize;
2308 }
2309
2310 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2311 dvar->dtdv_hashval = hashval;
2312 dvar->dtdv_next = start;
2313
2314 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2315 return (dvar);
2316
2317 /*
2318 * The cas has failed. Either another CPU is adding an element to
2319 * this hash chain, or another CPU is deleting an element from this
2320 * hash chain. The simplest way to deal with both of these cases
2321 * (though not necessarily the most efficient) is to free our
2322 * allocated block and re-attempt it all. Note that the free is
2323 * to the dirty list and _not_ to the free list. This is to prevent
2324 * races with allocators, above.
2325 */
2326 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2327
2328 dtrace_membar_producer();
2329
2330 do {
2331 free = dcpu->dtdsc_dirty;
2332 dvar->dtdv_next = free;
2333 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2334
2335 goto top;
2336 }
2337
2338 /*ARGSUSED*/
2339 static void
2340 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2341 {
2342 if ((int64_t)nval < (int64_t)*oval)
2343 *oval = nval;
2344 }
2345
2346 /*ARGSUSED*/
2347 static void
2348 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2349 {
2350 if ((int64_t)nval > (int64_t)*oval)
2351 *oval = nval;
2352 }
2353
2354 static void
2355 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2356 {
2357 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2358 int64_t val = (int64_t)nval;
2359
2360 if (val < 0) {
2361 for (i = 0; i < zero; i++) {
2362 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2363 quanta[i] += incr;
2364 return;
2365 }
2366 }
2367 } else {
2368 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2369 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2370 quanta[i - 1] += incr;
2371 return;
2372 }
2373 }
2374
2375 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2376 return;
2377 }
2378
2379 ASSERT(0);
2380 }
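/*
 * For illustration, assuming the usual power-of-two bucket values supplied
 * by DTRACE_QUANTIZE_BUCKETVAL(): a positive value of 7 is scanned upward
 * from the zero bucket until a bucket value strictly greater than it is
 * found (8), and the preceding bucket -- the one labelled 4, covering
 * [4, 8) -- is incremented:
 *
 *	quanta[zero + 3] += incr;
 *
 * Negative values are handled by the symmetric scan over the buckets below
 * zero.
 */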
2381
2382 static void
2383 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2384 {
2385 uint64_t arg = *lquanta++;
2386 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2387 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2388 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2389 int32_t val = (int32_t)nval, level;
2390
2391 ASSERT(step != 0);
2392 ASSERT(levels != 0);
2393
2394 if (val < base) {
2395 /*
2396 * This is an underflow.
2397 */
2398 lquanta[0] += incr;
2399 return;
2400 }
2401
2402 level = (val - base) / step;
2403
2404 if (level < levels) {
2405 lquanta[level + 1] += incr;
2406 return;
2407 }
2408
2409 /*
2410 * This is an overflow.
2411 */
2412 lquanta[levels + 1] += incr;
2413 }
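/*
 * A worked example with assumed parameters: an enabling along the lines of
 * lquantize(x, 0, 100, 10) yields base = 0, step = 10 and levels = 10. A
 * value of 37 then computes
 *
 *	level = (37 - 0) / 10 = 3;
 *	lquanta[3 + 1] += incr;		(the [30, 40) bucket)
 *
 * with lquanta[0] reserved for underflow (x < 0) and lquanta[levels + 1]
 * for overflow (x >= 100).
 */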
2414
2415 static int
2416 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2417 uint16_t high, uint16_t nsteps, int64_t value)
2418 {
2419 int64_t this = 1, last, next;
2420 int base = 1, order;
2421
2422 ASSERT(factor <= nsteps);
2423 ASSERT(nsteps % factor == 0);
2424
2425 for (order = 0; order < low; order++)
2426 this *= factor;
2427
2428 /*
2429 * If our value is less than our factor taken to the power of the
2430 * low order of magnitude, it goes into the zeroth bucket.
2431 */
2432 if (value < (last = this))
2433 return (0);
2434
2435 for (this *= factor; order <= high; order++) {
2436 int nbuckets = this > nsteps ? nsteps : this;
2437
2438 if ((next = this * factor) < this) {
2439 /*
2440 * We should not generally get log/linear quantizations
2441 * with a high magnitude that allows 64-bits to
2442 * overflow, but we nonetheless protect against this
2443 * by explicitly checking for overflow, and clamping
2444 * our value accordingly.
2445 */
2446 value = this - 1;
2447 }
2448
2449 if (value < this) {
2450 /*
2451 * If our value lies within this order of magnitude,
2452 * determine its position by taking the offset within
2453 * the order of magnitude, dividing by the bucket
2454 * width, and adding to our (accumulated) base.
2455 */
2456 return (base + (value - last) / (this / nbuckets));
2457 }
2458
2459 base += nbuckets - (nbuckets / factor);
2460 last = this;
2461 this = next;
2462 }
2463
2464 /*
2465 * Our value is greater than or equal to our factor taken to the
2466 * power of one plus the high magnitude -- return the top bucket.
2467 */
2468 return (base);
2469 }
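/*
 * A worked example with assumed parameters: with factor = 10, low = 0,
 * high = 2 and nsteps = 10 (ten linear steps per decade for magnitudes
 * 10^0 through 10^2), a value of 37 walks the loop above as follows:
 *
 *	order 0: this = 10, nbuckets = 10; 37 >= 10, so
 *		 base = 1 + (10 - 10 / 10) = 10, last = 10, this = 100
 *	order 1: this = 100, nbuckets = 10; 37 < 100, so the result is
 *		 10 + (37 - 10) / (100 / 10) = 12
 *
 * That is, 37 lands in the bucket covering [30, 40). Values below
 * factor^low land in bucket 0, and values at or above factor^(high + 1)
 * land in the final bucket.
 */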
2470
2471 static void
2472 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2473 {
2474 uint64_t arg = *llquanta++;
2475 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2476 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2477 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2478 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2479
2480 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2481 low, high, nsteps, nval)] += incr;
2482 }
2483
2484 /*ARGSUSED*/
2485 static void
2486 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2487 {
2488 data[0]++;
2489 data[1] += nval;
2490 }
2491
2492 /*ARGSUSED*/
2493 static void
2494 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2495 {
2496 int64_t snval = (int64_t)nval;
2497 uint64_t tmp[2];
2498
2499 data[0]++;
2500 data[1] += nval;
2501
2502 /*
2503 * What we want to say here is:
2504 *
2505 * data[2] += nval * nval;
2506 *
2507 * But given that nval is 64-bit, we could easily overflow, so
2508 * we do this as 128-bit arithmetic.
2509 */
2510 if (snval < 0)
2511 snval = -snval;
2512
2513 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2514 dtrace_add_128(data + 2, tmp, data + 2);
2515 }
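/*
 * At report time, the accumulated values suffice to recover the standard
 * deviation without having retained every sample. A sketch of the
 * consumer-side arithmetic (not a statement of how any particular consumer
 * does it): with n = data[0], sum = data[1] and sumsq = the 128-bit value
 * in data[2..3],
 *
 *	stddev = sqrt(sumsq / n - (sum / n)^2)
 *	       = sqrt((n * sumsq - sum * sum) / n^2)
 */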
2516
2517 /*ARGSUSED*/
2518 static void
2519 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2520 {
2521 *oval = *oval + 1;
2522 }
2523
2524 /*ARGSUSED*/
2525 static void
2526 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2527 {
2528 *oval += nval;
2529 }
2530
2531 /*
2532 * Aggregate given the tuple in the principal data buffer, and the aggregating
2533 * action denoted by the specified dtrace_aggregation_t. The aggregation
2534 * buffer is specified as the buf parameter. This routine does not return
2535 * failure; if there is no space in the aggregation buffer, the data will be
2536 * dropped, and a corresponding counter incremented.
2537 */
2538 static void
2539 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2540 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2541 {
2542 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2543 uint32_t i, ndx, size, fsize;
2544 uint32_t align = sizeof (uint64_t) - 1;
2545 dtrace_aggbuffer_t *agb;
2546 dtrace_aggkey_t *key;
2547 uint32_t hashval = 0, limit, isstr;
2548 caddr_t tomax, data, kdata;
2549 dtrace_actkind_t action;
2550 dtrace_action_t *act;
2551 uintptr_t offs;
2552
2553 if (buf == NULL)
2554 return;
2555
2556 if (!agg->dtag_hasarg) {
2557 /*
2558 * Currently, only quantize() and lquantize() take additional
2559 * arguments, and they have the same semantics: an increment
2560 * value that defaults to 1 when not present. If additional
2561 * aggregating actions take arguments, the setting of the
2562 * default argument value will presumably have to become more
2563 * sophisticated...
2564 */
2565 arg = 1;
2566 }
2567
2568 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2569 size = rec->dtrd_offset - agg->dtag_base;
2570 fsize = size + rec->dtrd_size;
2571
2572 ASSERT(dbuf->dtb_tomax != NULL);
2573 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2574
2575 if ((tomax = buf->dtb_tomax) == NULL) {
2576 dtrace_buffer_drop(buf);
2577 return;
2578 }
2579
2580 /*
2581 * The metastructure is always at the bottom of the buffer.
2582 */
2583 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2584 sizeof (dtrace_aggbuffer_t));
2585
2586 if (buf->dtb_offset == 0) {
2587 /*
2588 * We just kludge up approximately 1/8th of the size to be
2589 * buckets. If this guess ends up being routinely
2590 * off-the-mark, we may need to dynamically readjust this
2591 * based on past performance.
2592 */
2593 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2594
2595 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2596 (uintptr_t)tomax || hashsize == 0) {
2597 /*
2598 * We've been given a ludicrously small buffer;
2599 * increment our drop count and leave.
2600 */
2601 dtrace_buffer_drop(buf);
2602 return;
2603 }
2604
2605 /*
2606 * And now, a pathetic attempt to try to get an odd (or
2607 * perchance, a prime) hash size for better hash distribution.
2608 */
2609 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2610 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2611
2612 agb->dtagb_hashsize = hashsize;
2613 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2614 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2615 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2616
2617 for (i = 0; i < agb->dtagb_hashsize; i++)
2618 agb->dtagb_hash[i] = NULL;
2619 }
2620
2621 ASSERT(agg->dtag_first != NULL);
2622 ASSERT(agg->dtag_first->dta_intuple);
2623
2624 /*
2625 * Calculate the hash value based on the key. Note that we _don't_
2626 * include the aggid in the hashing (but we will store it as part of
2627 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2628 * algorithm: a simple, quick algorithm that has no known funnels, and
2629 * gets good distribution in practice. The efficacy of the hashing
2630 * algorithm (and a comparison with other algorithms) may be found by
2631 * running the ::dtrace_aggstat MDB dcmd.
2632 */
2633 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2634 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2635 limit = i + act->dta_rec.dtrd_size;
2636 ASSERT(limit <= size);
2637 isstr = DTRACEACT_ISSTRING(act);
2638
2639 for (; i < limit; i++) {
2640 hashval += data[i];
2641 hashval += (hashval << 10);
2642 hashval ^= (hashval >> 6);
2643
2644 if (isstr && data[i] == '\0')
2645 break;
2646 }
2647 }
2648
2649 hashval += (hashval << 3);
2650 hashval ^= (hashval >> 11);
2651 hashval += (hashval << 15);
2652
2653 /*
2654 * Yes, the divide here is expensive -- but it's generally the least
2655 * of the performance issues given the amount of data that we iterate
2656 * over to compute hash values, compare data, etc.
2657 */
2658 ndx = hashval % agb->dtagb_hashsize;
2659
2660 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2661 ASSERT((caddr_t)key >= tomax);
2662 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2663
2664 if (hashval != key->dtak_hashval || key->dtak_size != size)
2665 continue;
2666
2667 kdata = key->dtak_data;
2668 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2669
2670 for (act = agg->dtag_first; act->dta_intuple;
2671 act = act->dta_next) {
2672 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2673 limit = i + act->dta_rec.dtrd_size;
2674 ASSERT(limit <= size);
2675 isstr = DTRACEACT_ISSTRING(act);
2676
2677 for (; i < limit; i++) {
2678 if (kdata[i] != data[i])
2679 goto next;
2680
2681 if (isstr && data[i] == '\0')
2682 break;
2683 }
2684 }
2685
2686 if (action != key->dtak_action) {
2687 /*
2688 * We are aggregating on the same value in the same
2689 * aggregation with two different aggregating actions.
2690 * (This should have been picked up in the compiler,
2691 * so we may be dealing with errant or devious DIF.)
2692 * This is an error condition; we indicate as much,
2693 * and return.
2694 */
2695 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2696 return;
2697 }
2698
2699 /*
2700 * This is a hit: we need to apply the aggregator to
2701 * the value at this key.
2702 */
2703 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2704 return;
2705 next:
2706 continue;
2707 }
2708
2709 /*
2710 * We didn't find it. We need to allocate some zero-filled space,
2711 * link it into the hash table appropriately, and apply the aggregator
2712 * to the (zero-filled) value.
2713 */
2714 offs = buf->dtb_offset;
2715 while (offs & (align - 1))
2716 offs += sizeof (uint32_t);
2717
2718 /*
2719 * If we don't have enough room to both allocate a new key _and_
2720 * its associated data, increment the drop count and return.
2721 */
2722 if ((uintptr_t)tomax + offs + fsize >
2723 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2724 dtrace_buffer_drop(buf);
2725 return;
2726 }
2727
2728 /*CONSTCOND*/
2729 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2730 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2731 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2732
2733 key->dtak_data = kdata = tomax + offs;
2734 buf->dtb_offset = offs + fsize;
2735
2736 /*
2737 * Now copy the data across.
2738 */
2739 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2740
2741 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2742 kdata[i] = data[i];
2743
2744 /*
2745 * Because strings are not zeroed out by default, we need to iterate
2746 * looking for actions that store strings, and we need to explicitly
2747 * pad these strings out with zeroes.
2748 */
2749 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2750 int nul;
2751
2752 if (!DTRACEACT_ISSTRING(act))
2753 continue;
2754
2755 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2756 limit = i + act->dta_rec.dtrd_size;
2757 ASSERT(limit <= size);
2758
2759 for (nul = 0; i < limit; i++) {
2760 if (nul) {
2761 kdata[i] = '\0';
2762 continue;
2763 }
2764
2765 if (data[i] != '\0')
2766 continue;
2767
2768 nul = 1;
2769 }
2770 }
2771
2772 for (i = size; i < fsize; i++)
2773 kdata[i] = 0;
2774
2775 key->dtak_hashval = hashval;
2776 key->dtak_size = size;
2777 key->dtak_action = action;
2778 key->dtak_next = agb->dtagb_hash[ndx];
2779 agb->dtagb_hash[ndx] = key;
2780
2781 /*
2782 * Finally, apply the aggregator.
2783 */
2784 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2785 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2786 }
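/*
 * A sketch of the aggregation buffer layout implied by the code above, from
 * low addresses to high:
 *
 *	tomax ...........	key/value records, growing up from dtb_offset
 *	dtagb_free ......	dtrace_aggkey_t structures, growing down
 *	dtagb_hash ......	the hash bucket array
 *	end of buffer ...	the dtrace_aggbuffer_t metastructure itself
 *
 * A drop is recorded whenever a new key plus its associated data would cause
 * the two growing regions to meet.
 */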
2787
2788 /*
2789 * Given consumer state, this routine finds a speculation in the INACTIVE
2790 * state and transitions it into the ACTIVE state. If there is no speculation
2791 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2792 * incremented -- it is up to the caller to take appropriate action.
2793 */
2794 static int
2795 dtrace_speculation(dtrace_state_t *state)
2796 {
2797 int i = 0;
2798 dtrace_speculation_state_t curstate;
2799 uint32_t *stat = &state->dts_speculations_unavail, count;
2800
2801 while (i < state->dts_nspeculations) {
2802 dtrace_speculation_t *spec = &state->dts_speculations[i];
2803
2804 curstate = spec->dtsp_state;
2805
2806 if (curstate != DTRACESPEC_INACTIVE) {
2807 if (curstate == DTRACESPEC_COMMITTINGMANY ||
2808 curstate == DTRACESPEC_COMMITTING ||
2809 curstate == DTRACESPEC_DISCARDING)
2810 stat = &state->dts_speculations_busy;
2811 i++;
2812 continue;
2813 }
2814
2815 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2816 curstate, DTRACESPEC_ACTIVE) == curstate)
2817 return (i + 1);
2818 }
2819
2820 /*
2821 * We couldn't find a speculation. If we found as much as a single
2822 * busy speculation buffer, we'll attribute this failure as "busy"
2823 * instead of "unavail".
2824 */
2825 do {
2826 count = *stat;
2827 } while (dtrace_cas32(stat, count, count + 1) != count);
2828
2829 return (0);
2830 }
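/*
 * For context, a typical D-level use of speculations (an illustrative
 * sketch; see the Solaris Dynamic Tracing Guide for the canonical idiom):
 *
 *	self->spec = speculation();	returns 0 on failure -- this routine
 *	speculate(self->spec); ...	dtrace_speculation_buffer() below
 *	commit(self->spec);		dtrace_speculation_commit() below
 *	discard(self->spec);		dtrace_speculation_discard() below
 */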
2831
2832 /*
2833 * This routine commits an active speculation. If the specified speculation
2834 * is not in a valid state to perform a commit(), this routine will silently do
2835 * nothing. The state of the specified speculation is transitioned according
2836 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2837 */
2838 static void
2839 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2840 dtrace_specid_t which)
2841 {
2842 dtrace_speculation_t *spec;
2843 dtrace_buffer_t *src, *dest;
2844 uintptr_t daddr, saddr, dlimit, slimit;
2845 dtrace_speculation_state_t curstate, new = 0;
2846 intptr_t offs;
2847 uint64_t timestamp;
2848
2849 if (which == 0)
2850 return;
2851
2852 if (which > state->dts_nspeculations) {
2853 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2854 return;
2855 }
2856
2857 spec = &state->dts_speculations[which - 1];
2858 src = &spec->dtsp_buffer[cpu];
2859 dest = &state->dts_buffer[cpu];
2860
2861 do {
2862 curstate = spec->dtsp_state;
2863
2864 if (curstate == DTRACESPEC_COMMITTINGMANY)
2865 break;
2866
2867 switch (curstate) {
2868 case DTRACESPEC_INACTIVE:
2869 case DTRACESPEC_DISCARDING:
2870 return;
2871
2872 case DTRACESPEC_COMMITTING:
2873 /*
2874 * This is only possible if we are (a) commit()'ing
2875 * without having done a prior speculate() on this CPU
2876 * and (b) racing with another commit() on a different
2877 * CPU. There's nothing to do -- we just assert that
2878 * our offset is 0.
2879 */
2880 ASSERT(src->dtb_offset == 0);
2881 return;
2882
2883 case DTRACESPEC_ACTIVE:
2884 new = DTRACESPEC_COMMITTING;
2885 break;
2886
2887 case DTRACESPEC_ACTIVEONE:
2888 /*
2889 * This speculation is active on one CPU. If our
2890 * buffer offset is non-zero, we know that the one CPU
2891 * must be us. Otherwise, we are committing on a
2892 * different CPU from the speculate(), and we must
2893 * rely on being asynchronously cleaned.
2894 */
2895 if (src->dtb_offset != 0) {
2896 new = DTRACESPEC_COMMITTING;
2897 break;
2898 }
2899 /*FALLTHROUGH*/
2900
2901 case DTRACESPEC_ACTIVEMANY:
2902 new = DTRACESPEC_COMMITTINGMANY;
2903 break;
2904
2905 default:
2906 ASSERT(0);
2907 }
2908 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2909 curstate, new) != curstate);
2910
2911 /*
2912 * We have set the state to indicate that we are committing this
2913 * speculation. Now reserve the necessary space in the destination
2914 * buffer.
2915 */
2916 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2917 sizeof (uint64_t), state, NULL)) < 0) {
2918 dtrace_buffer_drop(dest);
2919 goto out;
2920 }
2921
2922 /*
2923 * We have sufficient space to copy the speculative buffer into the
2924 * primary buffer. First, modify the speculative buffer, filling
2925 * in the timestamp of all entries with the current time. The data
2926 * must have the commit() time rather than the time it was traced,
2927 * so that all entries in the primary buffer are in timestamp order.
2928 */
2929 timestamp = dtrace_gethrtime();
2930 saddr = (uintptr_t)src->dtb_tomax;
2931 slimit = saddr + src->dtb_offset;
2932 while (saddr < slimit) {
2933 size_t size;
2934 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
2935
2936 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
2937 saddr += sizeof (dtrace_epid_t);
2938 continue;
2939 }
2940 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
2941 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
2942
2943 ASSERT3U(saddr + size, <=, slimit);
2944 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
2945 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
2946
2947 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
2948
2949 saddr += size;
2950 }
2951
2952 /*
2953 * Copy the buffer across. (Note that this is a
2954 * highly suboptimal bcopy(); in the unlikely event that this becomes
2955 * a serious performance issue, a high-performance DTrace-specific
2956 * bcopy() should obviously be invented.)
2957 */
2958 daddr = (uintptr_t)dest->dtb_tomax + offs;
2959 dlimit = daddr + src->dtb_offset;
2960 saddr = (uintptr_t)src->dtb_tomax;
2961
2962 /*
2963 * First, the aligned portion.
2964 */
2965 while (dlimit - daddr >= sizeof (uint64_t)) {
2966 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2967
2968 daddr += sizeof (uint64_t);
2969 saddr += sizeof (uint64_t);
2970 }
2971
2972 /*
2973 * Now any left-over bit...
2974 */
2975 while (dlimit - daddr)
2976 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2977
2978 /*
2979 * Finally, commit the reserved space in the destination buffer.
2980 */
2981 dest->dtb_offset = offs + src->dtb_offset;
2982
2983 out:
2984 /*
2985 * If we're lucky enough to be the only active CPU on this speculation
2986 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2987 */
2988 if (curstate == DTRACESPEC_ACTIVE ||
2989 (curstate == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2990 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2991 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2992
2993 ASSERT(rval == DTRACESPEC_COMMITTING);
2994 }
2995
2996 src->dtb_offset = 0;
2997 src->dtb_xamot_drops += src->dtb_drops;
2998 src->dtb_drops = 0;
2999 }
3000
3001 /*
3002 * This routine discards an active speculation. If the specified speculation
3003 * is not in a valid state to perform a discard(), this routine will silently
3004 * do nothing. The state of the specified speculation is transitioned
3005 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
3006 */
3007 static void
3008 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
3009 dtrace_specid_t which)
3010 {
3011 dtrace_speculation_t *spec;
3012 dtrace_speculation_state_t curstate, new = 0;
3013 dtrace_buffer_t *buf;
3014
3015 if (which == 0)
3016 return;
3017
3018 if (which > state->dts_nspeculations) {
3019 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3020 return;
3021 }
3022
3023 spec = &state->dts_speculations[which - 1];
3024 buf = &spec->dtsp_buffer[cpu];
3025
3026 do {
3027 curstate = spec->dtsp_state;
3028
3029 switch (curstate) {
3030 case DTRACESPEC_INACTIVE:
3031 case DTRACESPEC_COMMITTINGMANY:
3032 case DTRACESPEC_COMMITTING:
3033 case DTRACESPEC_DISCARDING:
3034 return;
3035
3036 case DTRACESPEC_ACTIVE:
3037 case DTRACESPEC_ACTIVEMANY:
3038 new = DTRACESPEC_DISCARDING;
3039 break;
3040
3041 case DTRACESPEC_ACTIVEONE:
3042 if (buf->dtb_offset != 0) {
3043 new = DTRACESPEC_INACTIVE;
3044 } else {
3045 new = DTRACESPEC_DISCARDING;
3046 }
3047 break;
3048
3049 default:
3050 ASSERT(0);
3051 }
3052 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3053 curstate, new) != curstate);
3054
3055 buf->dtb_offset = 0;
3056 buf->dtb_drops = 0;
3057 }
3058
3059 /*
3060 * Note: not called from probe context. This function is called
3061 * asynchronously from cross call context to clean any speculations that are
3062 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
3063 * transitioned back to the INACTIVE state until all CPUs have cleaned the
3064 * speculation.
3065 */
3066 static void
3067 dtrace_speculation_clean_here(dtrace_state_t *state)
3068 {
3069 dtrace_icookie_t cookie;
3070 processorid_t cpu = curcpu;
3071 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
3072 dtrace_specid_t i;
3073
3074 cookie = dtrace_interrupt_disable();
3075
3076 if (dest->dtb_tomax == NULL) {
3077 dtrace_interrupt_enable(cookie);
3078 return;
3079 }
3080
3081 for (i = 0; i < state->dts_nspeculations; i++) {
3082 dtrace_speculation_t *spec = &state->dts_speculations[i];
3083 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
3084
3085 if (src->dtb_tomax == NULL)
3086 continue;
3087
3088 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
3089 src->dtb_offset = 0;
3090 continue;
3091 }
3092
3093 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3094 continue;
3095
3096 if (src->dtb_offset == 0)
3097 continue;
3098
3099 dtrace_speculation_commit(state, cpu, i + 1);
3100 }
3101
3102 dtrace_interrupt_enable(cookie);
3103 }
3104
3105 /*
3106 * Note: not called from probe context. This function is called
3107 * asynchronously (and at a regular interval) to clean any speculations that
3108 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
3109 * is work to be done, it cross calls all CPUs to perform that work;
3110 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
3111 * INACTIVE state until they have been cleaned by all CPUs.
3112 */
3113 static void
3114 dtrace_speculation_clean(dtrace_state_t *state)
3115 {
3116 int work = 0, rv;
3117 dtrace_specid_t i;
3118
3119 for (i = 0; i < state->dts_nspeculations; i++) {
3120 dtrace_speculation_t *spec = &state->dts_speculations[i];
3121
3122 ASSERT(!spec->dtsp_cleaning);
3123
3124 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
3125 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3126 continue;
3127
3128 work++;
3129 spec->dtsp_cleaning = 1;
3130 }
3131
3132 if (!work)
3133 return;
3134
3135 dtrace_xcall(DTRACE_CPUALL,
3136 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3137
3138 /*
3139 * We now know that all CPUs have committed or discarded their
3140 * speculation buffers, as appropriate. We can now set the state
3141 * to inactive.
3142 */
3143 for (i = 0; i < state->dts_nspeculations; i++) {
3144 dtrace_speculation_t *spec = &state->dts_speculations[i];
3145 dtrace_speculation_state_t curstate, new;
3146
3147 if (!spec->dtsp_cleaning)
3148 continue;
3149
3150 curstate = spec->dtsp_state;
3151 ASSERT(curstate == DTRACESPEC_DISCARDING ||
3152 curstate == DTRACESPEC_COMMITTINGMANY);
3153
3154 new = DTRACESPEC_INACTIVE;
3155
3156 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, curstate, new);
3157 ASSERT(rv == curstate);
3158 spec->dtsp_cleaning = 0;
3159 }
3160 }
3161
3162 /*
3163 * Called as part of a speculate() to get the speculative buffer associated
3164 * with a given speculation. Returns NULL if the specified speculation is not
3165 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
3166 * the active CPU is not the specified CPU -- the speculation will be
3167 * atomically transitioned into the ACTIVEMANY state.
3168 */
3169 static dtrace_buffer_t *
3170 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3171 dtrace_specid_t which)
3172 {
3173 dtrace_speculation_t *spec;
3174 dtrace_speculation_state_t curstate, new = 0;
3175 dtrace_buffer_t *buf;
3176
3177 if (which == 0)
3178 return (NULL);
3179
3180 if (which > state->dts_nspeculations) {
3181 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3182 return (NULL);
3183 }
3184
3185 spec = &state->dts_speculations[which - 1];
3186 buf = &spec->dtsp_buffer[cpuid];
3187
3188 do {
3189 curstate = spec->dtsp_state;
3190
3191 switch (curstate) {
3192 case DTRACESPEC_INACTIVE:
3193 case DTRACESPEC_COMMITTINGMANY:
3194 case DTRACESPEC_DISCARDING:
3195 return (NULL);
3196
3197 case DTRACESPEC_COMMITTING:
3198 ASSERT(buf->dtb_offset == 0);
3199 return (NULL);
3200
3201 case DTRACESPEC_ACTIVEONE:
3202 /*
3203 * This speculation is currently active on one CPU.
3204 * Check the offset in the buffer; if it's non-zero,
3205 * that CPU must be us (and we leave the state alone).
3206 * If it's zero, assume that we're starting on a new
3207 * CPU -- and change the state to indicate that the
3208 * speculation is active on more than one CPU.
3209 */
3210 if (buf->dtb_offset != 0)
3211 return (buf);
3212
3213 new = DTRACESPEC_ACTIVEMANY;
3214 break;
3215
3216 case DTRACESPEC_ACTIVEMANY:
3217 return (buf);
3218
3219 case DTRACESPEC_ACTIVE:
3220 new = DTRACESPEC_ACTIVEONE;
3221 break;
3222
3223 default:
3224 ASSERT(0);
3225 }
3226 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3227 curstate, new) != curstate);
3228
3229 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3230 return (buf);
3231 }
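/*
 * Taken together, the routines above implement the following transitions (a
 * summary of the code, not a substitute for the state diagram in
 * <sys/dtrace_impl.h>):
 *
 *	INACTIVE -> ACTIVE			dtrace_speculation()
 *	ACTIVE -> ACTIVEONE -> ACTIVEMANY	dtrace_speculation_buffer()
 *	ACTIVE* -> COMMITTING(MANY)		dtrace_speculation_commit()
 *	ACTIVE* -> DISCARDING (or INACTIVE)	dtrace_speculation_discard()
 *	COMMITTING -> INACTIVE			dtrace_speculation_commit()
 *	COMMITTINGMANY, DISCARDING -> INACTIVE	dtrace_speculation_clean()
 */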
3232
3233 /*
3234 * Return a string. In the event that the user lacks the privilege to access
3235 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3236 * don't fail access checking.
3237 *
3238 * dtrace_dif_variable() uses this routine as a helper for various
3239 * builtin values such as 'execname' and 'probefunc.'
3240 */
3241 uintptr_t
3242 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3243 dtrace_mstate_t *mstate)
3244 {
3245 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3246 uintptr_t ret;
3247 size_t strsz;
3248
3249 /*
3250 * The easy case: this probe is allowed to read all of memory, so
3251 * we can just return this as a vanilla pointer.
3252 */
3253 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3254 return (addr);
3255
3256 /*
3257 * This is the tougher case: we copy the string in question from
3258 * kernel memory into scratch memory and return it that way: this
3259 * ensures that we won't trip up when access checking tests the
3260 * BYREF return value.
3261 */
3262 strsz = dtrace_strlen((char *)addr, size) + 1;
3263
3264 if (mstate->dtms_scratch_ptr + strsz >
3265 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3266 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3267 return (0);
3268 }
3269
3270 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3271 strsz);
3272 ret = mstate->dtms_scratch_ptr;
3273 mstate->dtms_scratch_ptr += strsz;
3274 return (ret);
3275 }
3276
3277 /*
3278 * Return a string from a memory address which is known to have one or
3279 * more concatenated, individually zero-terminated sub-strings.
3280 * In the event that the user lacks the privilege to access
3281 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3282 * don't fail access checking.
3283 *
3284 * dtrace_dif_variable() uses this routine as a helper for various
3285 * builtin values such as 'execargs'.
3286 */
3287 static uintptr_t
3288 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
3289 dtrace_mstate_t *mstate)
3290 {
3291 char *p;
3292 size_t i;
3293 uintptr_t ret;
3294
3295 if (mstate->dtms_scratch_ptr + strsz >
3296 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3297 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3298 return (0);
3299 }
3300
3301 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3302 strsz);
3303
3304 /* Replace sub-string termination characters with a space. */
3305 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
3306 p++, i++)
3307 if (*p == '\0')
3308 *p = ' ';
3309
3310 ret = mstate->dtms_scratch_ptr;
3311 mstate->dtms_scratch_ptr += strsz;
3312 return (ret);
3313 }
3314
3315 /*
3316 * This function implements the DIF emulator's variable lookups. The emulator
3317 * passes a reserved variable identifier and optional built-in array index.
3318 */
3319 static uint64_t
3320 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3321 uint64_t ndx)
3322 {
3323 /*
3324 * If we're accessing one of the uncached arguments, we'll turn this
3325 * into a reference in the args array.
3326 */
3327 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3328 ndx = v - DIF_VAR_ARG0;
3329 v = DIF_VAR_ARGS;
3330 }
3331
3332 switch (v) {
3333 case DIF_VAR_ARGS:
3334 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3335 if (ndx >= sizeof (mstate->dtms_arg) /
3336 sizeof (mstate->dtms_arg[0])) {
3337 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3338 dtrace_provider_t *pv;
3339 uint64_t val;
3340
3341 pv = mstate->dtms_probe->dtpr_provider;
3342 if (pv->dtpv_pops.dtps_getargval != NULL)
3343 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3344 mstate->dtms_probe->dtpr_id,
3345 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3346 else
3347 val = dtrace_getarg(ndx, aframes);
3348
3349 /*
3350 * This is regrettably required to keep the compiler
3351 * from tail-optimizing the call to dtrace_getarg().
3352 * The condition always evaluates to true, but the
3353 * compiler has no way of figuring that out a priori.
3354 * (None of this would be necessary if the compiler
3355 * could be relied upon to _always_ tail-optimize
3356 * the call to dtrace_getarg() -- but it can't.)
3357 */
3358 if (mstate->dtms_probe != NULL)
3359 return (val);
3360
3361 ASSERT(0);
3362 }
3363
3364 return (mstate->dtms_arg[ndx]);
3365
3366 case DIF_VAR_REGS:
3367 case DIF_VAR_UREGS: {
3368 struct trapframe *tframe;
3369
3370 if (!dtrace_priv_proc(state))
3371 return (0);
3372
3373 if (v == DIF_VAR_REGS)
3374 tframe = curthread->t_dtrace_trapframe;
3375 else
3376 tframe = curthread->td_frame;
3377
3378 if (tframe == NULL) {
3379 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3380 cpu_core[curcpu].cpuc_dtrace_illval = 0;
3381 return (0);
3382 }
3383
3384 return (dtrace_getreg(tframe, ndx));
3385 }
3386
3387 case DIF_VAR_CURTHREAD:
3388 if (!dtrace_priv_proc(state))
3389 return (0);
3390 return ((uint64_t)(uintptr_t)curthread);
3391
3392 case DIF_VAR_TIMESTAMP:
3393 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3394 mstate->dtms_timestamp = dtrace_gethrtime();
3395 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3396 }
3397 return (mstate->dtms_timestamp);
3398
3399 case DIF_VAR_VTIMESTAMP:
3400 ASSERT(dtrace_vtime_references != 0);
3401 return (curthread->t_dtrace_vtime);
3402
3403 case DIF_VAR_WALLTIMESTAMP:
3404 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3405 mstate->dtms_walltimestamp = dtrace_gethrestime();
3406 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3407 }
3408 return (mstate->dtms_walltimestamp);
3409
3410 #ifdef illumos
3411 case DIF_VAR_IPL:
3412 if (!dtrace_priv_kernel(state))
3413 return (0);
3414 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3415 mstate->dtms_ipl = dtrace_getipl();
3416 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3417 }
3418 return (mstate->dtms_ipl);
3419 #endif
3420
3421 case DIF_VAR_EPID:
3422 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3423 return (mstate->dtms_epid);
3424
3425 case DIF_VAR_ID:
3426 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3427 return (mstate->dtms_probe->dtpr_id);
3428
3429 case DIF_VAR_STACKDEPTH:
3430 if (!dtrace_priv_kernel(state))
3431 return (0);
3432 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3433 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3434
3435 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3436 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3437 }
3438 return (mstate->dtms_stackdepth);
3439
3440 case DIF_VAR_USTACKDEPTH:
3441 if (!dtrace_priv_proc(state))
3442 return (0);
3443 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3444 /*
3445 * See comment in DIF_VAR_PID.
3446 */
3447 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3448 CPU_ON_INTR(CPU)) {
3449 mstate->dtms_ustackdepth = 0;
3450 } else {
3451 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3452 mstate->dtms_ustackdepth =
3453 dtrace_getustackdepth();
3454 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3455 }
3456 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3457 }
3458 return (mstate->dtms_ustackdepth);
3459
3460 case DIF_VAR_CALLER:
3461 if (!dtrace_priv_kernel(state))
3462 return (0);
3463 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3464 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3465
3466 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3467 /*
3468 * If this is an unanchored probe, we are
3469 * required to go through the slow path:
3470 * dtrace_caller() only guarantees correct
3471 * results for anchored probes.
3472 */
3473 pc_t caller[2] = {0, 0};
3474
3475 dtrace_getpcstack(caller, 2, aframes,
3476 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3477 mstate->dtms_caller = caller[1];
3478 } else if ((mstate->dtms_caller =
3479 dtrace_caller(aframes)) == -1) {
3480 /*
3481 * We have failed to do this the quick way;
3482 * we must resort to the slower approach of
3483 * calling dtrace_getpcstack().
3484 */
3485 pc_t caller = 0;
3486
3487 dtrace_getpcstack(&caller, 1, aframes, NULL);
3488 mstate->dtms_caller = caller;
3489 }
3490
3491 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3492 }
3493 return (mstate->dtms_caller);
3494
3495 case DIF_VAR_UCALLER:
3496 if (!dtrace_priv_proc(state))
3497 return (0);
3498
3499 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3500 uint64_t ustack[3];
3501
3502 /*
3503 * dtrace_getupcstack() fills in the first uint64_t
3504 * with the current PID. The second uint64_t will
3505 * be the program counter at user-level. The third
3506 * uint64_t will contain the caller, which is what
3507 * we're after.
3508 */
3509 ustack[2] = 0;
3510 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3511 dtrace_getupcstack(ustack, 3);
3512 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3513 mstate->dtms_ucaller = ustack[2];
3514 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3515 }
3516
3517 return (mstate->dtms_ucaller);
3518
3519 case DIF_VAR_PROBEPROV:
3520 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3521 return (dtrace_dif_varstr(
3522 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3523 state, mstate));
3524
3525 case DIF_VAR_PROBEMOD:
3526 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3527 return (dtrace_dif_varstr(
3528 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3529 state, mstate));
3530
3531 case DIF_VAR_PROBEFUNC:
3532 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3533 return (dtrace_dif_varstr(
3534 (uintptr_t)mstate->dtms_probe->dtpr_func,
3535 state, mstate));
3536
3537 case DIF_VAR_PROBENAME:
3538 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3539 return (dtrace_dif_varstr(
3540 (uintptr_t)mstate->dtms_probe->dtpr_name,
3541 state, mstate));
3542
3543 case DIF_VAR_PID:
3544 if (!dtrace_priv_proc(state))
3545 return (0);
3546
3547 #ifdef illumos
3548 /*
3549 * Note that we are assuming that an unanchored probe is
3550 * always due to a high-level interrupt. (And we're assuming
3551 * that there is only a single high level interrupt.)
3552 */
3553 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3554 return (pid0.pid_id);
3555
3556 /*
3557 * It is always safe to dereference one's own t_procp pointer:
3558 * it always points to a valid, allocated proc structure.
3559 * Further, it is always safe to dereference the p_pidp member
3560	 * of one's own proc structure. (These are truisms because
3561	 * threads and processes don't clean up their own state --
3562	 * they leave that task to whoever reaps them.)
3563 */
3564 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3565 #else
3566 return ((uint64_t)curproc->p_pid);
3567 #endif
3568
3569 case DIF_VAR_PPID:
3570 if (!dtrace_priv_proc(state))
3571 return (0);
3572
3573 #ifdef illumos
3574 /*
3575 * See comment in DIF_VAR_PID.
3576 */
3577 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3578 return (pid0.pid_id);
3579
3580 /*
3581 * It is always safe to dereference one's own t_procp pointer:
3582 * it always points to a valid, allocated proc structure.
3583 * (This is true because threads don't clean up their own
3584	 * state -- they leave that task to whoever reaps them.)
3585 */
3586 return ((uint64_t)curthread->t_procp->p_ppid);
3587 #else
3588 if (curproc->p_pid == proc0.p_pid)
3589 return (curproc->p_pid);
3590 else
3591 return (curproc->p_pptr->p_pid);
3592 #endif
3593
3594 case DIF_VAR_TID:
3595 #ifdef illumos
3596 /*
3597 * See comment in DIF_VAR_PID.
3598 */
3599 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3600 return (0);
3601 #endif
3602
3603 return ((uint64_t)curthread->t_tid);
3604
3605 case DIF_VAR_EXECARGS: {
3606 struct pargs *p_args = curthread->td_proc->p_args;
3607
3608 if (p_args == NULL)
3609 			return (0);
3610
3611 return (dtrace_dif_varstrz(
3612 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3613 }
3614
3615 case DIF_VAR_EXECNAME:
3616 #ifdef illumos
3617 if (!dtrace_priv_proc(state))
3618 return (0);
3619
3620 /*
3621 * See comment in DIF_VAR_PID.
3622 */
3623 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3624 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3625
3626 /*
3627 * It is always safe to dereference one's own t_procp pointer:
3628 * it always points to a valid, allocated proc structure.
3629 * (This is true because threads don't clean up their own
3630	 * state -- they leave that task to whoever reaps them.)
3631 */
3632 return (dtrace_dif_varstr(
3633 (uintptr_t)curthread->t_procp->p_user.u_comm,
3634 state, mstate));
3635 #else
3636 return (dtrace_dif_varstr(
3637 (uintptr_t) curthread->td_proc->p_comm, state, mstate));
3638 #endif
3639
3640 case DIF_VAR_ZONENAME:
3641 #ifdef illumos
3642 if (!dtrace_priv_proc(state))
3643 return (0);
3644
3645 /*
3646 * See comment in DIF_VAR_PID.
3647 */
3648 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3649 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3650
3651 /*
3652 * It is always safe to dereference one's own t_procp pointer:
3653 * it always points to a valid, allocated proc structure.
3654 * (This is true because threads don't clean up their own
3655	 * state -- they leave that task to whoever reaps them.)
3656 */
3657 return (dtrace_dif_varstr(
3658 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3659 state, mstate));
3660 #elif defined(__FreeBSD__)
3661 /*
3662	 * On FreeBSD, we provide zonename compatibility by falling through
3663	 * into jailname.
3664 */
3665 case DIF_VAR_JAILNAME:
3666 if (!dtrace_priv_kernel(state))
3667 return (0);
3668
3669 return (dtrace_dif_varstr(
3670 (uintptr_t)curthread->td_ucred->cr_prison->pr_name,
3671 state, mstate));
3672
3673 case DIF_VAR_JID:
3674 if (!dtrace_priv_kernel(state))
3675 return (0);
3676
3677 return ((uint64_t)curthread->td_ucred->cr_prison->pr_id);
3678 #else
3679 return (0);
3680 #endif
3681
3682 case DIF_VAR_UID:
3683 if (!dtrace_priv_proc(state))
3684 return (0);
3685
3686 #ifdef illumos
3687 /*
3688 * See comment in DIF_VAR_PID.
3689 */
3690 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3691 return ((uint64_t)p0.p_cred->cr_uid);
3692
3693 /*
3694 * It is always safe to dereference one's own t_procp pointer:
3695 * it always points to a valid, allocated proc structure.
3696 * (This is true because threads don't clean up their own
3697	 * state -- they leave that task to whoever reaps them.)
3698 *
3699 * Additionally, it is safe to dereference one's own process
3700 * credential, since this is never NULL after process birth.
3701 */
3702 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3703 #else
3704 return ((uint64_t)curthread->td_ucred->cr_uid);
3705 #endif
3706
3707 case DIF_VAR_GID:
3708 if (!dtrace_priv_proc(state))
3709 return (0);
3710
3711 #ifdef illumos
3712 /*
3713 * See comment in DIF_VAR_PID.
3714 */
3715 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3716 return ((uint64_t)p0.p_cred->cr_gid);
3717
3718 /*
3719 * It is always safe to dereference one's own t_procp pointer:
3720 * it always points to a valid, allocated proc structure.
3721 * (This is true because threads don't clean up their own
3722	 * state -- they leave that task to whoever reaps them.)
3723 *
3724 * Additionally, it is safe to dereference one's own process
3725 * credential, since this is never NULL after process birth.
3726 */
3727 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3728 #else
3729 return ((uint64_t)curthread->td_ucred->cr_gid);
3730 #endif
3731
3732 case DIF_VAR_ERRNO: {
3733 #ifdef illumos
3734 klwp_t *lwp;
3735 if (!dtrace_priv_proc(state))
3736 return (0);
3737
3738 /*
3739 * See comment in DIF_VAR_PID.
3740 */
3741 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3742 return (0);
3743
3744 /*
3745 * It is always safe to dereference one's own t_lwp pointer in
3746 * the event that this pointer is non-NULL. (This is true
3747 * because threads and lwps don't clean up their own state --
3748	 * they leave that task to whoever reaps them.)
3749 */
3750 if ((lwp = curthread->t_lwp) == NULL)
3751 return (0);
3752
3753 return ((uint64_t)lwp->lwp_errno);
3754 #else
3755 return (curthread->td_errno);
3756 #endif
3757 }
3758 #ifndef illumos
3759 case DIF_VAR_CPU: {
3760 		return (curcpu);
3761 }
3762 #endif
3763 default:
3764 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3765 return (0);
3766 }
3767 }
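
/*
 * At the D level, the variables emulated above surface as built-ins such
 * as pid, execname, zonename and errno.  As an illustrative (and hedged)
 * sketch of how a consumer exercises this path, a one-liner like
 *
 *	dtrace -n 'syscall:::entry { printf("%d %s\n", pid, execname); }'
 *
 * causes DIF_VAR_PID and DIF_VAR_EXECNAME lookups to be serviced by the
 * code above on each probe firing.
 */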
3768
3769
3770 typedef enum dtrace_json_state {
3771 DTRACE_JSON_REST = 1,
3772 DTRACE_JSON_OBJECT,
3773 DTRACE_JSON_STRING,
3774 DTRACE_JSON_STRING_ESCAPE,
3775 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3776 DTRACE_JSON_COLON,
3777 DTRACE_JSON_COMMA,
3778 DTRACE_JSON_VALUE,
3779 DTRACE_JSON_IDENTIFIER,
3780 DTRACE_JSON_NUMBER,
3781 DTRACE_JSON_NUMBER_FRAC,
3782 DTRACE_JSON_NUMBER_EXP,
3783 DTRACE_JSON_COLLECT_OBJECT
3784 } dtrace_json_state_t;
3785
3786 /*
3787 * This function possesses just enough knowledge about JSON to extract a single
3788 * value from a JSON string and store it in the scratch buffer. It is able
3789 * to extract nested object values, and members of arrays by index.
3790 *
3791 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3792  * be looked up as we descend into the object tree, e.g.
3793 *
3794 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3795 * with nelems = 5.
3796 *
3797 * The run time of this function must be bounded above by strsize to limit the
3798 * amount of work done in probe context. As such, it is implemented as a
3799 * simple state machine, reading one character at a time using safe loads
3800 * until we find the requested element, hit a parsing error or run off the
3801 * end of the object or string.
3802 *
3803 * As there is no way for a subroutine to return an error without interrupting
3804 * clause execution, we simply return NULL in the event of a missing key or any
3805 * other error condition. Each NULL return in this function is commented with
3806 * the error condition it represents -- parsing or otherwise.
3807 *
3808 * The set of states for the state machine closely matches the JSON
3809 * specification (http://json.org/). Briefly:
3810 *
3811 * DTRACE_JSON_REST:
3812 * Skip whitespace until we find either a top-level Object, moving
3813 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3814 *
3815 * DTRACE_JSON_OBJECT:
3816 * Locate the next key String in an Object. Sets a flag to denote
3817 * the next String as a key string and moves to DTRACE_JSON_STRING.
3818 *
3819 * DTRACE_JSON_COLON:
3820 * Skip whitespace until we find the colon that separates key Strings
3821 * from their values. Once found, move to DTRACE_JSON_VALUE.
3822 *
3823 * DTRACE_JSON_VALUE:
3824 * Detects the type of the next value (String, Number, Identifier, Object
3825 * or Array) and routes to the states that process that type. Here we also
3826 * deal with the element selector list if we are requested to traverse down
3827 * into the object tree.
3828 *
3829 * DTRACE_JSON_COMMA:
3830 * Skip whitespace until we find the comma that separates key-value pairs
3831 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3832 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3833 * states return to this state at the end of their value, unless otherwise
3834 * noted.
3835 *
3836 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3837 * Processes a Number literal from the JSON, including any exponent
3838 * component that may be present. Numbers are returned as strings, which
3839 * may be passed to strtoll() if an integer is required.
3840 *
3841 * DTRACE_JSON_IDENTIFIER:
3842 * Processes a "true", "false" or "null" literal in the JSON.
3843 *
3844 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3845 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3846 * Processes a String literal from the JSON, whether the String denotes
3847 * a key, a value or part of a larger Object. Handles all escape sequences
3848 * present in the specification, including four-digit unicode characters,
3849 * but merely includes the escape sequence without converting it to the
3850 * actual escaped character. If the String is flagged as a key, we
3851 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3852 *
3853 * DTRACE_JSON_COLLECT_OBJECT:
3854 * This state collects an entire Object (or Array), correctly handling
3855 * embedded strings. If the full element selector list matches this nested
3856 * object, we return the Object in full as a string. If not, we use this
3857 * state to skip to the next value at this level and continue processing.
3858 *
3859 * NOTE: This function uses various macros from strtolctype.h to manipulate
3860 * digit values, etc -- these have all been checked to ensure they make
3861 * no additional function calls.
3862 */
3863 static char *
3864 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3865 char *dest)
3866 {
3867 dtrace_json_state_t state = DTRACE_JSON_REST;
3868 int64_t array_elem = INT64_MIN;
3869 int64_t array_pos = 0;
3870 uint8_t escape_unicount = 0;
3871 boolean_t string_is_key = B_FALSE;
3872 boolean_t collect_object = B_FALSE;
3873 boolean_t found_key = B_FALSE;
3874 boolean_t in_array = B_FALSE;
3875 uint32_t braces = 0, brackets = 0;
3876 char *elem = elemlist;
3877 char *dd = dest;
3878 uintptr_t cur;
3879
3880 for (cur = json; cur < json + size; cur++) {
3881 char cc = dtrace_load8(cur);
3882 if (cc == '\0')
3883 return (NULL);
3884
3885 switch (state) {
3886 case DTRACE_JSON_REST:
3887 if (isspace(cc))
3888 break;
3889
3890 if (cc == '{') {
3891 state = DTRACE_JSON_OBJECT;
3892 break;
3893 }
3894
3895 if (cc == '[') {
3896 in_array = B_TRUE;
3897 array_pos = 0;
3898 array_elem = dtrace_strtoll(elem, 10, size);
3899 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
3900 state = DTRACE_JSON_VALUE;
3901 break;
3902 }
3903
3904 /*
3905 * ERROR: expected to find a top-level object or array.
3906 */
3907 return (NULL);
3908 case DTRACE_JSON_OBJECT:
3909 if (isspace(cc))
3910 break;
3911
3912 if (cc == '"') {
3913 state = DTRACE_JSON_STRING;
3914 string_is_key = B_TRUE;
3915 break;
3916 }
3917
3918 /*
3919 * ERROR: either the object did not start with a key
3920 * string, or we've run off the end of the object
3921 * without finding the requested key.
3922 */
3923 return (NULL);
3924 case DTRACE_JSON_STRING:
3925 if (cc == '\\') {
3926 *dd++ = '\\';
3927 state = DTRACE_JSON_STRING_ESCAPE;
3928 break;
3929 }
3930
3931 if (cc == '"') {
3932 if (collect_object) {
3933 /*
3934 * We don't reset the dest here, as
3935 * the string is part of a larger
3936 * object being collected.
3937 */
3938 *dd++ = cc;
3939 collect_object = B_FALSE;
3940 state = DTRACE_JSON_COLLECT_OBJECT;
3941 break;
3942 }
3943 *dd = '\0';
3944 dd = dest; /* reset string buffer */
3945 if (string_is_key) {
3946 if (dtrace_strncmp(dest, elem,
3947 size) == 0)
3948 found_key = B_TRUE;
3949 } else if (found_key) {
3950 if (nelems > 1) {
3951 /*
3952 * We expected an object, not
3953 * this string.
3954 */
3955 return (NULL);
3956 }
3957 return (dest);
3958 }
3959 state = string_is_key ? DTRACE_JSON_COLON :
3960 DTRACE_JSON_COMMA;
3961 string_is_key = B_FALSE;
3962 break;
3963 }
3964
3965 *dd++ = cc;
3966 break;
3967 case DTRACE_JSON_STRING_ESCAPE:
3968 *dd++ = cc;
3969 if (cc == 'u') {
3970 escape_unicount = 0;
3971 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
3972 } else {
3973 state = DTRACE_JSON_STRING;
3974 }
3975 break;
3976 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
3977 if (!isxdigit(cc)) {
3978 /*
3979 * ERROR: invalid unicode escape, expected
3980 				 * four valid hexadecimal digits.
3981 */
3982 return (NULL);
3983 }
3984
3985 *dd++ = cc;
3986 if (++escape_unicount == 4)
3987 state = DTRACE_JSON_STRING;
3988 break;
3989 case DTRACE_JSON_COLON:
3990 if (isspace(cc))
3991 break;
3992
3993 if (cc == ':') {
3994 state = DTRACE_JSON_VALUE;
3995 break;
3996 }
3997
3998 /*
3999 * ERROR: expected a colon.
4000 */
4001 return (NULL);
4002 case DTRACE_JSON_COMMA:
4003 if (isspace(cc))
4004 break;
4005
4006 if (cc == ',') {
4007 if (in_array) {
4008 state = DTRACE_JSON_VALUE;
4009 if (++array_pos == array_elem)
4010 found_key = B_TRUE;
4011 } else {
4012 state = DTRACE_JSON_OBJECT;
4013 }
4014 break;
4015 }
4016
4017 /*
4018 * ERROR: either we hit an unexpected character, or
4019 * we reached the end of the object or array without
4020 * finding the requested key.
4021 */
4022 return (NULL);
4023 case DTRACE_JSON_IDENTIFIER:
4024 if (islower(cc)) {
4025 *dd++ = cc;
4026 break;
4027 }
4028
4029 *dd = '\0';
4030 dd = dest; /* reset string buffer */
4031
4032 if (dtrace_strncmp(dest, "true", 5) == 0 ||
4033 dtrace_strncmp(dest, "false", 6) == 0 ||
4034 dtrace_strncmp(dest, "null", 5) == 0) {
4035 if (found_key) {
4036 if (nelems > 1) {
4037 /*
4038 * ERROR: We expected an object,
4039 * not this identifier.
4040 */
4041 return (NULL);
4042 }
4043 return (dest);
4044 } else {
4045 cur--;
4046 state = DTRACE_JSON_COMMA;
4047 break;
4048 }
4049 }
4050
4051 /*
4052 * ERROR: we did not recognise the identifier as one
4053 * of those in the JSON specification.
4054 */
4055 return (NULL);
4056 case DTRACE_JSON_NUMBER:
4057 if (cc == '.') {
4058 *dd++ = cc;
4059 state = DTRACE_JSON_NUMBER_FRAC;
4060 break;
4061 }
4062
4063 if (cc == 'x' || cc == 'X') {
4064 /*
4065 * ERROR: specification explicitly excludes
4066 				 * hexadecimal or octal numbers.
4067 */
4068 return (NULL);
4069 }
4070
4071 /* FALLTHRU */
4072 case DTRACE_JSON_NUMBER_FRAC:
4073 if (cc == 'e' || cc == 'E') {
4074 *dd++ = cc;
4075 state = DTRACE_JSON_NUMBER_EXP;
4076 break;
4077 }
4078
4079 if (cc == '+' || cc == '-') {
4080 /*
4081 * ERROR: expect sign as part of exponent only.
4082 */
4083 return (NULL);
4084 }
4085 /* FALLTHRU */
4086 case DTRACE_JSON_NUMBER_EXP:
4087 if (isdigit(cc) || cc == '+' || cc == '-') {
4088 *dd++ = cc;
4089 break;
4090 }
4091
4092 *dd = '\0';
4093 dd = dest; /* reset string buffer */
4094 if (found_key) {
4095 if (nelems > 1) {
4096 /*
4097 * ERROR: We expected an object, not
4098 * this number.
4099 */
4100 return (NULL);
4101 }
4102 return (dest);
4103 }
4104
4105 cur--;
4106 state = DTRACE_JSON_COMMA;
4107 break;
4108 case DTRACE_JSON_VALUE:
4109 if (isspace(cc))
4110 break;
4111
4112 if (cc == '{' || cc == '[') {
4113 if (nelems > 1 && found_key) {
4114 in_array = cc == '[' ? B_TRUE : B_FALSE;
4115 /*
4116 * If our element selector directs us
4117 * to descend into this nested object,
4118 * then move to the next selector
4119 * element in the list and restart the
4120 * state machine.
4121 */
4122 while (*elem != '\0')
4123 elem++;
4124 elem++; /* skip the inter-element NUL */
4125 nelems--;
4126 dd = dest;
4127 if (in_array) {
4128 state = DTRACE_JSON_VALUE;
4129 array_pos = 0;
4130 array_elem = dtrace_strtoll(
4131 elem, 10, size);
4132 found_key = array_elem == 0 ?
4133 B_TRUE : B_FALSE;
4134 } else {
4135 found_key = B_FALSE;
4136 state = DTRACE_JSON_OBJECT;
4137 }
4138 break;
4139 }
4140
4141 /*
4142 * Otherwise, we wish to either skip this
4143 * nested object or return it in full.
4144 */
4145 if (cc == '[')
4146 brackets = 1;
4147 else
4148 braces = 1;
4149 *dd++ = cc;
4150 state = DTRACE_JSON_COLLECT_OBJECT;
4151 break;
4152 }
4153
4154 if (cc == '"') {
4155 state = DTRACE_JSON_STRING;
4156 break;
4157 }
4158
4159 if (islower(cc)) {
4160 /*
4161 * Here we deal with true, false and null.
4162 */
4163 *dd++ = cc;
4164 state = DTRACE_JSON_IDENTIFIER;
4165 break;
4166 }
4167
4168 if (cc == '-' || isdigit(cc)) {
4169 *dd++ = cc;
4170 state = DTRACE_JSON_NUMBER;
4171 break;
4172 }
4173
4174 /*
4175 * ERROR: unexpected character at start of value.
4176 */
4177 return (NULL);
4178 case DTRACE_JSON_COLLECT_OBJECT:
4179 if (cc == '\0')
4180 /*
4181 * ERROR: unexpected end of input.
4182 */
4183 return (NULL);
4184
4185 *dd++ = cc;
4186 if (cc == '"') {
4187 collect_object = B_TRUE;
4188 state = DTRACE_JSON_STRING;
4189 break;
4190 }
4191
4192 if (cc == ']') {
4193 if (brackets-- == 0) {
4194 /*
4195 * ERROR: unbalanced brackets.
4196 */
4197 return (NULL);
4198 }
4199 } else if (cc == '}') {
4200 if (braces-- == 0) {
4201 /*
4202 * ERROR: unbalanced braces.
4203 */
4204 return (NULL);
4205 }
4206 } else if (cc == '{') {
4207 braces++;
4208 } else if (cc == '[') {
4209 brackets++;
4210 }
4211
4212 if (brackets == 0 && braces == 0) {
4213 if (found_key) {
4214 *dd = '\0';
4215 return (dest);
4216 }
4217 dd = dest; /* reset string buffer */
4218 state = DTRACE_JSON_COMMA;
4219 }
4220 break;
4221 }
4222 }
4223 return (NULL);
4224 }
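
/*
 * A minimal sketch of the calling convention (mirroring what the
 * DIF_SUBR_JSON case below constructs; buffer names here are illustrative
 * only): for the D expression json(str, "foo[0].bar"), the element
 * selector is split into the packed list
 *
 *	"foo" NUL "0" NUL "bar" NUL		(nelems = 3)
 *
 * and the parser is invoked roughly as
 *
 *	val = dtrace_json(strsize, json, elemlist, 3, dest);
 *
 * where val is dest on success and NULL on any parse error or missing key.
 */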
4225
4226 /*
4227 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4228 * Notice that we don't bother validating the proper number of arguments or
4229 * their types in the tuple stack. This isn't needed because all argument
4230 * interpretation is safe because of our load safety -- the worst that can
4231 * happen is that a bogus program can obtain bogus results.
4232 */
4233 static void
4234 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4235 dtrace_key_t *tupregs, int nargs,
4236 dtrace_mstate_t *mstate, dtrace_state_t *state)
4237 {
4238 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
4239 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
4240 dtrace_vstate_t *vstate = &state->dts_vstate;
4241
4242 #ifdef illumos
4243 union {
4244 mutex_impl_t mi;
4245 uint64_t mx;
4246 } m;
4247
4248 union {
4249 krwlock_t ri;
4250 uintptr_t rw;
4251 } r;
4252 #else
4253 struct thread *lowner;
4254 union {
4255 struct lock_object *li;
4256 uintptr_t lx;
4257 } l;
4258 #endif
4259
4260 switch (subr) {
4261 case DIF_SUBR_RAND:
4262 regs[rd] = dtrace_xoroshiro128_plus_next(
4263 state->dts_rstate[curcpu]);
4264 break;
4265
4266 #ifdef illumos
4267 case DIF_SUBR_MUTEX_OWNED:
4268 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4269 mstate, vstate)) {
4270 regs[rd] = 0;
4271 break;
4272 }
4273
4274 m.mx = dtrace_load64(tupregs[0].dttk_value);
4275 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4276 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4277 else
4278 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4279 break;
4280
4281 case DIF_SUBR_MUTEX_OWNER:
4282 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4283 mstate, vstate)) {
4284 regs[rd] = 0;
4285 break;
4286 }
4287
4288 m.mx = dtrace_load64(tupregs[0].dttk_value);
4289 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4290 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4291 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4292 else
4293 regs[rd] = 0;
4294 break;
4295
4296 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4297 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4298 mstate, vstate)) {
4299 regs[rd] = 0;
4300 break;
4301 }
4302
4303 m.mx = dtrace_load64(tupregs[0].dttk_value);
4304 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4305 break;
4306
4307 case DIF_SUBR_MUTEX_TYPE_SPIN:
4308 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4309 mstate, vstate)) {
4310 regs[rd] = 0;
4311 break;
4312 }
4313
4314 m.mx = dtrace_load64(tupregs[0].dttk_value);
4315 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4316 break;
4317
4318 case DIF_SUBR_RW_READ_HELD: {
4319 uintptr_t tmp;
4320
4321 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4322 mstate, vstate)) {
4323 regs[rd] = 0;
4324 break;
4325 }
4326
4327 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4328 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4329 break;
4330 }
4331
4332 case DIF_SUBR_RW_WRITE_HELD:
4333 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4334 mstate, vstate)) {
4335 regs[rd] = 0;
4336 break;
4337 }
4338
4339 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4340 regs[rd] = _RW_WRITE_HELD(&r.ri);
4341 break;
4342
4343 case DIF_SUBR_RW_ISWRITER:
4344 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4345 mstate, vstate)) {
4346 regs[rd] = 0;
4347 break;
4348 }
4349
4350 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4351 regs[rd] = _RW_ISWRITER(&r.ri);
4352 break;
4353
4354 #else /* !illumos */
4355 case DIF_SUBR_MUTEX_OWNED:
4356 if (!dtrace_canload(tupregs[0].dttk_value,
4357 sizeof (struct lock_object), mstate, vstate)) {
4358 regs[rd] = 0;
4359 break;
4360 }
4361 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4362 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4363 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4364 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4365 break;
4366
4367 case DIF_SUBR_MUTEX_OWNER:
4368 if (!dtrace_canload(tupregs[0].dttk_value,
4369 sizeof (struct lock_object), mstate, vstate)) {
4370 regs[rd] = 0;
4371 break;
4372 }
4373 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4374 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4375 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4376 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4377 regs[rd] = (uintptr_t)lowner;
4378 break;
4379
4380 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4381 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4382 mstate, vstate)) {
4383 regs[rd] = 0;
4384 break;
4385 }
4386 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4387 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4388 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SLEEPLOCK) != 0;
4389 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4390 break;
4391
4392 case DIF_SUBR_MUTEX_TYPE_SPIN:
4393 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4394 mstate, vstate)) {
4395 regs[rd] = 0;
4396 break;
4397 }
4398 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4399 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4400 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
4401 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4402 break;
4403
4404 case DIF_SUBR_RW_READ_HELD:
4405 case DIF_SUBR_SX_SHARED_HELD:
4406 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4407 mstate, vstate)) {
4408 regs[rd] = 0;
4409 break;
4410 }
4411 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4412 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4413 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4414 lowner == NULL;
4415 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4416 break;
4417
4418 case DIF_SUBR_RW_WRITE_HELD:
4419 case DIF_SUBR_SX_EXCLUSIVE_HELD:
4420 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4421 mstate, vstate)) {
4422 regs[rd] = 0;
4423 break;
4424 }
4425 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4426 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4427 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4428 lowner != NULL;
4429 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4430 break;
4431
4432 case DIF_SUBR_RW_ISWRITER:
4433 case DIF_SUBR_SX_ISEXCLUSIVE:
4434 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4435 mstate, vstate)) {
4436 regs[rd] = 0;
4437 break;
4438 }
4439 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4440 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4441 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4442 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4443 regs[rd] = (lowner == curthread);
4444 break;
4445 #endif /* illumos */
4446
4447 case DIF_SUBR_BCOPY: {
4448 /*
4449 * We need to be sure that the destination is in the scratch
4450 * region -- no other region is allowed.
4451 */
4452 uintptr_t src = tupregs[0].dttk_value;
4453 uintptr_t dest = tupregs[1].dttk_value;
4454 size_t size = tupregs[2].dttk_value;
4455
4456 if (!dtrace_inscratch(dest, size, mstate)) {
4457 *flags |= CPU_DTRACE_BADADDR;
4458 *illval = regs[rd];
4459 break;
4460 }
4461
4462 if (!dtrace_canload(src, size, mstate, vstate)) {
4463 regs[rd] = 0;
4464 break;
4465 }
4466
4467 dtrace_bcopy((void *)src, (void *)dest, size);
4468 break;
4469 }
4470
4471 case DIF_SUBR_ALLOCA:
4472 case DIF_SUBR_COPYIN: {
4473 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4474 uint64_t size =
4475 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4476 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4477
4478 /*
4479 * This action doesn't require any credential checks since
4480 * probes will not activate in user contexts to which the
4481 * enabling user does not have permissions.
4482 */
4483
4484 /*
4485 * Rounding up the user allocation size could have overflowed
4486 * a large, bogus allocation (like -1ULL) to 0.
4487 */
4488 if (scratch_size < size ||
4489 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4490 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4491 regs[rd] = 0;
4492 break;
4493 }
4494
4495 if (subr == DIF_SUBR_COPYIN) {
4496 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4497 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4498 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4499 }
4500
4501 mstate->dtms_scratch_ptr += scratch_size;
4502 regs[rd] = dest;
4503 break;
4504 }
4505
4506 case DIF_SUBR_COPYINTO: {
4507 uint64_t size = tupregs[1].dttk_value;
4508 uintptr_t dest = tupregs[2].dttk_value;
4509
4510 /*
4511 * This action doesn't require any credential checks since
4512 * probes will not activate in user contexts to which the
4513 * enabling user does not have permissions.
4514 */
4515 if (!dtrace_inscratch(dest, size, mstate)) {
4516 *flags |= CPU_DTRACE_BADADDR;
4517 *illval = regs[rd];
4518 break;
4519 }
4520
4521 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4522 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4523 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4524 break;
4525 }
4526
4527 case DIF_SUBR_COPYINSTR: {
4528 uintptr_t dest = mstate->dtms_scratch_ptr;
4529 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4530
4531 if (nargs > 1 && tupregs[1].dttk_value < size)
4532 size = tupregs[1].dttk_value + 1;
4533
4534 /*
4535 * This action doesn't require any credential checks since
4536 * probes will not activate in user contexts to which the
4537 * enabling user does not have permissions.
4538 */
4539 if (!DTRACE_INSCRATCH(mstate, size)) {
4540 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4541 regs[rd] = 0;
4542 break;
4543 }
4544
4545 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4546 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4547 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4548
4549 ((char *)dest)[size - 1] = '\0';
4550 mstate->dtms_scratch_ptr += size;
4551 regs[rd] = dest;
4552 break;
4553 }
4554
4555 #ifdef illumos
4556 case DIF_SUBR_MSGSIZE:
4557 case DIF_SUBR_MSGDSIZE: {
4558 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4559 uintptr_t wptr, rptr;
4560 size_t count = 0;
4561 int cont = 0;
4562
4563 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
4564
4565 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4566 vstate)) {
4567 regs[rd] = 0;
4568 break;
4569 }
4570
4571 wptr = dtrace_loadptr(baddr +
4572 offsetof(mblk_t, b_wptr));
4573
4574 rptr = dtrace_loadptr(baddr +
4575 offsetof(mblk_t, b_rptr));
4576
4577 if (wptr < rptr) {
4578 *flags |= CPU_DTRACE_BADADDR;
4579 *illval = tupregs[0].dttk_value;
4580 break;
4581 }
4582
4583 daddr = dtrace_loadptr(baddr +
4584 offsetof(mblk_t, b_datap));
4585
4586 baddr = dtrace_loadptr(baddr +
4587 offsetof(mblk_t, b_cont));
4588
4589 /*
4590 			 * We want to protect against denial-of-service here,
4591 * so we're only going to search the list for
4592 * dtrace_msgdsize_max mblks.
4593 */
4594 if (cont++ > dtrace_msgdsize_max) {
4595 *flags |= CPU_DTRACE_ILLOP;
4596 break;
4597 }
4598
4599 if (subr == DIF_SUBR_MSGDSIZE) {
4600 if (dtrace_load8(daddr +
4601 offsetof(dblk_t, db_type)) != M_DATA)
4602 continue;
4603 }
4604
4605 count += wptr - rptr;
4606 }
4607
4608 if (!(*flags & CPU_DTRACE_FAULT))
4609 regs[rd] = count;
4610
4611 break;
4612 }
4613 #endif
4614
4615 case DIF_SUBR_PROGENYOF: {
4616 pid_t pid = tupregs[0].dttk_value;
4617 proc_t *p;
4618 int rval = 0;
4619
4620 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4621
4622 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4623 #ifdef illumos
4624 if (p->p_pidp->pid_id == pid) {
4625 #else
4626 if (p->p_pid == pid) {
4627 #endif
4628 rval = 1;
4629 break;
4630 }
4631 }
4632
4633 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4634
4635 regs[rd] = rval;
4636 break;
4637 }
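
	/*
	 * The parent-chain walk above backs D's progenyof() subroutine; as
	 * a hedged usage sketch, a predicate such as
	 *
	 *	syscall::open*:entry /progenyof($1)/ { ... }
	 *
	 * fires only in processes that are the process named by the first
	 * macro argument or one of its descendants.
	 */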
4638
4639 case DIF_SUBR_SPECULATION:
4640 regs[rd] = dtrace_speculation(state);
4641 break;
4642
4643 case DIF_SUBR_COPYOUT: {
4644 uintptr_t kaddr = tupregs[0].dttk_value;
4645 uintptr_t uaddr = tupregs[1].dttk_value;
4646 uint64_t size = tupregs[2].dttk_value;
4647
4648 if (!dtrace_destructive_disallow &&
4649 dtrace_priv_proc_control(state) &&
4650 !dtrace_istoxic(kaddr, size) &&
4651 dtrace_canload(kaddr, size, mstate, vstate)) {
4652 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4653 dtrace_copyout(kaddr, uaddr, size, flags);
4654 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4655 }
4656 break;
4657 }
4658
4659 case DIF_SUBR_COPYOUTSTR: {
4660 uintptr_t kaddr = tupregs[0].dttk_value;
4661 uintptr_t uaddr = tupregs[1].dttk_value;
4662 uint64_t size = tupregs[2].dttk_value;
4663 size_t lim;
4664
4665 if (!dtrace_destructive_disallow &&
4666 dtrace_priv_proc_control(state) &&
4667 !dtrace_istoxic(kaddr, size) &&
4668 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4669 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4670 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4671 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4672 }
4673 break;
4674 }
4675
4676 case DIF_SUBR_STRLEN: {
4677 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4678 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4679 size_t lim;
4680
4681 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4682 regs[rd] = 0;
4683 break;
4684 }
4685
4686 regs[rd] = dtrace_strlen((char *)addr, lim);
4687 break;
4688 }
4689
4690 case DIF_SUBR_STRCHR:
4691 case DIF_SUBR_STRRCHR: {
4692 /*
4693 * We're going to iterate over the string looking for the
4694 * specified character. We will iterate until we have reached
4695 * the string length or we have found the character. If this
4696 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4697 * of the specified character instead of the first.
4698 */
4699 uintptr_t addr = tupregs[0].dttk_value;
4700 uintptr_t addr_limit;
4701 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4702 size_t lim;
4703 char c, target = (char)tupregs[1].dttk_value;
4704
4705 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4706 regs[rd] = 0;
4707 break;
4708 }
4709 addr_limit = addr + lim;
4710
4711 for (regs[rd] = 0; addr < addr_limit; addr++) {
4712 if ((c = dtrace_load8(addr)) == target) {
4713 regs[rd] = addr;
4714
4715 if (subr == DIF_SUBR_STRCHR)
4716 break;
4717 }
4718
4719 if (c == '\0')
4720 break;
4721 }
4722 break;
4723 }
4724
4725 case DIF_SUBR_STRSTR:
4726 case DIF_SUBR_INDEX:
4727 case DIF_SUBR_RINDEX: {
4728 /*
4729 * We're going to iterate over the string looking for the
4730 * specified string. We will iterate until we have reached
4731 * the string length or we have found the string. (Yes, this
4732 * is done in the most naive way possible -- but considering
4733 * that the string we're searching for is likely to be
4734 * relatively short, the complexity of Rabin-Karp or similar
4735 * hardly seems merited.)
4736 */
4737 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4738 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4739 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4740 size_t len = dtrace_strlen(addr, size);
4741 size_t sublen = dtrace_strlen(substr, size);
4742 char *limit = addr + len, *orig = addr;
4743 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4744 int inc = 1;
4745
4746 regs[rd] = notfound;
4747
4748 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4749 regs[rd] = 0;
4750 break;
4751 }
4752
4753 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4754 vstate)) {
4755 regs[rd] = 0;
4756 break;
4757 }
4758
4759 /*
4760 * strstr() and index()/rindex() have similar semantics if
4761 * both strings are the empty string: strstr() returns a
4762 * pointer to the (empty) string, and index() and rindex()
4763 * both return index 0 (regardless of any position argument).
4764 */
4765 if (sublen == 0 && len == 0) {
4766 if (subr == DIF_SUBR_STRSTR)
4767 regs[rd] = (uintptr_t)addr;
4768 else
4769 regs[rd] = 0;
4770 break;
4771 }
4772
4773 if (subr != DIF_SUBR_STRSTR) {
4774 if (subr == DIF_SUBR_RINDEX) {
4775 limit = orig - 1;
4776 addr += len;
4777 inc = -1;
4778 }
4779
4780 /*
4781 * Both index() and rindex() take an optional position
4782 * argument that denotes the starting position.
4783 */
4784 if (nargs == 3) {
4785 int64_t pos = (int64_t)tupregs[2].dttk_value;
4786
4787 /*
4788 * If the position argument to index() is
4789 * negative, Perl implicitly clamps it at
4790 * zero. This semantic is a little surprising
4791 * given the special meaning of negative
4792 * positions to similar Perl functions like
4793 * substr(), but it appears to reflect a
4794 * notion that index() can start from a
4795 * negative index and increment its way up to
4796 * the string. Given this notion, Perl's
4797 * rindex() is at least self-consistent in
4798 * that it implicitly clamps positions greater
4799 * than the string length to be the string
4800 * length. Where Perl completely loses
4801 * coherence, however, is when the specified
4802 * substring is the empty string (""). In
4803 * this case, even if the position is
4804 * negative, rindex() returns 0 -- and even if
4805 * the position is greater than the length,
4806 * index() returns the string length. These
4807 * semantics violate the notion that index()
4808 * should never return a value less than the
4809 * specified position and that rindex() should
4810 * never return a value greater than the
4811 * specified position. (One assumes that
4812 * these semantics are artifacts of Perl's
4813 * implementation and not the results of
4814 * deliberate design -- it beggars belief that
4815 * even Larry Wall could desire such oddness.)
4816 * While in the abstract one would wish for
4817 * consistent position semantics across
4818 * substr(), index() and rindex() -- or at the
4819 * very least self-consistent position
4820 * semantics for index() and rindex() -- we
4821 * instead opt to keep with the extant Perl
4822 * semantics, in all their broken glory. (Do
4823 * we have more desire to maintain Perl's
4824 * semantics than Perl does? Probably.)
4825 */
4826 if (subr == DIF_SUBR_RINDEX) {
4827 if (pos < 0) {
4828 if (sublen == 0)
4829 regs[rd] = 0;
4830 break;
4831 }
4832
4833 if (pos > len)
4834 pos = len;
4835 } else {
4836 if (pos < 0)
4837 pos = 0;
4838
4839 if (pos >= len) {
4840 if (sublen == 0)
4841 regs[rd] = len;
4842 break;
4843 }
4844 }
4845
4846 addr = orig + pos;
4847 }
4848 }
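
		/*
		 * Two worked examples of the clamping above (a sketch,
		 * assuming the documented D semantics): with the string
		 * "dtrace" (len == 6), index("dtrace", "", 100) yields 6
		 * and rindex("dtrace", "", -5) yields 0, matching the Perl
		 * behaviour described above.
		 */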
4849
4850 for (regs[rd] = notfound; addr != limit; addr += inc) {
4851 if (dtrace_strncmp(addr, substr, sublen) == 0) {
4852 if (subr != DIF_SUBR_STRSTR) {
4853 /*
4854 * As D index() and rindex() are
4855 * modeled on Perl (and not on awk),
4856 * we return a zero-based (and not a
4857 * one-based) index. (For you Perl
4858 * weenies: no, we're not going to add
4859 * $[ -- and shouldn't you be at a con
4860 * or something?)
4861 */
4862 regs[rd] = (uintptr_t)(addr - orig);
4863 break;
4864 }
4865
4866 ASSERT(subr == DIF_SUBR_STRSTR);
4867 regs[rd] = (uintptr_t)addr;
4868 break;
4869 }
4870 }
4871
4872 break;
4873 }
4874
4875 case DIF_SUBR_STRTOK: {
4876 uintptr_t addr = tupregs[0].dttk_value;
4877 uintptr_t tokaddr = tupregs[1].dttk_value;
4878 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4879 uintptr_t limit, toklimit;
4880 size_t clim;
4881 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
4882 char *dest = (char *)mstate->dtms_scratch_ptr;
4883 int i;
4884
4885 /*
4886 * Check both the token buffer and (later) the input buffer,
4887 * since both could be non-scratch addresses.
4888 */
4889 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
4890 regs[rd] = 0;
4891 break;
4892 }
4893 toklimit = tokaddr + clim;
4894
4895 if (!DTRACE_INSCRATCH(mstate, size)) {
4896 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4897 regs[rd] = 0;
4898 break;
4899 }
4900
4901 if (addr == 0) {
4902 /*
4903 * If the address specified is NULL, we use our saved
4904 * strtok pointer from the mstate. Note that this
4905 * means that the saved strtok pointer is _only_
4906 * valid within multiple enablings of the same probe --
4907 * it behaves like an implicit clause-local variable.
4908 */
4909 addr = mstate->dtms_strtok;
4910 limit = mstate->dtms_strtok_limit;
4911 } else {
4912 /*
4913 * If the user-specified address is non-NULL we must
4914 * access check it. This is the only time we have
4915 * a chance to do so, since this address may reside
4916 			 * in the string table of this clause -- future calls
4917 * (when we fetch addr from mstate->dtms_strtok)
4918 * would fail this access check.
4919 */
4920 if (!dtrace_strcanload(addr, size, &clim, mstate,
4921 vstate)) {
4922 regs[rd] = 0;
4923 break;
4924 }
4925 limit = addr + clim;
4926 }
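
		/*
		 * At the D level the saved-pointer behaviour described
		 * above means that, as an illustrative sketch,
		 *
		 *	this->tok = strtok(this->str, "/");
		 *	this->tok = strtok(NULL, "/");
		 *
		 * returns successive "/"-separated tokens, the NULL first
		 * argument resuming from dtms_strtok.
		 */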
4927
4928 /*
4929 * First, zero the token map, and then process the token
4930 * string -- setting a bit in the map for every character
4931 * found in the token string.
4932 */
4933 for (i = 0; i < sizeof (tokmap); i++)
4934 tokmap[i] = 0;
4935
4936 for (; tokaddr < toklimit; tokaddr++) {
4937 if ((c = dtrace_load8(tokaddr)) == '\0')
4938 break;
4939
4940 ASSERT((c >> 3) < sizeof (tokmap));
4941 tokmap[c >> 3] |= (1 << (c & 0x7));
4942 }
4943
4944 for (; addr < limit; addr++) {
4945 /*
4946 * We're looking for a character that is _not_
4947 * contained in the token string.
4948 */
4949 if ((c = dtrace_load8(addr)) == '\0')
4950 break;
4951
4952 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
4953 break;
4954 }
4955
4956 if (c == '\0') {
4957 /*
4958 * We reached the end of the string without finding
4959 * any character that was not in the token string.
4960 * We return NULL in this case, and we set the saved
4961 * address to NULL as well.
4962 */
4963 regs[rd] = 0;
4964 mstate->dtms_strtok = 0;
4965 mstate->dtms_strtok_limit = 0;
4966 break;
4967 }
4968
4969 /*
4970 * From here on, we're copying into the destination string.
4971 */
4972 for (i = 0; addr < limit && i < size - 1; addr++) {
4973 if ((c = dtrace_load8(addr)) == '\0')
4974 break;
4975
4976 if (tokmap[c >> 3] & (1 << (c & 0x7)))
4977 break;
4978
4979 ASSERT(i < size);
4980 dest[i++] = c;
4981 }
4982
4983 ASSERT(i < size);
4984 dest[i] = '\0';
4985 regs[rd] = (uintptr_t)dest;
4986 mstate->dtms_scratch_ptr += size;
4987 mstate->dtms_strtok = addr;
4988 mstate->dtms_strtok_limit = limit;
4989 break;
4990 }
4991
4992 case DIF_SUBR_SUBSTR: {
4993 uintptr_t s = tupregs[0].dttk_value;
4994 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4995 char *d = (char *)mstate->dtms_scratch_ptr;
4996 int64_t index = (int64_t)tupregs[1].dttk_value;
4997 int64_t remaining = (int64_t)tupregs[2].dttk_value;
4998 size_t len = dtrace_strlen((char *)s, size);
4999 int64_t i;
5000
5001 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
5002 regs[rd] = 0;
5003 break;
5004 }
5005
5006 if (!DTRACE_INSCRATCH(mstate, size)) {
5007 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5008 regs[rd] = 0;
5009 break;
5010 }
5011
5012 if (nargs <= 2)
5013 remaining = (int64_t)size;
5014
5015 if (index < 0) {
5016 index += len;
5017
5018 if (index < 0 && index + remaining > 0) {
5019 remaining += index;
5020 index = 0;
5021 }
5022 }
5023
5024 if (index >= len || index < 0) {
5025 remaining = 0;
5026 } else if (remaining < 0) {
5027 remaining += len - index;
5028 } else if (index + remaining > size) {
5029 remaining = size - index;
5030 }
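
		/*
		 * For example (a hedged sketch of the resulting clamping):
		 * with s = "dtrace" (len == 6), substr("dtrace", -3) copies
		 * "ace" and substr("dtrace", 1, 3) copies "tra"; a wholly
		 * out-of-range index leaves remaining at 0 and yields the
		 * empty string.
		 */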
5031
5032 for (i = 0; i < remaining; i++) {
5033 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
5034 break;
5035 }
5036
5037 d[i] = '\0';
5038
5039 mstate->dtms_scratch_ptr += size;
5040 regs[rd] = (uintptr_t)d;
5041 break;
5042 }
5043
5044 case DIF_SUBR_JSON: {
5045 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5046 uintptr_t json = tupregs[0].dttk_value;
5047 size_t jsonlen = dtrace_strlen((char *)json, size);
5048 uintptr_t elem = tupregs[1].dttk_value;
5049 size_t elemlen = dtrace_strlen((char *)elem, size);
5050
5051 char *dest = (char *)mstate->dtms_scratch_ptr;
5052 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
5053 char *ee = elemlist;
5054 int nelems = 1;
5055 uintptr_t cur;
5056
5057 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
5058 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
5059 regs[rd] = 0;
5060 break;
5061 }
5062
5063 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
5064 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5065 regs[rd] = 0;
5066 break;
5067 }
5068
5069 /*
5070 * Read the element selector and split it up into a packed list
5071 * of strings.
5072 */
5073 for (cur = elem; cur < elem + elemlen; cur++) {
5074 char cc = dtrace_load8(cur);
5075
5076 if (cur == elem && cc == '[') {
5077 /*
5078 * If the first element selector key is
5079 * actually an array index then ignore the
5080 * bracket.
5081 */
5082 continue;
5083 }
5084
5085 if (cc == ']')
5086 continue;
5087
5088 if (cc == '.' || cc == '[') {
5089 nelems++;
5090 cc = '\0';
5091 }
5092
5093 *ee++ = cc;
5094 }
5095 *ee++ = '\0';
5096
5097 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
5098 nelems, dest)) != 0)
5099 mstate->dtms_scratch_ptr += jsonlen + 1;
5100 break;
5101 }
5102
5103 case DIF_SUBR_TOUPPER:
5104 case DIF_SUBR_TOLOWER: {
5105 uintptr_t s = tupregs[0].dttk_value;
5106 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5107 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5108 size_t len = dtrace_strlen((char *)s, size);
5109 char lower, upper, convert;
5110 int64_t i;
5111
5112 if (subr == DIF_SUBR_TOUPPER) {
5113 lower = 'a';
5114 upper = 'z';
5115 convert = 'A';
5116 } else {
5117 lower = 'A';
5118 upper = 'Z';
5119 convert = 'a';
5120 }
5121
5122 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
5123 regs[rd] = 0;
5124 break;
5125 }
5126
5127 if (!DTRACE_INSCRATCH(mstate, size)) {
5128 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5129 regs[rd] = 0;
5130 break;
5131 }
5132
5133 for (i = 0; i < size - 1; i++) {
5134 if ((c = dtrace_load8(s + i)) == '\0')
5135 break;
5136
5137 if (c >= lower && c <= upper)
5138 c = convert + (c - lower);
5139
5140 dest[i] = c;
5141 }
5142
5143 ASSERT(i < size);
5144 dest[i] = '\0';
5145 regs[rd] = (uintptr_t)dest;
5146 mstate->dtms_scratch_ptr += size;
5147 break;
5148 }
5149
5150 #ifdef illumos
5151 case DIF_SUBR_GETMAJOR:
5152 #ifdef _LP64
5153 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
5154 #else
5155 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
5156 #endif
5157 break;
5158
5159 case DIF_SUBR_GETMINOR:
5160 #ifdef _LP64
5161 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
5162 #else
5163 regs[rd] = tupregs[0].dttk_value & MAXMIN;
5164 #endif
5165 break;
5166
5167 case DIF_SUBR_DDI_PATHNAME: {
5168 /*
5169 * This one is a galactic mess. We are going to roughly
5170 * emulate ddi_pathname(), but it's made more complicated
5171 * by the fact that we (a) want to include the minor name and
5172 * (b) must proceed iteratively instead of recursively.
5173 */
5174 uintptr_t dest = mstate->dtms_scratch_ptr;
5175 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5176 char *start = (char *)dest, *end = start + size - 1;
5177 uintptr_t daddr = tupregs[0].dttk_value;
5178 int64_t minor = (int64_t)tupregs[1].dttk_value;
5179 char *s;
5180 int i, len, depth = 0;
5181
5182 /*
5183 * Due to all the pointer jumping we do and context we must
5184 * rely upon, we just mandate that the user must have kernel
5185 * read privileges to use this routine.
5186 */
5187 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
5188 *flags |= CPU_DTRACE_KPRIV;
5189 *illval = daddr;
5190 regs[rd] = 0;
5191 }
5192
5193 if (!DTRACE_INSCRATCH(mstate, size)) {
5194 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5195 regs[rd] = 0;
5196 break;
5197 }
5198
5199 *end = '\0';
5200
5201 /*
5202 * We want to have a name for the minor. In order to do this,
5203 * we need to walk the minor list from the devinfo. We want
5204 * to be sure that we don't infinitely walk a circular list,
5205 * so we check for circularity by sending a scout pointer
5206 * ahead two elements for every element that we iterate over;
5207 * if the list is circular, these will ultimately point to the
5208 * same element. You may recognize this little trick as the
5209 * answer to a stupid interview question -- one that always
5210 * seems to be asked by those who had to have it laboriously
5211 * explained to them, and who can't even concisely describe
5212 * the conditions under which one would be forced to resort to
5213 * this technique. Needless to say, those conditions are
5214 * found here -- and probably only here. Is this the only use
5215 * of this infamous trick in shipping, production code? If it
5216 * isn't, it probably should be...
5217 */
5218 if (minor != -1) {
5219 uintptr_t maddr = dtrace_loadptr(daddr +
5220 offsetof(struct dev_info, devi_minor));
5221
5222 uintptr_t next = offsetof(struct ddi_minor_data, next);
5223 uintptr_t name = offsetof(struct ddi_minor_data,
5224 d_minor) + offsetof(struct ddi_minor, name);
5225 uintptr_t dev = offsetof(struct ddi_minor_data,
5226 d_minor) + offsetof(struct ddi_minor, dev);
5227 uintptr_t scout;
5228
5229 if (maddr != NULL)
5230 scout = dtrace_loadptr(maddr + next);
5231
5232 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5233 uint64_t m;
5234 #ifdef _LP64
5235 m = dtrace_load64(maddr + dev) & MAXMIN64;
5236 #else
5237 m = dtrace_load32(maddr + dev) & MAXMIN;
5238 #endif
5239 if (m != minor) {
5240 maddr = dtrace_loadptr(maddr + next);
5241
5242 if (scout == NULL)
5243 continue;
5244
5245 scout = dtrace_loadptr(scout + next);
5246
5247 if (scout == NULL)
5248 continue;
5249
5250 scout = dtrace_loadptr(scout + next);
5251
5252 if (scout == NULL)
5253 continue;
5254
5255 if (scout == maddr) {
5256 *flags |= CPU_DTRACE_ILLOP;
5257 break;
5258 }
5259
5260 continue;
5261 }
5262
5263 /*
5264 * We have the minor data. Now we need to
5265 * copy the minor's name into the end of the
5266 * pathname.
5267 */
5268 s = (char *)dtrace_loadptr(maddr + name);
5269 len = dtrace_strlen(s, size);
5270
5271 if (*flags & CPU_DTRACE_FAULT)
5272 break;
5273
5274 if (len != 0) {
5275 if ((end -= (len + 1)) < start)
5276 break;
5277
5278 *end = ':';
5279 }
5280
5281 for (i = 1; i <= len; i++)
5282 end[i] = dtrace_load8((uintptr_t)s++);
5283 break;
5284 }
5285 }
5286
5287 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5288 ddi_node_state_t devi_state;
5289
5290 devi_state = dtrace_load32(daddr +
5291 offsetof(struct dev_info, devi_node_state));
5292
5293 if (*flags & CPU_DTRACE_FAULT)
5294 break;
5295
5296 if (devi_state >= DS_INITIALIZED) {
5297 s = (char *)dtrace_loadptr(daddr +
5298 offsetof(struct dev_info, devi_addr));
5299 len = dtrace_strlen(s, size);
5300
5301 if (*flags & CPU_DTRACE_FAULT)
5302 break;
5303
5304 if (len != 0) {
5305 if ((end -= (len + 1)) < start)
5306 break;
5307
5308 *end = '@';
5309 }
5310
5311 for (i = 1; i <= len; i++)
5312 end[i] = dtrace_load8((uintptr_t)s++);
5313 }
5314
5315 /*
5316 * Now for the node name...
5317 */
5318 s = (char *)dtrace_loadptr(daddr +
5319 offsetof(struct dev_info, devi_node_name));
5320
5321 daddr = dtrace_loadptr(daddr +
5322 offsetof(struct dev_info, devi_parent));
5323
5324 /*
5325 * If our parent is NULL (that is, if we're the root
5326 * node), we're going to use the special path
5327 * "devices".
5328 */
5329 if (daddr == 0)
5330 s = "devices";
5331
5332 len = dtrace_strlen(s, size);
5333 if (*flags & CPU_DTRACE_FAULT)
5334 break;
5335
5336 if ((end -= (len + 1)) < start)
5337 break;
5338
5339 for (i = 1; i <= len; i++)
5340 end[i] = dtrace_load8((uintptr_t)s++);
5341 *end = '/';
5342
5343 if (depth++ > dtrace_devdepth_max) {
5344 *flags |= CPU_DTRACE_ILLOP;
5345 break;
5346 }
5347 }
5348
5349 if (end < start)
5350 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5351
5352 if (daddr == 0) {
5353 regs[rd] = (uintptr_t)end;
5354 mstate->dtms_scratch_ptr += size;
5355 }
5356
5357 break;
5358 }
5359 #endif
5360
5361 case DIF_SUBR_STRJOIN: {
5362 char *d = (char *)mstate->dtms_scratch_ptr;
5363 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5364 uintptr_t s1 = tupregs[0].dttk_value;
5365 uintptr_t s2 = tupregs[1].dttk_value;
5366 int i = 0, j = 0;
5367 size_t lim1, lim2;
5368 char c;
5369
5370 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
5371 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
5372 regs[rd] = 0;
5373 break;
5374 }
5375
5376 if (!DTRACE_INSCRATCH(mstate, size)) {
5377 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5378 regs[rd] = 0;
5379 break;
5380 }
5381
5382 for (;;) {
5383 if (i >= size) {
5384 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5385 regs[rd] = 0;
5386 break;
5387 }
5388 c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
5389 if ((d[i++] = c) == '\0') {
5390 i--;
5391 break;
5392 }
5393 }
5394
5395 for (;;) {
5396 if (i >= size) {
5397 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5398 regs[rd] = 0;
5399 break;
5400 }
5401
5402 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
5403 if ((d[i++] = c) == '\0')
5404 break;
5405 }
5406
5407 if (i < size) {
5408 mstate->dtms_scratch_ptr += i;
5409 regs[rd] = (uintptr_t)d;
5410 }
5411
5412 break;
5413 }
5414
5415 case DIF_SUBR_STRTOLL: {
5416 uintptr_t s = tupregs[0].dttk_value;
5417 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5418 size_t lim;
5419 int base = 10;
5420
5421 if (nargs > 1) {
5422 if ((base = tupregs[1].dttk_value) <= 1 ||
5423 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5424 *flags |= CPU_DTRACE_ILLOP;
5425 break;
5426 }
5427 }
5428
5429 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5430 regs[rd] = INT64_MIN;
5431 break;
5432 }
5433
5434 regs[rd] = dtrace_strtoll((char *)s, base, lim);
5435 break;
5436 }
5437
5438 case DIF_SUBR_LLTOSTR: {
5439 int64_t i = (int64_t)tupregs[0].dttk_value;
5440 uint64_t val, digit;
5441 uint64_t size = 65; /* enough room for 2^64 in binary */
5442 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5443 int base = 10;
5444
5445 if (nargs > 1) {
5446 if ((base = tupregs[1].dttk_value) <= 1 ||
5447 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5448 *flags |= CPU_DTRACE_ILLOP;
5449 break;
5450 }
5451 }
5452
5453 val = (base == 10 && i < 0) ? i * -1 : i;
5454
5455 if (!DTRACE_INSCRATCH(mstate, size)) {
5456 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5457 regs[rd] = 0;
5458 break;
5459 }
5460
5461 for (*end-- = '\0'; val; val /= base) {
5462 if ((digit = val % base) <= '9' - '0') {
5463 *end-- = '0' + digit;
5464 } else {
5465 *end-- = 'a' + (digit - ('9' - '0') - 1);
5466 }
5467 }
5468
5469 if (i == 0 && base == 16)
5470 *end-- = '0';
5471
5472 if (base == 16)
5473 *end-- = 'x';
5474
5475 if (i == 0 || base == 8 || base == 16)
5476 *end-- = '0';
5477
5478 if (i < 0 && base == 10)
5479 *end-- = '-';
5480
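		/*
		 * end + 1 now points at the finished string.  Two worked
		 * examples of the digit loop and prefixes above (a sketch):
		 * lltostr(255, 16) produces "0xff" and lltostr(-42)
		 * produces "-42".
		 */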
5481 regs[rd] = (uintptr_t)end + 1;
5482 mstate->dtms_scratch_ptr += size;
5483 break;
5484 }
5485
5486 case DIF_SUBR_HTONS:
5487 case DIF_SUBR_NTOHS:
5488 #if BYTE_ORDER == BIG_ENDIAN
5489 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5490 #else
5491 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5492 #endif
5493 break;
5494
5495
5496 case DIF_SUBR_HTONL:
5497 case DIF_SUBR_NTOHL:
5498 #if BYTE_ORDER == BIG_ENDIAN
5499 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5500 #else
5501 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5502 #endif
5503 break;
5504
5505
5506 case DIF_SUBR_HTONLL:
5507 case DIF_SUBR_NTOHLL:
5508 #if BYTE_ORDER == BIG_ENDIAN
5509 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5510 #else
5511 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5512 #endif
5513 break;
5514
5515
5516 case DIF_SUBR_DIRNAME:
5517 case DIF_SUBR_BASENAME: {
5518 char *dest = (char *)mstate->dtms_scratch_ptr;
5519 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5520 uintptr_t src = tupregs[0].dttk_value;
5521 int i, j, len = dtrace_strlen((char *)src, size);
5522 int lastbase = -1, firstbase = -1, lastdir = -1;
5523 int start, end;
5524
5525 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5526 regs[rd] = 0;
5527 break;
5528 }
5529
5530 if (!DTRACE_INSCRATCH(mstate, size)) {
5531 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5532 regs[rd] = 0;
5533 break;
5534 }
5535
5536 /*
5537 		 * The basename and dirname for a zero-length string are
5538 * defined to be "."
5539 */
5540 if (len == 0) {
5541 len = 1;
5542 src = (uintptr_t)".";
5543 }
5544
5545 /*
5546 * Start from the back of the string, moving back toward the
5547 * front until we see a character that isn't a slash. That
5548 * character is the last character in the basename.
5549 */
5550 for (i = len - 1; i >= 0; i--) {
5551 if (dtrace_load8(src + i) != '/')
5552 break;
5553 }
5554
5555 if (i >= 0)
5556 lastbase = i;
5557
5558 /*
5559 * Starting from the last character in the basename, move
5560 * towards the front until we find a slash. The character
5561 * that we processed immediately before that is the first
5562 * character in the basename.
5563 */
5564 for (; i >= 0; i--) {
5565 if (dtrace_load8(src + i) == '/')
5566 break;
5567 }
5568
5569 if (i >= 0)
5570 firstbase = i + 1;
5571
5572 /*
5573 * Now keep going until we find a non-slash character. That
5574 * character is the last character in the dirname.
5575 */
5576 for (; i >= 0; i--) {
5577 if (dtrace_load8(src + i) != '/')
5578 break;
5579 }
5580
5581 if (i >= 0)
5582 lastdir = i;
5583
5584 ASSERT(!(lastbase == -1 && firstbase != -1));
5585 ASSERT(!(firstbase == -1 && lastdir != -1));
5586
5587 if (lastbase == -1) {
5588 /*
5589 * We didn't find a non-slash character. We know that
5590 * the length is non-zero, so the whole string must be
5591 * slashes. In either the dirname or the basename
5592 * case, we return '/'.
5593 */
5594 ASSERT(firstbase == -1);
5595 firstbase = lastbase = lastdir = 0;
5596 }
5597
5598 if (firstbase == -1) {
5599 /*
5600 * The entire string consists only of a basename
5601 * component. If we're looking for dirname, we need
5602 * to change our string to be just "."; if we're
5603 * looking for a basename, we'll just set the first
5604 * character of the basename to be 0.
5605 */
5606 if (subr == DIF_SUBR_DIRNAME) {
5607 ASSERT(lastdir == -1);
5608 src = (uintptr_t)".";
5609 lastdir = 0;
5610 } else {
5611 firstbase = 0;
5612 }
5613 }
5614
5615 if (subr == DIF_SUBR_DIRNAME) {
5616 if (lastdir == -1) {
5617 /*
5618 * We know that we have a slash in the name --
5619 * or lastdir would be set to 0, above. And
5620 * because lastdir is -1, we know that this
5621 * slash must be the first character. (That
5622 * is, the full string must be of the form
5623 * "/basename".) In this case, the last
5624 * character of the directory name is 0.
5625 */
5626 lastdir = 0;
5627 }
5628
5629 start = 0;
5630 end = lastdir;
5631 } else {
5632 ASSERT(subr == DIF_SUBR_BASENAME);
5633 ASSERT(firstbase != -1 && lastbase != -1);
5634 start = firstbase;
5635 end = lastbase;
5636 }
5637
5638 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5639 dest[j] = dtrace_load8(src + i);
5640
5641 dest[j] = '\0';
5642 regs[rd] = (uintptr_t)dest;
5643 mstate->dtms_scratch_ptr += size;
5644 break;
5645 }
5646
5647 case DIF_SUBR_GETF: {
5648 uintptr_t fd = tupregs[0].dttk_value;
5649 struct filedesc *fdp;
5650 file_t *fp;
5651
5652 if (!dtrace_priv_proc(state)) {
5653 regs[rd] = 0;
5654 break;
5655 }
5656 fdp = curproc->p_fd;
5657 FILEDESC_SLOCK(fdp);
5658 /*
5659 * XXXMJG this looks broken as no ref is taken.
5660 */
5661 fp = fget_noref(fdp, fd);
5662 mstate->dtms_getf = fp;
5663 regs[rd] = (uintptr_t)fp;
5664 FILEDESC_SUNLOCK(fdp);
5665 break;
5666 }
5667
5668 case DIF_SUBR_CLEANPATH: {
5669 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5670 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5671 uintptr_t src = tupregs[0].dttk_value;
5672 size_t lim;
5673 int i = 0, j = 0;
5674 #ifdef illumos
5675 zone_t *z;
5676 #endif
5677
5678 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5679 regs[rd] = 0;
5680 break;
5681 }
5682
5683 if (!DTRACE_INSCRATCH(mstate, size)) {
5684 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5685 regs[rd] = 0;
5686 break;
5687 }
5688
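		/*
		 * As an illustrative example, the loop below normalizes
		 * "/foo/../bar//./baz" to "/bar/baz": "//" and "/./"
		 * components are dropped, and "/../" backs the output up
		 * to the previous "/".
		 */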
5689 /*
5690 * Move forward, loading each character.
5691 */
5692 do {
5693 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5694 next:
5695 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
5696 break;
5697
5698 if (c != '/') {
5699 dest[j++] = c;
5700 continue;
5701 }
5702
5703 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5704
5705 if (c == '/') {
5706 /*
5707 * We have two slashes -- we can just advance
5708 * to the next character.
5709 */
5710 goto next;
5711 }
5712
5713 if (c != '.') {
5714 /*
5715 * This is not "." and it's not ".." -- we can
5716 * just store the "/" and this character and
5717 * drive on.
5718 */
5719 dest[j++] = '/';
5720 dest[j++] = c;
5721 continue;
5722 }
5723
5724 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5725
5726 if (c == '/') {
5727 /*
5728 * This is a "/./" component. We're not going
5729 * to store anything in the destination buffer;
5730 * we're just going to go to the next component.
5731 */
5732 goto next;
5733 }
5734
5735 if (c != '.') {
5736 /*
5737 * This is not ".." -- we can just store the
5738 * "/." and this character and continue
5739 * processing.
5740 */
5741 dest[j++] = '/';
5742 dest[j++] = '.';
5743 dest[j++] = c;
5744 continue;
5745 }
5746
5747 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5748
5749 if (c != '/' && c != '\0') {
5750 /*
5751 * This is not ".." -- it's "..[mumble]".
5752 * We'll store the "/.." and this character
5753 * and continue processing.
5754 */
5755 dest[j++] = '/';
5756 dest[j++] = '.';
5757 dest[j++] = '.';
5758 dest[j++] = c;
5759 continue;
5760 }
5761
5762 /*
5763 * This is "/../" or "/..\0". We need to back up
5764 * our destination pointer until we find a "/".
5765 */
5766 i--;
5767 while (j != 0 && dest[--j] != '/')
5768 continue;
5769
5770 if (c == '\0')
5771 dest[++j] = '/';
5772 } while (c != '\0');
5773
5774 dest[j] = '\0';
5775
5776 #ifdef illumos
5777 if (mstate->dtms_getf != NULL &&
5778 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
5779 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
5780 /*
5781 * If we've done a getf() as a part of this ECB and we
5782 * don't have kernel access (and we're not in the global
5783 * zone), check if the path we cleaned up begins with
5784 * the zone's root path, and trim it off if so. Note
5785 * that this is an output cleanliness issue, not a
5786 * security issue: knowing one's zone root path does
5787 * not enable privilege escalation.
5788 */
5789 if (strstr(dest, z->zone_rootpath) == dest)
5790 dest += strlen(z->zone_rootpath) - 1;
5791 }
5792 #endif
5793
5794 regs[rd] = (uintptr_t)dest;
5795 mstate->dtms_scratch_ptr += size;
5796 break;
5797 }
5798
5799 case DIF_SUBR_INET_NTOA:
5800 case DIF_SUBR_INET_NTOA6:
5801 case DIF_SUBR_INET_NTOP: {
5802 size_t size;
5803 int af, argi, i;
5804 char *base, *end;
5805
5806 if (subr == DIF_SUBR_INET_NTOP) {
5807 af = (int)tupregs[0].dttk_value;
5808 argi = 1;
5809 } else {
5810 			af = subr == DIF_SUBR_INET_NTOA ? AF_INET : AF_INET6;
5811 argi = 0;
5812 }
5813
5814 if (af == AF_INET) {
5815 ipaddr_t ip4;
5816 uint8_t *ptr8, val;
5817
5818 if (!dtrace_canload(tupregs[argi].dttk_value,
5819 sizeof (ipaddr_t), mstate, vstate)) {
5820 regs[rd] = 0;
5821 break;
5822 }
5823
5824 /*
5825 * Safely load the IPv4 address.
5826 */
5827 ip4 = dtrace_load32(tupregs[argi].dttk_value);
5828
5829 /*
5830 			 * Check that an IPv4 string will fit in scratch.
5831 */
5832 size = INET_ADDRSTRLEN;
5833 if (!DTRACE_INSCRATCH(mstate, size)) {
5834 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5835 regs[rd] = 0;
5836 break;
5837 }
5838 base = (char *)mstate->dtms_scratch_ptr;
5839 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5840
5841 /*
5842 * Stringify as a dotted decimal quad.
5843 */
5844 *end-- = '\0';
5845 ptr8 = (uint8_t *)&ip4;
5846 for (i = 3; i >= 0; i--) {
5847 val = ptr8[i];
5848
5849 if (val == 0) {
5850 *end-- = '0';
5851 } else {
5852 for (; val; val /= 10) {
5853 *end-- = '0' + (val % 10);
5854 }
5855 }
5856
5857 if (i > 0)
5858 *end-- = '.';
5859 }
5860 ASSERT(end + 1 >= base);
5861
5862 } else if (af == AF_INET6) {
5863 struct in6_addr ip6;
5864 int firstzero, tryzero, numzero, v6end;
5865 uint16_t val;
5866 const char digits[] = "0123456789abcdef";
5867
5868 /*
5869 * Stringify using RFC 1884 convention 2 - 16 bit
5870 * hexadecimal values with a zero-run compression.
5871 * Lower case hexadecimal digits are used.
5872 			 * e.g., fe80::214:4fff:fe0b:76c8.
5873 * The IPv4 embedded form is returned for inet_ntop,
5874 * just the IPv4 string is returned for inet_ntoa6.
5875 */
5876
5877 if (!dtrace_canload(tupregs[argi].dttk_value,
5878 sizeof (struct in6_addr), mstate, vstate)) {
5879 regs[rd] = 0;
5880 break;
5881 }
5882
5883 /*
5884 * Safely load the IPv6 address.
5885 */
5886 dtrace_bcopy(
5887 (void *)(uintptr_t)tupregs[argi].dttk_value,
5888 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
5889
5890 /*
5891 			 * Check that an IPv6 string will fit in scratch.
5892 */
5893 size = INET6_ADDRSTRLEN;
5894 if (!DTRACE_INSCRATCH(mstate, size)) {
5895 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5896 regs[rd] = 0;
5897 break;
5898 }
5899 base = (char *)mstate->dtms_scratch_ptr;
5900 end = (char *)mstate->dtms_scratch_ptr + size - 1;
5901 *end-- = '\0';
5902
5903 /*
5904 * Find the longest run of 16 bit zero values
5905 * for the single allowed zero compression - "::".
5906 */
5907 firstzero = -1;
5908 tryzero = -1;
5909 numzero = 1;
5910 for (i = 0; i < sizeof (struct in6_addr); i++) {
5911 #ifdef illumos
5912 if (ip6._S6_un._S6_u8[i] == 0 &&
5913 #else
5914 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
5915 #endif
5916 tryzero == -1 && i % 2 == 0) {
5917 tryzero = i;
5918 continue;
5919 }
5920
5921 if (tryzero != -1 &&
5922 #ifdef illumos
5923 (ip6._S6_un._S6_u8[i] != 0 ||
5924 #else
5925 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
5926 #endif
5927 i == sizeof (struct in6_addr) - 1)) {
5928
5929 if (i - tryzero <= numzero) {
5930 tryzero = -1;
5931 continue;
5932 }
5933
5934 firstzero = tryzero;
5935 numzero = i - i % 2 - tryzero;
5936 tryzero = -1;
5937
5938 #ifdef illumos
5939 if (ip6._S6_un._S6_u8[i] == 0 &&
5940 #else
5941 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
5942 #endif
5943 i == sizeof (struct in6_addr) - 1)
5944 numzero += 2;
5945 }
5946 }
5947 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
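			/*
			 * For example (illustrative), for
			 * fe80::214:4fff:fe0b:76c8 the longest zero run
			 * covers bytes 2 through 7, so firstzero is 2 and
			 * numzero is 6; those bytes are rendered below as
			 * the single "::".
			 */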
5948
5949 /*
5950 * Check for an IPv4 embedded address.
5951 */
5952 v6end = sizeof (struct in6_addr) - 2;
5953 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
5954 IN6_IS_ADDR_V4COMPAT(&ip6)) {
5955 for (i = sizeof (struct in6_addr) - 1;
5956 i >= DTRACE_V4MAPPED_OFFSET; i--) {
5957 ASSERT(end >= base);
5958
5959 #ifdef illumos
5960 val = ip6._S6_un._S6_u8[i];
5961 #else
5962 val = ip6.__u6_addr.__u6_addr8[i];
5963 #endif
5964
5965 if (val == 0) {
5966 *end-- = '0';
5967 } else {
5968 for (; val; val /= 10) {
5969 *end-- = '0' + val % 10;
5970 }
5971 }
5972
5973 if (i > DTRACE_V4MAPPED_OFFSET)
5974 *end-- = '.';
5975 }
5976
5977 if (subr == DIF_SUBR_INET_NTOA6)
5978 goto inetout;
5979
5980 /*
5981 * Set v6end to skip the IPv4 address that
5982 * we have already stringified.
5983 */
5984 v6end = 10;
5985 }
5986
5987 /*
5988 * Build the IPv6 string by working through the
5989 * address in reverse.
5990 */
5991 for (i = v6end; i >= 0; i -= 2) {
5992 ASSERT(end >= base);
5993
5994 if (i == firstzero + numzero - 2) {
5995 *end-- = ':';
5996 *end-- = ':';
5997 i -= numzero - 2;
5998 continue;
5999 }
6000
6001 if (i < 14 && i != firstzero - 2)
6002 *end-- = ':';
6003
6004 #ifdef illumos
6005 val = (ip6._S6_un._S6_u8[i] << 8) +
6006 ip6._S6_un._S6_u8[i + 1];
6007 #else
6008 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
6009 ip6.__u6_addr.__u6_addr8[i + 1];
6010 #endif
6011
6012 if (val == 0) {
6013 *end-- = '0';
6014 } else {
6015 for (; val; val /= 16) {
6016 *end-- = digits[val % 16];
6017 }
6018 }
6019 }
6020 ASSERT(end + 1 >= base);
6021
6022 } else {
6023 /*
6024 			 * The user didn't use AF_INET or AF_INET6.
6025 */
6026 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6027 regs[rd] = 0;
6028 break;
6029 }
6030
6031 inetout: regs[rd] = (uintptr_t)end + 1;
6032 mstate->dtms_scratch_ptr += size;
6033 break;
6034 }
6035
6036 case DIF_SUBR_MEMREF: {
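		/*
		 * memref() packs an (address, length) pair into scratch
		 * space; printm() later consumes this two-element array
		 * (see the DTRACEACT_PRINTM handling in dtrace_probe()).
		 */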
6037 uintptr_t size = 2 * sizeof(uintptr_t);
6038 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t));
6039 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
6040
6041 /* address and length */
6042 memref[0] = tupregs[0].dttk_value;
6043 memref[1] = tupregs[1].dttk_value;
6044
6045 regs[rd] = (uintptr_t) memref;
6046 mstate->dtms_scratch_ptr += scratch_size;
6047 break;
6048 }
6049
6050 #ifndef illumos
6051 case DIF_SUBR_MEMSTR: {
6052 char *str = (char *)mstate->dtms_scratch_ptr;
6053 uintptr_t mem = tupregs[0].dttk_value;
6054 char c = tupregs[1].dttk_value;
6055 size_t size = tupregs[2].dttk_value;
6056 uint8_t n;
6057 int i;
6058
6059 regs[rd] = 0;
6060
6061 if (size == 0)
6062 break;
6063
6064 if (!dtrace_canload(mem, size - 1, mstate, vstate))
6065 break;
6066
6067 if (!DTRACE_INSCRATCH(mstate, size)) {
6068 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6069 break;
6070 }
6071
6072 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) {
6073 *flags |= CPU_DTRACE_ILLOP;
6074 break;
6075 }
6076
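		/*
		 * Replace embedded NUL bytes with the given separator;
		 * e.g. (illustrative), an 8-byte region containing
		 * "foo\0bar\0" becomes the string "foo,bar" when the
		 * separator is ','.
		 */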
6077 for (i = 0; i < size - 1; i++) {
6078 n = dtrace_load8(mem++);
6079 str[i] = (n == 0) ? c : n;
6080 }
6081 str[size - 1] = 0;
6082
6083 regs[rd] = (uintptr_t)str;
6084 mstate->dtms_scratch_ptr += size;
6085 break;
6086 }
6087 #endif
6088 }
6089 }
6090
6091 /*
6092 * Emulate the execution of DTrace IR instructions specified by the given
6093 * DIF object. This function is deliberately void of assertions as all of
6094 * the necessary checks are handled by a call to dtrace_difo_validate().
6095 */
6096 static uint64_t
6097 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
6098 dtrace_vstate_t *vstate, dtrace_state_t *state)
6099 {
6100 const dif_instr_t *text = difo->dtdo_buf;
6101 const uint_t textlen = difo->dtdo_len;
6102 const char *strtab = difo->dtdo_strtab;
6103 const uint64_t *inttab = difo->dtdo_inttab;
6104
6105 uint64_t rval = 0;
6106 dtrace_statvar_t *svar;
6107 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
6108 dtrace_difv_t *v;
6109 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
6110 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;
6111
6112 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
6113 uint64_t regs[DIF_DIR_NREGS];
6114 uint64_t *tmp;
6115
6116 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
6117 int64_t cc_r;
6118 uint_t pc = 0, id, opc = 0;
6119 uint8_t ttop = 0;
6120 dif_instr_t instr;
6121 uint_t r1, r2, rd;
6122
6123 /*
6124 * We stash the current DIF object into the machine state: we need it
6125 * for subsequent access checking.
6126 */
6127 mstate->dtms_difo = difo;
6128
6129 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
6130
6131 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
6132 opc = pc;
6133
6134 instr = text[pc++];
6135 r1 = DIF_INSTR_R1(instr);
6136 r2 = DIF_INSTR_R2(instr);
6137 rd = DIF_INSTR_RD(instr);
6138
6139 switch (DIF_INSTR_OP(instr)) {
6140 case DIF_OP_OR:
6141 regs[rd] = regs[r1] | regs[r2];
6142 break;
6143 case DIF_OP_XOR:
6144 regs[rd] = regs[r1] ^ regs[r2];
6145 break;
6146 case DIF_OP_AND:
6147 regs[rd] = regs[r1] & regs[r2];
6148 break;
6149 case DIF_OP_SLL:
6150 regs[rd] = regs[r1] << regs[r2];
6151 break;
6152 case DIF_OP_SRL:
6153 regs[rd] = regs[r1] >> regs[r2];
6154 break;
6155 case DIF_OP_SUB:
6156 regs[rd] = regs[r1] - regs[r2];
6157 break;
6158 case DIF_OP_ADD:
6159 regs[rd] = regs[r1] + regs[r2];
6160 break;
6161 case DIF_OP_MUL:
6162 regs[rd] = regs[r1] * regs[r2];
6163 break;
6164 case DIF_OP_SDIV:
6165 if (regs[r2] == 0) {
6166 regs[rd] = 0;
6167 *flags |= CPU_DTRACE_DIVZERO;
6168 } else {
6169 regs[rd] = (int64_t)regs[r1] /
6170 (int64_t)regs[r2];
6171 }
6172 break;
6173
6174 case DIF_OP_UDIV:
6175 if (regs[r2] == 0) {
6176 regs[rd] = 0;
6177 *flags |= CPU_DTRACE_DIVZERO;
6178 } else {
6179 regs[rd] = regs[r1] / regs[r2];
6180 }
6181 break;
6182
6183 case DIF_OP_SREM:
6184 if (regs[r2] == 0) {
6185 regs[rd] = 0;
6186 *flags |= CPU_DTRACE_DIVZERO;
6187 } else {
6188 regs[rd] = (int64_t)regs[r1] %
6189 (int64_t)regs[r2];
6190 }
6191 break;
6192
6193 case DIF_OP_UREM:
6194 if (regs[r2] == 0) {
6195 regs[rd] = 0;
6196 *flags |= CPU_DTRACE_DIVZERO;
6197 } else {
6198 regs[rd] = regs[r1] % regs[r2];
6199 }
6200 break;
6201
6202 case DIF_OP_NOT:
6203 regs[rd] = ~regs[r1];
6204 break;
6205 case DIF_OP_MOV:
6206 regs[rd] = regs[r1];
6207 break;
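		/*
		 * The comparison and test opcodes below maintain SPARC-like
		 * condition codes: cc_n (negative), cc_z (zero), cc_v
		 * (overflow) and cc_c (carry); the conditional branches
		 * combine them to implement both signed and unsigned
		 * comparisons.
		 */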
6208 case DIF_OP_CMP:
6209 cc_r = regs[r1] - regs[r2];
6210 cc_n = cc_r < 0;
6211 cc_z = cc_r == 0;
6212 cc_v = 0;
6213 cc_c = regs[r1] < regs[r2];
6214 break;
6215 case DIF_OP_TST:
6216 cc_n = cc_v = cc_c = 0;
6217 cc_z = regs[r1] == 0;
6218 break;
6219 case DIF_OP_BA:
6220 pc = DIF_INSTR_LABEL(instr);
6221 break;
6222 case DIF_OP_BE:
6223 if (cc_z)
6224 pc = DIF_INSTR_LABEL(instr);
6225 break;
6226 case DIF_OP_BNE:
6227 if (cc_z == 0)
6228 pc = DIF_INSTR_LABEL(instr);
6229 break;
6230 case DIF_OP_BG:
6231 if ((cc_z | (cc_n ^ cc_v)) == 0)
6232 pc = DIF_INSTR_LABEL(instr);
6233 break;
6234 case DIF_OP_BGU:
6235 if ((cc_c | cc_z) == 0)
6236 pc = DIF_INSTR_LABEL(instr);
6237 break;
6238 case DIF_OP_BGE:
6239 if ((cc_n ^ cc_v) == 0)
6240 pc = DIF_INSTR_LABEL(instr);
6241 break;
6242 case DIF_OP_BGEU:
6243 if (cc_c == 0)
6244 pc = DIF_INSTR_LABEL(instr);
6245 break;
6246 case DIF_OP_BL:
6247 if (cc_n ^ cc_v)
6248 pc = DIF_INSTR_LABEL(instr);
6249 break;
6250 case DIF_OP_BLU:
6251 if (cc_c)
6252 pc = DIF_INSTR_LABEL(instr);
6253 break;
6254 case DIF_OP_BLE:
6255 if (cc_z | (cc_n ^ cc_v))
6256 pc = DIF_INSTR_LABEL(instr);
6257 break;
6258 case DIF_OP_BLEU:
6259 if (cc_c | cc_z)
6260 pc = DIF_INSTR_LABEL(instr);
6261 break;
6262 case DIF_OP_RLDSB:
6263 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6264 break;
6265 /*FALLTHROUGH*/
6266 case DIF_OP_LDSB:
6267 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
6268 break;
6269 case DIF_OP_RLDSH:
6270 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6271 break;
6272 /*FALLTHROUGH*/
6273 case DIF_OP_LDSH:
6274 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
6275 break;
6276 case DIF_OP_RLDSW:
6277 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6278 break;
6279 /*FALLTHROUGH*/
6280 case DIF_OP_LDSW:
6281 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
6282 break;
6283 case DIF_OP_RLDUB:
6284 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6285 break;
6286 /*FALLTHROUGH*/
6287 case DIF_OP_LDUB:
6288 regs[rd] = dtrace_load8(regs[r1]);
6289 break;
6290 case DIF_OP_RLDUH:
6291 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6292 break;
6293 /*FALLTHROUGH*/
6294 case DIF_OP_LDUH:
6295 regs[rd] = dtrace_load16(regs[r1]);
6296 break;
6297 case DIF_OP_RLDUW:
6298 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6299 break;
6300 /*FALLTHROUGH*/
6301 case DIF_OP_LDUW:
6302 regs[rd] = dtrace_load32(regs[r1]);
6303 break;
6304 case DIF_OP_RLDX:
6305 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
6306 break;
6307 /*FALLTHROUGH*/
6308 case DIF_OP_LDX:
6309 regs[rd] = dtrace_load64(regs[r1]);
6310 break;
6311 case DIF_OP_ULDSB:
6312 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6313 regs[rd] = (int8_t)
6314 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6315 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6316 break;
6317 case DIF_OP_ULDSH:
6318 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6319 regs[rd] = (int16_t)
6320 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6321 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6322 break;
6323 case DIF_OP_ULDSW:
6324 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6325 regs[rd] = (int32_t)
6326 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6327 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6328 break;
6329 case DIF_OP_ULDUB:
6330 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6331 regs[rd] =
6332 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6333 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6334 break;
6335 case DIF_OP_ULDUH:
6336 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6337 regs[rd] =
6338 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6339 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6340 break;
6341 case DIF_OP_ULDUW:
6342 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6343 regs[rd] =
6344 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6345 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6346 break;
6347 case DIF_OP_ULDX:
6348 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6349 regs[rd] =
6350 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
6351 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6352 break;
6353 case DIF_OP_RET:
6354 rval = regs[rd];
6355 pc = textlen;
6356 break;
6357 case DIF_OP_NOP:
6358 break;
6359 case DIF_OP_SETX:
6360 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
6361 break;
6362 case DIF_OP_SETS:
6363 regs[rd] = (uint64_t)(uintptr_t)
6364 (strtab + DIF_INSTR_STRING(instr));
6365 break;
6366 case DIF_OP_SCMP: {
6367 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6368 uintptr_t s1 = regs[r1];
6369 uintptr_t s2 = regs[r2];
6370 size_t lim1, lim2;
6371
6372 /*
6373 * If one of the strings is NULL then the limit becomes
6374 * 0 which compares 0 characters in dtrace_strncmp()
6375 * resulting in a false positive. dtrace_strncmp()
6376 * treats a NULL as an empty 1-char string.
6377 */
6378 lim1 = lim2 = 1;
6379
6380 if (s1 != 0 &&
6381 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6382 break;
6383 if (s2 != 0 &&
6384 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6385 break;
6386
6387 cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6388 MIN(lim1, lim2));
6389
6390 cc_n = cc_r < 0;
6391 cc_z = cc_r == 0;
6392 cc_v = cc_c = 0;
6393 break;
6394 }
6395 case DIF_OP_LDGA:
6396 regs[rd] = dtrace_dif_variable(mstate, state,
6397 r1, regs[r2]);
6398 break;
6399 case DIF_OP_LDGS:
6400 id = DIF_INSTR_VAR(instr);
6401
6402 if (id >= DIF_VAR_OTHER_UBASE) {
6403 uintptr_t a;
6404
6405 id -= DIF_VAR_OTHER_UBASE;
6406 svar = vstate->dtvs_globals[id];
6407 ASSERT(svar != NULL);
6408 v = &svar->dtsv_var;
6409
6410 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6411 regs[rd] = svar->dtsv_data;
6412 break;
6413 }
6414
6415 a = (uintptr_t)svar->dtsv_data;
6416
6417 if (*(uint8_t *)a == UINT8_MAX) {
6418 /*
6419 * If the 0th byte is set to UINT8_MAX
6420 * then this is to be treated as a
6421 * reference to a NULL variable.
6422 */
6423 regs[rd] = 0;
6424 } else {
6425 regs[rd] = a + sizeof (uint64_t);
6426 }
6427
6428 break;
6429 }
6430
6431 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6432 break;
6433
6434 case DIF_OP_STGS:
6435 id = DIF_INSTR_VAR(instr);
6436
6437 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6438 id -= DIF_VAR_OTHER_UBASE;
6439
6440 VERIFY(id < vstate->dtvs_nglobals);
6441 svar = vstate->dtvs_globals[id];
6442 ASSERT(svar != NULL);
6443 v = &svar->dtsv_var;
6444
6445 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6446 uintptr_t a = (uintptr_t)svar->dtsv_data;
6447 size_t lim;
6448
6449 ASSERT(a != 0);
6450 ASSERT(svar->dtsv_size != 0);
6451
6452 if (regs[rd] == 0) {
6453 *(uint8_t *)a = UINT8_MAX;
6454 break;
6455 } else {
6456 *(uint8_t *)a = 0;
6457 a += sizeof (uint64_t);
6458 }
6459 if (!dtrace_vcanload(
6460 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6461 &lim, mstate, vstate))
6462 break;
6463
6464 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6465 (void *)a, &v->dtdv_type, lim);
6466 break;
6467 }
6468
6469 svar->dtsv_data = regs[rd];
6470 break;
6471
6472 case DIF_OP_LDTA:
6473 /*
6474 * There are no DTrace built-in thread-local arrays at
6475 			 * present.  This opcode is reserved for future work.
6476 */
6477 *flags |= CPU_DTRACE_ILLOP;
6478 regs[rd] = 0;
6479 break;
6480
6481 case DIF_OP_LDLS:
6482 id = DIF_INSTR_VAR(instr);
6483
6484 if (id < DIF_VAR_OTHER_UBASE) {
6485 /*
6486 * For now, this has no meaning.
6487 */
6488 regs[rd] = 0;
6489 break;
6490 }
6491
6492 id -= DIF_VAR_OTHER_UBASE;
6493
6494 ASSERT(id < vstate->dtvs_nlocals);
6495 ASSERT(vstate->dtvs_locals != NULL);
6496
6497 svar = vstate->dtvs_locals[id];
6498 ASSERT(svar != NULL);
6499 v = &svar->dtsv_var;
6500
6501 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6502 uintptr_t a = (uintptr_t)svar->dtsv_data;
6503 size_t sz = v->dtdv_type.dtdt_size;
6504 size_t lim;
6505
6506 sz += sizeof (uint64_t);
6507 ASSERT(svar->dtsv_size == NCPU * sz);
6508 a += curcpu * sz;
6509
6510 if (*(uint8_t *)a == UINT8_MAX) {
6511 /*
6512 * If the 0th byte is set to UINT8_MAX
6513 * then this is to be treated as a
6514 * reference to a NULL variable.
6515 */
6516 regs[rd] = 0;
6517 } else {
6518 regs[rd] = a + sizeof (uint64_t);
6519 }
6520
6521 break;
6522 }
6523
6524 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6525 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6526 regs[rd] = tmp[curcpu];
6527 break;
6528
6529 case DIF_OP_STLS:
6530 id = DIF_INSTR_VAR(instr);
6531
6532 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6533 id -= DIF_VAR_OTHER_UBASE;
6534 VERIFY(id < vstate->dtvs_nlocals);
6535
6536 ASSERT(vstate->dtvs_locals != NULL);
6537 svar = vstate->dtvs_locals[id];
6538 ASSERT(svar != NULL);
6539 v = &svar->dtsv_var;
6540
6541 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6542 uintptr_t a = (uintptr_t)svar->dtsv_data;
6543 size_t sz = v->dtdv_type.dtdt_size;
6544 size_t lim;
6545
6546 sz += sizeof (uint64_t);
6547 ASSERT(svar->dtsv_size == NCPU * sz);
6548 a += curcpu * sz;
6549
6550 if (regs[rd] == 0) {
6551 *(uint8_t *)a = UINT8_MAX;
6552 break;
6553 } else {
6554 *(uint8_t *)a = 0;
6555 a += sizeof (uint64_t);
6556 }
6557
6558 if (!dtrace_vcanload(
6559 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6560 &lim, mstate, vstate))
6561 break;
6562
6563 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6564 (void *)a, &v->dtdv_type, lim);
6565 break;
6566 }
6567
6568 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6569 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6570 tmp[curcpu] = regs[rd];
6571 break;
6572
6573 case DIF_OP_LDTS: {
6574 dtrace_dynvar_t *dvar;
6575 dtrace_key_t *key;
6576
6577 id = DIF_INSTR_VAR(instr);
6578 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6579 id -= DIF_VAR_OTHER_UBASE;
6580 v = &vstate->dtvs_tlocals[id];
6581
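			/*
			 * Thread-local variables live in the dynamic
			 * variable space, keyed by the variable id and a
			 * per-thread key derived via DTRACE_TLS_THRKEY().
			 */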
6582 key = &tupregs[DIF_DTR_NREGS];
6583 key[0].dttk_value = (uint64_t)id;
6584 key[0].dttk_size = 0;
6585 DTRACE_TLS_THRKEY(key[1].dttk_value);
6586 key[1].dttk_size = 0;
6587
6588 dvar = dtrace_dynvar(dstate, 2, key,
6589 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6590 mstate, vstate);
6591
6592 if (dvar == NULL) {
6593 regs[rd] = 0;
6594 break;
6595 }
6596
6597 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6598 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6599 } else {
6600 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6601 }
6602
6603 break;
6604 }
6605
6606 case DIF_OP_STTS: {
6607 dtrace_dynvar_t *dvar;
6608 dtrace_key_t *key;
6609
6610 id = DIF_INSTR_VAR(instr);
6611 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6612 id -= DIF_VAR_OTHER_UBASE;
6613 VERIFY(id < vstate->dtvs_ntlocals);
6614
6615 key = &tupregs[DIF_DTR_NREGS];
6616 key[0].dttk_value = (uint64_t)id;
6617 key[0].dttk_size = 0;
6618 DTRACE_TLS_THRKEY(key[1].dttk_value);
6619 key[1].dttk_size = 0;
6620 v = &vstate->dtvs_tlocals[id];
6621
6622 dvar = dtrace_dynvar(dstate, 2, key,
6623 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6624 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6625 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6626 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6627
6628 /*
6629 * Given that we're storing to thread-local data,
6630 * we need to flush our predicate cache.
6631 */
6632 curthread->t_predcache = 0;
6633
6634 if (dvar == NULL)
6635 break;
6636
6637 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6638 size_t lim;
6639
6640 if (!dtrace_vcanload(
6641 (void *)(uintptr_t)regs[rd],
6642 &v->dtdv_type, &lim, mstate, vstate))
6643 break;
6644
6645 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6646 dvar->dtdv_data, &v->dtdv_type, lim);
6647 } else {
6648 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6649 }
6650
6651 break;
6652 }
6653
6654 case DIF_OP_SRA:
6655 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6656 break;
6657
6658 case DIF_OP_CALL:
6659 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6660 regs, tupregs, ttop, mstate, state);
6661 break;
6662
6663 case DIF_OP_PUSHTR:
6664 if (ttop == DIF_DTR_NREGS) {
6665 *flags |= CPU_DTRACE_TUPOFLOW;
6666 break;
6667 }
6668
6669 if (r1 == DIF_TYPE_STRING) {
6670 /*
6671 * If this is a string type and the size is 0,
6672 * we'll use the system-wide default string
6673 * size. Note that we are _not_ looking at
6674 * the value of the DTRACEOPT_STRSIZE option;
6675 * had this been set, we would expect to have
6676 * a non-zero size value in the "pushtr".
6677 */
6678 tupregs[ttop].dttk_size =
6679 dtrace_strlen((char *)(uintptr_t)regs[rd],
6680 regs[r2] ? regs[r2] :
6681 dtrace_strsize_default) + 1;
6682 } else {
6683 if (regs[r2] > LONG_MAX) {
6684 *flags |= CPU_DTRACE_ILLOP;
6685 break;
6686 }
6687
6688 tupregs[ttop].dttk_size = regs[r2];
6689 }
6690
6691 tupregs[ttop++].dttk_value = regs[rd];
6692 break;
6693
6694 case DIF_OP_PUSHTV:
6695 if (ttop == DIF_DTR_NREGS) {
6696 *flags |= CPU_DTRACE_TUPOFLOW;
6697 break;
6698 }
6699
6700 tupregs[ttop].dttk_value = regs[rd];
6701 tupregs[ttop++].dttk_size = 0;
6702 break;
6703
6704 case DIF_OP_POPTS:
6705 if (ttop != 0)
6706 ttop--;
6707 break;
6708
6709 case DIF_OP_FLUSHTS:
6710 ttop = 0;
6711 break;
6712
6713 case DIF_OP_LDGAA:
6714 case DIF_OP_LDTAA: {
6715 dtrace_dynvar_t *dvar;
6716 dtrace_key_t *key = tupregs;
6717 uint_t nkeys = ttop;
6718
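			/*
			 * Associative array loads use the keys pushed onto
			 * the tuple stack, with the variable id (and, for
			 * thread-local arrays, the thread key) appended
			 * before the dynamic variable lookup.
			 */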
6719 id = DIF_INSTR_VAR(instr);
6720 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6721 id -= DIF_VAR_OTHER_UBASE;
6722
6723 key[nkeys].dttk_value = (uint64_t)id;
6724 key[nkeys++].dttk_size = 0;
6725
6726 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6727 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6728 key[nkeys++].dttk_size = 0;
6729 VERIFY(id < vstate->dtvs_ntlocals);
6730 v = &vstate->dtvs_tlocals[id];
6731 } else {
6732 VERIFY(id < vstate->dtvs_nglobals);
6733 v = &vstate->dtvs_globals[id]->dtsv_var;
6734 }
6735
6736 dvar = dtrace_dynvar(dstate, nkeys, key,
6737 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6738 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6739 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6740
6741 if (dvar == NULL) {
6742 regs[rd] = 0;
6743 break;
6744 }
6745
6746 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6747 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6748 } else {
6749 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6750 }
6751
6752 break;
6753 }
6754
6755 case DIF_OP_STGAA:
6756 case DIF_OP_STTAA: {
6757 dtrace_dynvar_t *dvar;
6758 dtrace_key_t *key = tupregs;
6759 uint_t nkeys = ttop;
6760
6761 id = DIF_INSTR_VAR(instr);
6762 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6763 id -= DIF_VAR_OTHER_UBASE;
6764
6765 key[nkeys].dttk_value = (uint64_t)id;
6766 key[nkeys++].dttk_size = 0;
6767
6768 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6769 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6770 key[nkeys++].dttk_size = 0;
6771 VERIFY(id < vstate->dtvs_ntlocals);
6772 v = &vstate->dtvs_tlocals[id];
6773 } else {
6774 VERIFY(id < vstate->dtvs_nglobals);
6775 v = &vstate->dtvs_globals[id]->dtsv_var;
6776 }
6777
6778 dvar = dtrace_dynvar(dstate, nkeys, key,
6779 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6780 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6781 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6782 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6783
6784 if (dvar == NULL)
6785 break;
6786
6787 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6788 size_t lim;
6789
6790 if (!dtrace_vcanload(
6791 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6792 &lim, mstate, vstate))
6793 break;
6794
6795 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6796 dvar->dtdv_data, &v->dtdv_type, lim);
6797 } else {
6798 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6799 }
6800
6801 break;
6802 }
6803
6804 case DIF_OP_ALLOCS: {
6805 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
6806 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
6807
6808 /*
6809 * Rounding up the user allocation size could have
6810 * overflowed large, bogus allocations (like -1ULL) to
6811 * 0.
6812 */
6813 if (size < regs[r1] ||
6814 !DTRACE_INSCRATCH(mstate, size)) {
6815 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6816 regs[rd] = 0;
6817 break;
6818 }
6819
6820 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
6821 mstate->dtms_scratch_ptr += size;
6822 regs[rd] = ptr;
6823 break;
6824 }
6825
6826 case DIF_OP_COPYS:
6827 if (!dtrace_canstore(regs[rd], regs[r2],
6828 mstate, vstate)) {
6829 *flags |= CPU_DTRACE_BADADDR;
6830 *illval = regs[rd];
6831 break;
6832 }
6833
6834 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
6835 break;
6836
6837 dtrace_bcopy((void *)(uintptr_t)regs[r1],
6838 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
6839 break;
6840
6841 case DIF_OP_STB:
6842 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
6843 *flags |= CPU_DTRACE_BADADDR;
6844 *illval = regs[rd];
6845 break;
6846 }
6847 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
6848 break;
6849
6850 case DIF_OP_STH:
6851 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
6852 *flags |= CPU_DTRACE_BADADDR;
6853 *illval = regs[rd];
6854 break;
6855 }
6856 if (regs[rd] & 1) {
6857 *flags |= CPU_DTRACE_BADALIGN;
6858 *illval = regs[rd];
6859 break;
6860 }
6861 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
6862 break;
6863
6864 case DIF_OP_STW:
6865 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
6866 *flags |= CPU_DTRACE_BADADDR;
6867 *illval = regs[rd];
6868 break;
6869 }
6870 if (regs[rd] & 3) {
6871 *flags |= CPU_DTRACE_BADALIGN;
6872 *illval = regs[rd];
6873 break;
6874 }
6875 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
6876 break;
6877
6878 case DIF_OP_STX:
6879 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
6880 *flags |= CPU_DTRACE_BADADDR;
6881 *illval = regs[rd];
6882 break;
6883 }
6884 if (regs[rd] & 7) {
6885 *flags |= CPU_DTRACE_BADALIGN;
6886 *illval = regs[rd];
6887 break;
6888 }
6889 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
6890 break;
6891 }
6892 }
6893
6894 if (!(*flags & CPU_DTRACE_FAULT))
6895 return (rval);
6896
6897 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
6898 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
6899
6900 return (0);
6901 }
6902
6903 static void
6904 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
6905 {
6906 dtrace_probe_t *probe = ecb->dte_probe;
6907 dtrace_provider_t *prov = probe->dtpr_provider;
6908 char c[DTRACE_FULLNAMELEN + 80], *str;
6909 char *msg = "dtrace: breakpoint action at probe ";
6910 char *ecbmsg = " (ecb ";
6911 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
6912 uintptr_t val = (uintptr_t)ecb;
6913 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
6914
6915 if (dtrace_destructive_disallow)
6916 return;
6917
6918 /*
6919 * It's impossible to be taking action on the NULL probe.
6920 */
6921 ASSERT(probe != NULL);
6922
6923 /*
6924 * This is a poor man's (destitute man's?) sprintf(): we want to
6925 * print the provider name, module name, function name and name of
6926 * the probe, along with the hex address of the ECB with the breakpoint
6927 * action -- all of which we must place in the character buffer by
6928 * hand.
6929 */
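	/*
	 * The assembled message looks something like (illustrative):
	 * "dtrace: breakpoint action at probe fbt:kernel:foo:entry
	 * (ecb fffff80012345678)".
	 */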
6930 while (*msg != '\0')
6931 c[i++] = *msg++;
6932
6933 for (str = prov->dtpv_name; *str != '\0'; str++)
6934 c[i++] = *str;
6935 c[i++] = ':';
6936
6937 for (str = probe->dtpr_mod; *str != '\0'; str++)
6938 c[i++] = *str;
6939 c[i++] = ':';
6940
6941 for (str = probe->dtpr_func; *str != '\0'; str++)
6942 c[i++] = *str;
6943 c[i++] = ':';
6944
6945 for (str = probe->dtpr_name; *str != '\0'; str++)
6946 c[i++] = *str;
6947
6948 while (*ecbmsg != '\0')
6949 c[i++] = *ecbmsg++;
6950
6951 while (shift >= 0) {
6952 mask = (uintptr_t)0xf << shift;
6953
6954 if (val >= ((uintptr_t)1 << shift))
6955 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
6956 shift -= 4;
6957 }
6958
6959 c[i++] = ')';
6960 c[i] = '\0';
6961
6962 #ifdef illumos
6963 debug_enter(c);
6964 #else
6965 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
6966 #endif
6967 }
6968
6969 static void
6970 dtrace_action_panic(dtrace_ecb_t *ecb)
6971 {
6972 dtrace_probe_t *probe = ecb->dte_probe;
6973
6974 /*
6975 * It's impossible to be taking action on the NULL probe.
6976 */
6977 ASSERT(probe != NULL);
6978
6979 if (dtrace_destructive_disallow)
6980 return;
6981
6982 if (dtrace_panicked != NULL)
6983 return;
6984
6985 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
6986 return;
6987
6988 /*
6989 * We won the right to panic. (We want to be sure that only one
6990 * thread calls panic() from dtrace_probe(), and that panic() is
6991 * called exactly once.)
6992 */
6993 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
6994 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
6995 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
6996 }
6997
6998 static void
6999 dtrace_action_raise(uint64_t sig)
7000 {
7001 if (dtrace_destructive_disallow)
7002 return;
7003
7004 if (sig >= NSIG) {
7005 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
7006 return;
7007 }
7008
7009 #ifdef illumos
7010 /*
7011 * raise() has a queue depth of 1 -- we ignore all subsequent
7012 * invocations of the raise() action.
7013 */
7014 if (curthread->t_dtrace_sig == 0)
7015 curthread->t_dtrace_sig = (uint8_t)sig;
7016
7017 curthread->t_sig_check = 1;
7018 aston(curthread);
7019 #else
7020 struct proc *p = curproc;
7021 PROC_LOCK(p);
7022 kern_psignal(p, sig);
7023 PROC_UNLOCK(p);
7024 #endif
7025 }
7026
7027 static void
7028 dtrace_action_stop(void)
7029 {
7030 if (dtrace_destructive_disallow)
7031 return;
7032
7033 #ifdef illumos
7034 if (!curthread->t_dtrace_stop) {
7035 curthread->t_dtrace_stop = 1;
7036 curthread->t_sig_check = 1;
7037 aston(curthread);
7038 }
7039 #else
7040 struct proc *p = curproc;
7041 PROC_LOCK(p);
7042 kern_psignal(p, SIGSTOP);
7043 PROC_UNLOCK(p);
7044 #endif
7045 }
7046
7047 static void
7048 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
7049 {
7050 hrtime_t now;
7051 volatile uint16_t *flags;
7052 #ifdef illumos
7053 cpu_t *cpu = CPU;
7054 #else
7055 cpu_t *cpu = &solaris_cpu[curcpu];
7056 #endif
7057
7058 if (dtrace_destructive_disallow)
7059 return;
7060
7061 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
7062
7063 now = dtrace_gethrtime();
7064
7065 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
7066 /*
7067 * We need to advance the mark to the current time.
7068 */
7069 cpu->cpu_dtrace_chillmark = now;
7070 cpu->cpu_dtrace_chilled = 0;
7071 }
7072
7073 /*
7074 * Now check to see if the requested chill time would take us over
7075 * the maximum amount of time allowed in the chill interval. (Or
7076 * worse, if the calculation itself induces overflow.)
7077 */
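	/*
	 * For example (illustrative), repeated chill(500000) calls within a
	 * single chill interval will trip this check once their sum exceeds
	 * dtrace_chill_max, at which point the action is refused with
	 * CPU_DTRACE_ILLOP.
	 */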
7078 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
7079 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
7080 *flags |= CPU_DTRACE_ILLOP;
7081 return;
7082 }
7083
7084 while (dtrace_gethrtime() - now < val)
7085 continue;
7086
7087 /*
7088 * Normally, we assure that the value of the variable "timestamp" does
7089 * not change within an ECB. The presence of chill() represents an
7090 * exception to this rule, however.
7091 */
7092 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
7093 cpu->cpu_dtrace_chilled += val;
7094 }
7095
7096 static void
7097 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
7098 uint64_t *buf, uint64_t arg)
7099 {
7100 int nframes = DTRACE_USTACK_NFRAMES(arg);
7101 int strsize = DTRACE_USTACK_STRSIZE(arg);
7102 uint64_t *pcs = &buf[1], *fps;
7103 char *str = (char *)&pcs[nframes];
7104 int size, offs = 0, i, j;
7105 size_t rem;
7106 uintptr_t old = mstate->dtms_scratch_ptr, saved;
7107 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
7108 char *sym;
7109
7110 /*
7111 * Should be taking a faster path if string space has not been
7112 * allocated.
7113 */
7114 ASSERT(strsize != 0);
7115
7116 /*
7117 * We will first allocate some temporary space for the frame pointers.
7118 */
7119 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
7120 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
7121 (nframes * sizeof (uint64_t));
7122
7123 if (!DTRACE_INSCRATCH(mstate, size)) {
7124 /*
7125 * Not enough room for our frame pointers -- need to indicate
7126 * that we ran out of scratch space.
7127 */
7128 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
7129 return;
7130 }
7131
7132 mstate->dtms_scratch_ptr += size;
7133 saved = mstate->dtms_scratch_ptr;
7134
7135 /*
7136 * Now get a stack with both program counters and frame pointers.
7137 */
7138 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7139 dtrace_getufpstack(buf, fps, nframes + 1);
7140 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7141
7142 /*
7143 * If that faulted, we're cooked.
7144 */
7145 if (*flags & CPU_DTRACE_FAULT)
7146 goto out;
7147
7148 /*
7149 * Now we want to walk up the stack, calling the USTACK helper. For
7150 * each iteration, we restore the scratch pointer.
7151 */
7152 for (i = 0; i < nframes; i++) {
7153 mstate->dtms_scratch_ptr = saved;
7154
7155 if (offs >= strsize)
7156 break;
7157
7158 sym = (char *)(uintptr_t)dtrace_helper(
7159 DTRACE_HELPER_ACTION_USTACK,
7160 mstate, state, pcs[i], fps[i]);
7161
7162 /*
7163 * If we faulted while running the helper, we're going to
7164 * clear the fault and null out the corresponding string.
7165 */
7166 if (*flags & CPU_DTRACE_FAULT) {
7167 *flags &= ~CPU_DTRACE_FAULT;
7168 str[offs++] = '\0';
7169 continue;
7170 }
7171
7172 if (sym == NULL) {
7173 str[offs++] = '\0';
7174 continue;
7175 }
7176
7177 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate,
7178 &(state->dts_vstate))) {
7179 str[offs++] = '\0';
7180 continue;
7181 }
7182
7183 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7184
7185 /*
7186 * Now copy in the string that the helper returned to us.
7187 */
7188 for (j = 0; offs + j < strsize && j < rem; j++) {
7189 if ((str[offs + j] = sym[j]) == '\0')
7190 break;
7191 }
7192
7193 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7194
7195 offs += j + 1;
7196 }
7197
7198 if (offs >= strsize) {
7199 /*
7200 * If we didn't have room for all of the strings, we don't
7201 * abort processing -- this needn't be a fatal error -- but we
7202 * still want to increment a counter (dts_stkstroverflows) to
7203 * allow this condition to be warned about. (If this is from
7204 * a jstack() action, it is easily tuned via jstackstrsize.)
7205 */
7206 dtrace_error(&state->dts_stkstroverflows);
7207 }
7208
7209 while (offs < strsize)
7210 str[offs++] = '\0';
7211
7212 out:
7213 mstate->dtms_scratch_ptr = old;
7214 }
7215
7216 static void
7217 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
7218 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
7219 {
7220 volatile uint16_t *flags;
7221 uint64_t val = *valp;
7222 size_t valoffs = *valoffsp;
7223
7224 flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
7225 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
7226
7227 /*
7228 * If this is a string, we're going to only load until we find the zero
7229 * byte -- after which we'll store zero bytes.
7230 */
7231 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
7232 char c = '\0' + 1;
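		/*
		 * c is initialized to an arbitrary non-NUL value so that
		 * the first iteration of the loop below performs a load.
		 */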
7233 size_t s;
7234
7235 for (s = 0; s < size; s++) {
7236 if (c != '\0' && dtkind == DIF_TF_BYREF) {
7237 c = dtrace_load8(val++);
7238 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
7239 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7240 c = dtrace_fuword8((void *)(uintptr_t)val++);
7241 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7242 if (*flags & CPU_DTRACE_FAULT)
7243 break;
7244 }
7245
7246 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
7247
7248 if (c == '\0' && intuple)
7249 break;
7250 }
7251 } else {
7252 uint8_t c;
7253 while (valoffs < end) {
7254 if (dtkind == DIF_TF_BYREF) {
7255 c = dtrace_load8(val++);
7256 } else if (dtkind == DIF_TF_BYUREF) {
7257 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7258 c = dtrace_fuword8((void *)(uintptr_t)val++);
7259 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7260 if (*flags & CPU_DTRACE_FAULT)
7261 break;
7262 }
7263
7264 DTRACE_STORE(uint8_t, tomax,
7265 valoffs++, c);
7266 }
7267 }
7268
7269 *valp = val;
7270 *valoffsp = valoffs;
7271 }
7272
7273 /*
7274 * Disables interrupts and sets the per-thread inprobe flag. When DEBUG is
7275 * defined, we also assert that we are not recursing unless the probe ID is an
7276 * error probe.
7277 */
7278 static dtrace_icookie_t
7279 dtrace_probe_enter(dtrace_id_t id)
7280 {
7281 dtrace_icookie_t cookie;
7282
7283 cookie = dtrace_interrupt_disable();
7284
7285 /*
7286 * Unless this is an ERROR probe, we are not allowed to recurse in
7287 	 * dtrace_probe(). Recursing into a DTrace probe usually means that a
7288 * function is instrumented that should not have been instrumented or
7289 * that the ordering guarantee of the records will be violated,
7290 * resulting in unexpected output. If there is an exception to this
7291 * assertion, a new case should be added.
7292 */
7293 ASSERT(curthread->t_dtrace_inprobe == 0 ||
7294 id == dtrace_probeid_error);
7295 curthread->t_dtrace_inprobe = 1;
7296
7297 return (cookie);
7298 }
7299
7300 /*
7301 * Clears the per-thread inprobe flag and enables interrupts.
7302 */
7303 static void
7304 dtrace_probe_exit(dtrace_icookie_t cookie)
7305 {
7306
7307 curthread->t_dtrace_inprobe = 0;
7308 dtrace_interrupt_enable(cookie);
7309 }
7310
7311 /*
7312 * If you're looking for the epicenter of DTrace, you just found it. This
7313 * is the function called by the provider to fire a probe -- from which all
7314 * subsequent probe-context DTrace activity emanates.
7315 */
7316 void
7317 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
7318 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
7319 {
7320 processorid_t cpuid;
7321 dtrace_icookie_t cookie;
7322 dtrace_probe_t *probe;
7323 dtrace_mstate_t mstate;
7324 dtrace_ecb_t *ecb;
7325 dtrace_action_t *act;
7326 intptr_t offs;
7327 size_t size;
7328 int vtime, onintr;
7329 volatile uint16_t *flags;
7330 hrtime_t now;
7331
7332 if (KERNEL_PANICKED())
7333 return;
7334
7335 #ifdef illumos
7336 /*
7337 * Kick out immediately if this CPU is still being born (in which case
7338 * curthread will be set to -1) or the current thread can't allow
7339 * probes in its current context.
7340 */
7341 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
7342 return;
7343 #endif
7344
7345 cookie = dtrace_probe_enter(id);
7346 probe = dtrace_probes[id - 1];
7347 cpuid = curcpu;
7348 onintr = CPU_ON_INTR(CPU);
7349
7350 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
7351 probe->dtpr_predcache == curthread->t_predcache) {
7352 /*
7353 * We have hit in the predicate cache; we know that
7354 * this predicate would evaluate to be false.
7355 */
7356 dtrace_probe_exit(cookie);
7357 return;
7358 }
7359
7360 #ifdef illumos
7361 if (panic_quiesce) {
7362 #else
7363 if (KERNEL_PANICKED()) {
7364 #endif
7365 /*
7366 * We don't trace anything if we're panicking.
7367 */
7368 dtrace_probe_exit(cookie);
7369 return;
7370 }
7371
7372 now = mstate.dtms_timestamp = dtrace_gethrtime();
7373 mstate.dtms_present = DTRACE_MSTATE_TIMESTAMP;
7374 vtime = dtrace_vtime_references != 0;
7375
7376 if (vtime && curthread->t_dtrace_start)
7377 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
7378
7379 mstate.dtms_difo = NULL;
7380 mstate.dtms_probe = probe;
7381 mstate.dtms_strtok = 0;
7382 mstate.dtms_arg[0] = arg0;
7383 mstate.dtms_arg[1] = arg1;
7384 mstate.dtms_arg[2] = arg2;
7385 mstate.dtms_arg[3] = arg3;
7386 mstate.dtms_arg[4] = arg4;
7387
7388 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
7389
7390 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
7391 dtrace_predicate_t *pred = ecb->dte_predicate;
7392 dtrace_state_t *state = ecb->dte_state;
7393 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
7394 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
7395 dtrace_vstate_t *vstate = &state->dts_vstate;
7396 dtrace_provider_t *prov = probe->dtpr_provider;
7397 uint64_t tracememsize = 0;
7398 int committed = 0;
7399 caddr_t tomax;
7400
7401 /*
7402 * A little subtlety with the following (seemingly innocuous)
7403 * declaration of the automatic 'val': by looking at the
7404 * code, you might think that it could be declared in the
7405 * action processing loop, below. (That is, it's only used in
7406 * the action processing loop.) However, it must be declared
7407 * out of that scope because in the case of DIF expression
7408 * arguments to aggregating actions, one iteration of the
7409 * action loop will use the last iteration's value.
7410 */
7411 uint64_t val = 0;
7412
7413 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
7414 mstate.dtms_getf = NULL;
7415
7416 *flags &= ~CPU_DTRACE_ERROR;
7417
7418 if (prov == dtrace_provider) {
7419 /*
7420 * If dtrace itself is the provider of this probe,
7421 * we're only going to continue processing the ECB if
7422 * arg0 (the dtrace_state_t) is equal to the ECB's
7423 * creating state. (This prevents disjoint consumers
7424 * from seeing one another's metaprobes.)
7425 */
7426 if (arg0 != (uint64_t)(uintptr_t)state)
7427 continue;
7428 }
7429
7430 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
7431 /*
7432 * We're not currently active. If our provider isn't
7433 * the dtrace pseudo provider, we're not interested.
7434 */
7435 if (prov != dtrace_provider)
7436 continue;
7437
7438 /*
7439 * Now we must further check if we are in the BEGIN
7440 * probe. If we are, we will only continue processing
7441 * if we're still in WARMUP -- if one BEGIN enabling
7442 * has invoked the exit() action, we don't want to
7443 * evaluate subsequent BEGIN enablings.
7444 */
7445 if (probe->dtpr_id == dtrace_probeid_begin &&
7446 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7447 ASSERT(state->dts_activity ==
7448 DTRACE_ACTIVITY_DRAINING);
7449 continue;
7450 }
7451 }
7452
7453 if (ecb->dte_cond) {
7454 /*
7455 * If the dte_cond bits indicate that this
7456 * consumer is only allowed to see user-mode firings
7457 * of this probe, call the provider's dtps_usermode()
7458 * entry point to check that the probe was fired
7459 * while in a user context. Skip this ECB if that's
7460 * not the case.
7461 */
7462 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
7463 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
7464 probe->dtpr_id, probe->dtpr_arg) == 0)
7465 continue;
7466
7467 #ifdef illumos
7468 /*
7469 * This is more subtle than it looks. We have to be
7470 * absolutely certain that CRED() isn't going to
7471 * change out from under us so it's only legit to
7472 * examine that structure if we're in constrained
7473 			 * situations.  Currently, the only time we'll make this
7474 * check is if a non-super-user has enabled the
7475 * profile or syscall providers -- providers that
7476 * allow visibility of all processes. For the
7477 * profile case, the check above will ensure that
7478 * we're examining a user context.
7479 */
7480 if (ecb->dte_cond & DTRACE_COND_OWNER) {
7481 cred_t *cr;
7482 cred_t *s_cr =
7483 ecb->dte_state->dts_cred.dcr_cred;
7484 proc_t *proc;
7485
7486 ASSERT(s_cr != NULL);
7487
7488 if ((cr = CRED()) == NULL ||
7489 s_cr->cr_uid != cr->cr_uid ||
7490 s_cr->cr_uid != cr->cr_ruid ||
7491 s_cr->cr_uid != cr->cr_suid ||
7492 s_cr->cr_gid != cr->cr_gid ||
7493 s_cr->cr_gid != cr->cr_rgid ||
7494 s_cr->cr_gid != cr->cr_sgid ||
7495 (proc = ttoproc(curthread)) == NULL ||
7496 (proc->p_flag & SNOCD))
7497 continue;
7498 }
7499
7500 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
7501 cred_t *cr;
7502 cred_t *s_cr =
7503 ecb->dte_state->dts_cred.dcr_cred;
7504
7505 ASSERT(s_cr != NULL);
7506
7507 if ((cr = CRED()) == NULL ||
7508 s_cr->cr_zone->zone_id !=
7509 cr->cr_zone->zone_id)
7510 continue;
7511 }
7512 #endif
7513 }
7514
7515 if (now - state->dts_alive > dtrace_deadman_timeout) {
7516 /*
7517 * We seem to be dead. Unless we (a) have kernel
7518 * destructive permissions (b) have explicitly enabled
7519 * destructive actions and (c) destructive actions have
7520 * not been disabled, we're going to transition into
7521 * the KILLED state, from which no further processing
7522 * on this state will be performed.
7523 */
7524 if (!dtrace_priv_kernel_destructive(state) ||
7525 !state->dts_cred.dcr_destructive ||
7526 dtrace_destructive_disallow) {
7527 void *activity = &state->dts_activity;
7528 dtrace_activity_t curstate;
7529
7530 do {
7531 curstate = state->dts_activity;
7532 } while (dtrace_cas32(activity, curstate,
7533 DTRACE_ACTIVITY_KILLED) != curstate);
7534
7535 continue;
7536 }
7537 }
7538
7539 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7540 ecb->dte_alignment, state, &mstate)) < 0)
7541 continue;
7542
7543 tomax = buf->dtb_tomax;
7544 ASSERT(tomax != NULL);
7545
7546 if (ecb->dte_size != 0) {
7547 dtrace_rechdr_t dtrh;
7548 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7549 mstate.dtms_timestamp = dtrace_gethrtime();
7550 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7551 }
7552 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7553 dtrh.dtrh_epid = ecb->dte_epid;
7554 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7555 mstate.dtms_timestamp);
7556 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7557 }
7558
7559 mstate.dtms_epid = ecb->dte_epid;
7560 mstate.dtms_present |= DTRACE_MSTATE_EPID;
7561
7562 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7563 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
7564 else
7565 mstate.dtms_access = 0;
7566
7567 if (pred != NULL) {
7568 dtrace_difo_t *dp = pred->dtp_difo;
7569 uint64_t rval;
7570
7571 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7572
7573 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7574 dtrace_cacheid_t cid = probe->dtpr_predcache;
7575
7576 if (cid != DTRACE_CACHEIDNONE && !onintr) {
7577 /*
7578 * Update the predicate cache...
7579 */
7580 ASSERT(cid == pred->dtp_cacheid);
7581 curthread->t_predcache = cid;
7582 }
7583
7584 continue;
7585 }
7586 }
7587
7588 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7589 act != NULL; act = act->dta_next) {
7590 size_t valoffs;
7591 dtrace_difo_t *dp;
7592 dtrace_recdesc_t *rec = &act->dta_rec;
7593
7594 size = rec->dtrd_size;
7595 valoffs = offs + rec->dtrd_offset;
7596
7597 if (DTRACEACT_ISAGG(act->dta_kind)) {
7598 uint64_t v = 0xbad;
7599 dtrace_aggregation_t *agg;
7600
7601 agg = (dtrace_aggregation_t *)act;
7602
7603 if ((dp = act->dta_difo) != NULL)
7604 v = dtrace_dif_emulate(dp,
7605 &mstate, vstate, state);
7606
7607 if (*flags & CPU_DTRACE_ERROR)
7608 continue;
7609
7610 /*
7611 * Note that we always pass the expression
7612 * value from the previous iteration of the
7613 * action loop. This value will only be used
7614 * if there is an expression argument to the
7615 * aggregating action, denoted by the
7616 * dtag_hasarg field.
7617 */
7618 dtrace_aggregate(agg, buf,
7619 offs, aggbuf, v, val);
7620 continue;
7621 }
7622
7623 switch (act->dta_kind) {
7624 case DTRACEACT_STOP:
7625 if (dtrace_priv_proc_destructive(state))
7626 dtrace_action_stop();
7627 continue;
7628
7629 case DTRACEACT_BREAKPOINT:
7630 if (dtrace_priv_kernel_destructive(state))
7631 dtrace_action_breakpoint(ecb);
7632 continue;
7633
7634 case DTRACEACT_PANIC:
7635 if (dtrace_priv_kernel_destructive(state))
7636 dtrace_action_panic(ecb);
7637 continue;
7638
7639 case DTRACEACT_STACK:
7640 if (!dtrace_priv_kernel(state))
7641 continue;
7642
7643 dtrace_getpcstack((pc_t *)(tomax + valoffs),
7644 size / sizeof (pc_t), probe->dtpr_aframes,
7645 DTRACE_ANCHORED(probe) ? NULL :
7646 (uint32_t *)arg0);
7647 continue;
7648
7649 case DTRACEACT_JSTACK:
7650 case DTRACEACT_USTACK:
7651 if (!dtrace_priv_proc(state))
7652 continue;
7653
7654 /*
7655 * See comment in DIF_VAR_PID.
7656 */
7657 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7658 CPU_ON_INTR(CPU)) {
7659 int depth = DTRACE_USTACK_NFRAMES(
7660 rec->dtrd_arg) + 1;
7661
7662 dtrace_bzero((void *)(tomax + valoffs),
7663 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7664 + depth * sizeof (uint64_t));
7665
7666 continue;
7667 }
7668
7669 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7670 curproc->p_dtrace_helpers != NULL) {
7671 /*
7672 * This is the slow path -- we have
7673 * allocated string space, and we're
7674 * getting the stack of a process that
7675 * has helpers. Call into a separate
7676 * routine to perform this processing.
7677 */
7678 dtrace_action_ustack(&mstate, state,
7679 (uint64_t *)(tomax + valoffs),
7680 rec->dtrd_arg);
7681 continue;
7682 }
7683
7684 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7685 dtrace_getupcstack((uint64_t *)
7686 (tomax + valoffs),
7687 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7688 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7689 continue;
7690
7691 default:
7692 break;
7693 }
7694
7695 dp = act->dta_difo;
7696 ASSERT(dp != NULL);
7697
7698 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7699
7700 if (*flags & CPU_DTRACE_ERROR)
7701 continue;
7702
7703 switch (act->dta_kind) {
7704 case DTRACEACT_SPECULATE: {
7705 dtrace_rechdr_t *dtrh;
7706
7707 ASSERT(buf == &state->dts_buffer[cpuid]);
7708 buf = dtrace_speculation_buffer(state,
7709 cpuid, val);
7710
7711 if (buf == NULL) {
7712 *flags |= CPU_DTRACE_DROP;
7713 continue;
7714 }
7715
7716 offs = dtrace_buffer_reserve(buf,
7717 ecb->dte_needed, ecb->dte_alignment,
7718 state, NULL);
7719
7720 if (offs < 0) {
7721 *flags |= CPU_DTRACE_DROP;
7722 continue;
7723 }
7724
7725 tomax = buf->dtb_tomax;
7726 ASSERT(tomax != NULL);
7727
7728 if (ecb->dte_size == 0)
7729 continue;
7730
7731 ASSERT3U(ecb->dte_size, >=,
7732 sizeof (dtrace_rechdr_t));
7733 dtrh = ((void *)(tomax + offs));
7734 dtrh->dtrh_epid = ecb->dte_epid;
7735 /*
7736 * When the speculation is committed, all of
7737 * the records in the speculative buffer will
7738 * have their timestamps set to the commit
7739 * time. Until then, it is set to a sentinel
7740 				 * value, for debuggability.
7741 */
7742 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7743 continue;
7744 }
7745
7746 case DTRACEACT_PRINTM: {
7747 /*
7748 * printm() assumes that the DIF expression
7749 * returns a pointer produced by memref().
7750 * memref() is a subroutine used to work around
7751 * the single-valued returns of DIF; its result
7752 * is always expected to be in the scratch space.
7753 * We therefore validate that the pointer given
7754 * to printm() is in the scratch space in order
7755 * to avoid a potential panic.
7756 */
7757 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
7758
7759 if (!DTRACE_INSCRATCHPTR(&mstate,
7760 (uintptr_t)memref, 2 * sizeof(uintptr_t))) {
7761 *flags |= CPU_DTRACE_BADADDR;
7762 continue;
7763 }
7764
7765 /* Get the size from the memref. */
7766 size = memref[1];
7767
7768 /*
7769 * Check if the size exceeds the allocated
7770 * buffer size.
7771 */
7772 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
7773 /* Flag a drop! */
7774 *flags |= CPU_DTRACE_DROP;
7775 continue;
7776 }
7777
7778 /* Store the size in the buffer first. */
7779 DTRACE_STORE(uintptr_t, tomax,
7780 valoffs, size);
7781
7782 /*
7783 * Offset the buffer address to the start
7784 * of the data.
7785 */
7786 valoffs += sizeof(uintptr_t);
7787
7788 /*
7789 * Reset to the memory address rather than
7790 * the memref array, then let the BYREF
7791 * code below do the work to store the
7792 * memory data in the buffer.
7793 */
7794 val = memref[0];
7795 break;
7796 }
7797
7798 case DTRACEACT_CHILL:
7799 if (dtrace_priv_kernel_destructive(state))
7800 dtrace_action_chill(&mstate, val);
7801 continue;
7802
7803 case DTRACEACT_RAISE:
7804 if (dtrace_priv_proc_destructive(state))
7805 dtrace_action_raise(val);
7806 continue;
7807
7808 case DTRACEACT_COMMIT:
7809 ASSERT(!committed);
7810
7811 /*
7812 * We need to commit our buffer state.
7813 */
7814 if (ecb->dte_size)
7815 buf->dtb_offset = offs + ecb->dte_size;
7816 buf = &state->dts_buffer[cpuid];
7817 dtrace_speculation_commit(state, cpuid, val);
7818 committed = 1;
7819 continue;
7820
7821 case DTRACEACT_DISCARD:
7822 dtrace_speculation_discard(state, cpuid, val);
7823 continue;
7824
7825 case DTRACEACT_DIFEXPR:
7826 case DTRACEACT_LIBACT:
7827 case DTRACEACT_PRINTF:
7828 case DTRACEACT_PRINTA:
7829 case DTRACEACT_SYSTEM:
7830 case DTRACEACT_FREOPEN:
7831 case DTRACEACT_TRACEMEM:
7832 break;
7833
7834 case DTRACEACT_TRACEMEM_DYNSIZE:
7835 tracememsize = val;
7836 break;
7837
7838 case DTRACEACT_SYM:
7839 case DTRACEACT_MOD:
7840 if (!dtrace_priv_kernel(state))
7841 continue;
7842 break;
7843
7844 case DTRACEACT_USYM:
7845 case DTRACEACT_UMOD:
7846 case DTRACEACT_UADDR: {
7847 #ifdef illumos
7848 struct pid *pid = curthread->t_procp->p_pidp;
7849 #endif
7850
7851 if (!dtrace_priv_proc(state))
7852 continue;
7853
7854 DTRACE_STORE(uint64_t, tomax,
7855 #ifdef illumos
7856 valoffs, (uint64_t)pid->pid_id);
7857 #else
7858 valoffs, (uint64_t) curproc->p_pid);
7859 #endif
7860 DTRACE_STORE(uint64_t, tomax,
7861 valoffs + sizeof (uint64_t), val);
7862
7863 continue;
7864 }
7865
7866 case DTRACEACT_EXIT: {
7867 /*
7868 * For the exit action, we are going to attempt
7869 * to atomically set our activity to be
7870 * draining. If this fails (either because
7871 * another CPU has beaten us to the exit action,
7872 * or because our current activity is something
7873 * other than ACTIVE or WARMUP), we will
7874 * continue. This assures that the exit action
7875 * can be successfully recorded at most once
7876 * when we're in the ACTIVE state. If we're
7877 * encountering the exit() action while in
7878 * COOLDOWN, however, we want to honor the new
7879 * status code. (We know that we're the only
7880 * thread in COOLDOWN, so there is no race.)
7881 */
7882 void *activity = &state->dts_activity;
7883 dtrace_activity_t curstate = state->dts_activity;
7884
7885 if (curstate == DTRACE_ACTIVITY_COOLDOWN)
7886 break;
7887
7888 if (curstate != DTRACE_ACTIVITY_WARMUP)
7889 curstate = DTRACE_ACTIVITY_ACTIVE;
7890
7891 if (dtrace_cas32(activity, curstate,
7892 DTRACE_ACTIVITY_DRAINING) != curstate) {
7893 *flags |= CPU_DTRACE_DROP;
7894 continue;
7895 }
7896
7897 break;
7898 }
7899
7900 default:
7901 ASSERT(0);
7902 }
7903
7904 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
7905 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
7906 uintptr_t end = valoffs + size;
7907
7908 if (tracememsize != 0 &&
7909 valoffs + tracememsize < end) {
7910 end = valoffs + tracememsize;
7911 tracememsize = 0;
7912 }
7913
7914 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
7915 !dtrace_vcanload((void *)(uintptr_t)val,
7916 &dp->dtdo_rtype, NULL, &mstate, vstate))
7917 continue;
7918
7919 dtrace_store_by_ref(dp, tomax, size, &valoffs,
7920 &val, end, act->dta_intuple,
7921 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
7922 DIF_TF_BYREF: DIF_TF_BYUREF);
7923 continue;
7924 }
7925
7926 switch (size) {
7927 case 0:
7928 break;
7929
7930 case sizeof (uint8_t):
7931 DTRACE_STORE(uint8_t, tomax, valoffs, val);
7932 break;
7933 case sizeof (uint16_t):
7934 DTRACE_STORE(uint16_t, tomax, valoffs, val);
7935 break;
7936 case sizeof (uint32_t):
7937 DTRACE_STORE(uint32_t, tomax, valoffs, val);
7938 break;
7939 case sizeof (uint64_t):
7940 DTRACE_STORE(uint64_t, tomax, valoffs, val);
7941 break;
7942 default:
7943 /*
7944 * Any other size should have been returned by
7945 * reference, not by value.
7946 */
7947 ASSERT(0);
7948 break;
7949 }
7950 }
7951
7952 if (*flags & CPU_DTRACE_DROP)
7953 continue;
7954
7955 if (*flags & CPU_DTRACE_FAULT) {
7956 int ndx;
7957 dtrace_action_t *err;
7958
7959 buf->dtb_errors++;
7960
7961 if (probe->dtpr_id == dtrace_probeid_error) {
7962 /*
7963 * There's nothing we can do -- we had an
7964 * error on the error probe. We bump an
7965 * error counter to at least indicate that
7966 * this condition happened.
7967 */
7968 dtrace_error(&state->dts_dblerrors);
7969 continue;
7970 }
7971
7972 if (vtime) {
7973 /*
7974 * Before recursing on dtrace_probe(), we
7975 * need to explicitly clear out our start
7976 * time to prevent it from being accumulated
7977 * into t_dtrace_vtime.
7978 */
7979 curthread->t_dtrace_start = 0;
7980 }
7981
7982 /*
7983 * Iterate over the actions to figure out which action
7984 * we were processing when we experienced the error.
7985 * Note that act points _past_ the faulting action; if
7986 * act is ecb->dte_action, the fault was in the
7987 * predicate; if it's ecb->dte_action->dta_next, it's
7988 * in action #1, and so on.
7989 */
7990 for (err = ecb->dte_action, ndx = 0;
7991 err != act; err = err->dta_next, ndx++)
7992 continue;
7993
7994 dtrace_probe_error(state, ecb->dte_epid, ndx,
7995 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
7996 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
7997 cpu_core[cpuid].cpuc_dtrace_illval);
7998
7999 continue;
8000 }
8001
8002 if (!committed)
8003 buf->dtb_offset = offs + ecb->dte_size;
8004 }
8005
8006 if (vtime)
8007 curthread->t_dtrace_start = dtrace_gethrtime();
8008
8009 dtrace_probe_exit(cookie);
8010 }
8011
8012 /*
8013 * DTrace Probe Hashing Functions
8014 *
8015 * The functions in this section (and indeed, the functions in remaining
8016 * sections) are not _called_ from probe context. (Any exceptions to this are
8017 * marked with a "Note:".) Rather, they are called from elsewhere in the
8018 * DTrace framework to look up probes in, add probes to, and remove probes from
8019 * the DTrace probe hashes. (Each probe is hashed by each element of the
8020 * probe tuple -- allowing for fast lookups, regardless of what was
8021 * specified.)
8022 */
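/*
 * A descriptive note (not from the original comments): dtrace_hash_str()
 * below is the classic PJW/ELF-style string hash -- each character is folded
 * into the running value four bits at a time, and the overflowing high
 * nibble is exclusive-ORed back into the lower-order bits.
 */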
8023 static uint_t
8024 dtrace_hash_str(const char *p)
8025 {
8026 unsigned int g;
8027 uint_t hval = 0;
8028
8029 while (*p) {
8030 hval = (hval << 4) + *p++;
8031 if ((g = (hval & 0xf0000000)) != 0)
8032 hval ^= g >> 24;
8033 hval &= ~g;
8034 }
8035 return (hval);
8036 }
8037
8038 static dtrace_hash_t *
8039 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
8040 {
8041 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
8042
8043 hash->dth_stroffs = stroffs;
8044 hash->dth_nextoffs = nextoffs;
8045 hash->dth_prevoffs = prevoffs;
8046
8047 hash->dth_size = 1;
8048 hash->dth_mask = hash->dth_size - 1;
8049
8050 hash->dth_tab = kmem_zalloc(hash->dth_size *
8051 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
8052
8053 return (hash);
8054 }
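/*
 * A minimal sketch of how the probe hashes are built with the offsets taken
 * by dtrace_hash_create() (the actual calls live in the attach path
 * elsewhere in this file); the offsets simply tell the generic hash code
 * where to find the key string and the chain linkage within each
 * dtrace_probe_t:
 *
 *	dtrace_bymod = dtrace_hash_create(
 *	    offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 */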
8055
8056 static void
8057 dtrace_hash_destroy(dtrace_hash_t *hash)
8058 {
8059 #ifdef DEBUG
8060 int i;
8061
8062 for (i = 0; i < hash->dth_size; i++)
8063 ASSERT(hash->dth_tab[i] == NULL);
8064 #endif
8065
8066 kmem_free(hash->dth_tab,
8067 hash->dth_size * sizeof (dtrace_hashbucket_t *));
8068 kmem_free(hash, sizeof (dtrace_hash_t));
8069 }
8070
8071 static void
8072 dtrace_hash_resize(dtrace_hash_t *hash)
8073 {
8074 int size = hash->dth_size, i, ndx;
8075 int new_size = hash->dth_size << 1;
8076 int new_mask = new_size - 1;
8077 dtrace_hashbucket_t **new_tab, *bucket, *next;
8078
8079 ASSERT((new_size & new_mask) == 0);
8080
8081 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
8082
8083 for (i = 0; i < size; i++) {
8084 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
8085 dtrace_probe_t *probe = bucket->dthb_chain;
8086
8087 ASSERT(probe != NULL);
8088 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
8089
8090 next = bucket->dthb_next;
8091 bucket->dthb_next = new_tab[ndx];
8092 new_tab[ndx] = bucket;
8093 }
8094 }
8095
8096 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
8097 hash->dth_tab = new_tab;
8098 hash->dth_size = new_size;
8099 hash->dth_mask = new_mask;
8100 }
8101
8102 static void
8103 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
8104 {
8105 int hashval = DTRACE_HASHSTR(hash, new);
8106 int ndx = hashval & hash->dth_mask;
8107 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8108 dtrace_probe_t **nextp, **prevp;
8109
8110 for (; bucket != NULL; bucket = bucket->dthb_next) {
8111 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
8112 goto add;
8113 }
8114
8115 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
8116 dtrace_hash_resize(hash);
8117 dtrace_hash_add(hash, new);
8118 return;
8119 }
8120
8121 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
8122 bucket->dthb_next = hash->dth_tab[ndx];
8123 hash->dth_tab[ndx] = bucket;
8124 hash->dth_nbuckets++;
8125
8126 add:
8127 nextp = DTRACE_HASHNEXT(hash, new);
8128 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
8129 *nextp = bucket->dthb_chain;
8130
8131 if (bucket->dthb_chain != NULL) {
8132 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
8133 ASSERT(*prevp == NULL);
8134 *prevp = new;
8135 }
8136
8137 bucket->dthb_chain = new;
8138 bucket->dthb_len++;
8139 }
8140
8141 static dtrace_probe_t *
8142 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
8143 {
8144 int hashval = DTRACE_HASHSTR(hash, template);
8145 int ndx = hashval & hash->dth_mask;
8146 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8147
8148 for (; bucket != NULL; bucket = bucket->dthb_next) {
8149 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
8150 return (bucket->dthb_chain);
8151 }
8152
8153 return (NULL);
8154 }
8155
8156 static int
8157 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
8158 {
8159 int hashval = DTRACE_HASHSTR(hash, template);
8160 int ndx = hashval & hash->dth_mask;
8161 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8162
8163 for (; bucket != NULL; bucket = bucket->dthb_next) {
8164 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
8165 return (bucket->dthb_len);
8166 }
8167
8168 return (0);
8169 }
8170
8171 static void
8172 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
8173 {
8174 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
8175 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8176
8177 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
8178 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
8179
8180 /*
8181 * Find the bucket that we're removing this probe from.
8182 */
8183 for (; bucket != NULL; bucket = bucket->dthb_next) {
8184 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
8185 break;
8186 }
8187
8188 ASSERT(bucket != NULL);
8189
8190 if (*prevp == NULL) {
8191 if (*nextp == NULL) {
8192 /*
8193 * The removed probe was the only probe on this
8194 * bucket; we need to remove the bucket.
8195 */
8196 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
8197
8198 ASSERT(bucket->dthb_chain == probe);
8199 ASSERT(b != NULL);
8200
8201 if (b == bucket) {
8202 hash->dth_tab[ndx] = bucket->dthb_next;
8203 } else {
8204 while (b->dthb_next != bucket)
8205 b = b->dthb_next;
8206 b->dthb_next = bucket->dthb_next;
8207 }
8208
8209 ASSERT(hash->dth_nbuckets > 0);
8210 hash->dth_nbuckets--;
8211 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
8212 return;
8213 }
8214
8215 bucket->dthb_chain = *nextp;
8216 } else {
8217 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
8218 }
8219
8220 if (*nextp != NULL)
8221 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
8222 }
8223
8224 /*
8225 * DTrace Utility Functions
8226 *
8227 * These are random utility functions that are _not_ called from probe context.
8228 */
8229 static int
8230 dtrace_badattr(const dtrace_attribute_t *a)
8231 {
8232 return (a->dtat_name > DTRACE_STABILITY_MAX ||
8233 a->dtat_data > DTRACE_STABILITY_MAX ||
8234 a->dtat_class > DTRACE_CLASS_MAX);
8235 }
8236
8237 /*
8238 * Return a newly allocated copy of a string. If the specified string is
8239 * NULL, this function returns a newly allocated zero-length string.
8240 */
8241 static char *
8242 dtrace_strdup(const char *str)
8243 {
8244 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
8245
8246 if (str != NULL)
8247 (void) strcpy(new, str);
8248
8249 return (new);
8250 }
8251
8252 #define DTRACE_ISALPHA(c) \
8253 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
8254
8255 static int
8256 dtrace_badname(const char *s)
8257 {
8258 char c;
8259
8260 if (s == NULL || (c = *s++) == '\0')
8261 return (0);
8262
8263 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
8264 return (1);
8265
8266 while ((c = *s++) != '\0') {
8267 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
8268 c != '-' && c != '_' && c != '.' && c != '`')
8269 return (1);
8270 }
8271
8272 return (0);
8273 }
8274
8275 static void
8276 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
8277 {
8278 uint32_t priv;
8279
8280 #ifdef illumos
8281 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
8282 /*
8283 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
8284 */
8285 priv = DTRACE_PRIV_ALL;
8286 } else {
8287 *uidp = crgetuid(cr);
8288 *zoneidp = crgetzoneid(cr);
8289
8290 priv = 0;
8291 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
8292 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
8293 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
8294 priv |= DTRACE_PRIV_USER;
8295 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
8296 priv |= DTRACE_PRIV_PROC;
8297 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
8298 priv |= DTRACE_PRIV_OWNER;
8299 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
8300 priv |= DTRACE_PRIV_ZONEOWNER;
8301 }
8302 #else
8303 priv = DTRACE_PRIV_ALL;
8304 #endif
8305
8306 *privp = priv;
8307 }
8308
8309 #ifdef DTRACE_ERRDEBUG
8310 static void
8311 dtrace_errdebug(const char *str)
8312 {
8313 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
8314 int occupied = 0;
8315
8316 mutex_enter(&dtrace_errlock);
8317 dtrace_errlast = str;
8318 dtrace_errthread = curthread;
8319
8320 while (occupied++ < DTRACE_ERRHASHSZ) {
8321 if (dtrace_errhash[hval].dter_msg == str) {
8322 dtrace_errhash[hval].dter_count++;
8323 goto out;
8324 }
8325
8326 if (dtrace_errhash[hval].dter_msg != NULL) {
8327 hval = (hval + 1) % DTRACE_ERRHASHSZ;
8328 continue;
8329 }
8330
8331 dtrace_errhash[hval].dter_msg = str;
8332 dtrace_errhash[hval].dter_count = 1;
8333 goto out;
8334 }
8335
8336 panic("dtrace: undersized error hash");
8337 out:
8338 mutex_exit(&dtrace_errlock);
8339 }
8340 #endif
8341
8342 /*
8343 * DTrace Matching Functions
8344 *
8345 * These functions are used to match groups of probes, given some elements of
8346 * a probe tuple, or some globbed expressions for elements of a probe tuple.
8347 */
8348 static int
8349 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
8350 zoneid_t zoneid)
8351 {
8352 if (priv != DTRACE_PRIV_ALL) {
8353 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
8354 uint32_t match = priv & ppriv;
8355
8356 /*
8357 * No PRIV_DTRACE_* privileges...
8358 */
8359 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
8360 DTRACE_PRIV_KERNEL)) == 0)
8361 return (0);
8362
8363 /*
8364 * No matching bits, but there were bits to match...
8365 */
8366 if (match == 0 && ppriv != 0)
8367 return (0);
8368
8369 /*
8370 * Need to have permissions to the process, but don't...
8371 */
8372 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
8373 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
8374 return (0);
8375 }
8376
8377 /*
8378 * Need to be in the same zone unless we possess the
8379 * privilege to examine all zones.
8380 */
8381 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
8382 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
8383 return (0);
8384 }
8385 }
8386
8387 return (1);
8388 }
8389
8390 /*
8391 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
8392 * consists of input pattern strings and an ops-vector to evaluate them.
8393 * This function returns >0 for match, 0 for no match, and <0 for error.
8394 */
8395 static int
8396 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
8397 uint32_t priv, uid_t uid, zoneid_t zoneid)
8398 {
8399 dtrace_provider_t *pvp = prp->dtpr_provider;
8400 int rv;
8401
8402 if (pvp->dtpv_defunct)
8403 return (0);
8404
8405 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
8406 return (rv);
8407
8408 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
8409 return (rv);
8410
8411 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
8412 return (rv);
8413
8414 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
8415 return (rv);
8416
8417 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
8418 return (0);
8419
8420 return (rv);
8421 }
8422
8423 /*
8424 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
8425 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
8426 * libc's version, the kernel version only applies to 8-bit ASCII strings.
8427 * In addition, all of the recursion cases except for '*' matching have been
8428 * unwound. For '*', we still implement recursive evaluation, but a depth
8429 * counter is maintained and matching is aborted if we recurse too deep.
8430 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
8431 */
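/*
 * A few illustrative cases (not exercised here; shown only to make the
 * semantics above concrete):
 *
 *	dtrace_match_glob("read", "read", 0)	-> match
 *	dtrace_match_glob("readv", "read*", 0)	-> match
 *	dtrace_match_glob("readv", "read?", 0)	-> match
 *	dtrace_match_glob("pread", "read*", 0)	-> no match
 *	dtrace_match_glob("r", "[!a-q]", 0)	-> match ('r' is outside a-q)
 */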
8432 static int
8433 dtrace_match_glob(const char *s, const char *p, int depth)
8434 {
8435 const char *olds;
8436 char s1, c;
8437 int gs;
8438
8439 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
8440 return (-1);
8441
8442 if (s == NULL)
8443 s = ""; /* treat NULL as empty string */
8444
8445 top:
8446 olds = s;
8447 s1 = *s++;
8448
8449 if (p == NULL)
8450 return (0);
8451
8452 if ((c = *p++) == '\0')
8453 return (s1 == '\0');
8454
8455 switch (c) {
8456 case '[': {
8457 int ok = 0, notflag = 0;
8458 char lc = '\0';
8459
8460 if (s1 == '\0')
8461 return (0);
8462
8463 if (*p == '!') {
8464 notflag = 1;
8465 p++;
8466 }
8467
8468 if ((c = *p++) == '\0')
8469 return (0);
8470
8471 do {
8472 if (c == '-' && lc != '\0' && *p != ']') {
8473 if ((c = *p++) == '\0')
8474 return (0);
8475 if (c == '\\' && (c = *p++) == '\0')
8476 return (0);
8477
8478 if (notflag) {
8479 if (s1 < lc || s1 > c)
8480 ok++;
8481 else
8482 return (0);
8483 } else if (lc <= s1 && s1 <= c)
8484 ok++;
8485
8486 } else if (c == '\\' && (c = *p++) == '\0')
8487 return (0);
8488
8489 lc = c; /* save left-hand 'c' for next iteration */
8490
8491 if (notflag) {
8492 if (s1 != c)
8493 ok++;
8494 else
8495 return (0);
8496 } else if (s1 == c)
8497 ok++;
8498
8499 if ((c = *p++) == '\0')
8500 return (0);
8501
8502 } while (c != ']');
8503
8504 if (ok)
8505 goto top;
8506
8507 return (0);
8508 }
8509
8510 case '\\':
8511 if ((c = *p++) == '\0')
8512 return (0);
8513 /*FALLTHRU*/
8514
8515 default:
8516 if (c != s1)
8517 return (0);
8518 /*FALLTHRU*/
8519
8520 case '?':
8521 if (s1 != '\0')
8522 goto top;
8523 return (0);
8524
8525 case '*':
8526 while (*p == '*')
8527 p++; /* consecutive *'s are identical to a single one */
8528
8529 if (*p == '\0')
8530 return (1);
8531
8532 for (s = olds; *s != '\0'; s++) {
8533 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
8534 return (gs);
8535 }
8536
8537 return (0);
8538 }
8539 }
8540
8541 /*ARGSUSED*/
8542 static int
8543 dtrace_match_string(const char *s, const char *p, int depth)
8544 {
8545 return (s != NULL && strcmp(s, p) == 0);
8546 }
8547
8548 /*ARGSUSED*/
8549 static int
8550 dtrace_match_nul(const char *s, const char *p, int depth)
8551 {
8552 return (1); /* always match the empty pattern */
8553 }
8554
8555 /*ARGSUSED*/
8556 static int
8557 dtrace_match_nonzero(const char *s, const char *p, int depth)
8558 {
8559 return (s != NULL && s[0] != '\0');
8560 }
8561
8562 static int
8563 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8564 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
8565 {
8566 dtrace_probe_t template, *probe;
8567 dtrace_hash_t *hash = NULL;
8568 int len, best = INT_MAX, nmatched = 0;
8569 dtrace_id_t i;
8570
8571 ASSERT(MUTEX_HELD(&dtrace_lock));
8572
8573 /*
8574 * If the probe ID is specified in the key, just lookup by ID and
8575 * invoke the match callback once if a matching probe is found.
8576 */
8577 if (pkp->dtpk_id != DTRACE_IDNONE) {
8578 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8579 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8580 (void) (*matched)(probe, arg);
8581 nmatched++;
8582 }
8583 return (nmatched);
8584 }
8585
8586 template.dtpr_mod = (char *)pkp->dtpk_mod;
8587 template.dtpr_func = (char *)pkp->dtpk_func;
8588 template.dtpr_name = (char *)pkp->dtpk_name;
8589
8590 /*
8591 * We want to search on whichever of the module name, function name,
8592 * and probe name is most selective. So for each one that is not a
8593 * glob pattern or empty string, we perform a lookup in the corresponding
8594 * hash and use the hash table with the fewest collisions to do our search.
8595 */
8596 if (pkp->dtpk_mmatch == &dtrace_match_string &&
8597 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8598 best = len;
8599 hash = dtrace_bymod;
8600 }
8601
8602 if (pkp->dtpk_fmatch == &dtrace_match_string &&
8603 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8604 best = len;
8605 hash = dtrace_byfunc;
8606 }
8607
8608 if (pkp->dtpk_nmatch == &dtrace_match_string &&
8609 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8610 best = len;
8611 hash = dtrace_byname;
8612 }
8613
8614 /*
8615 * If we did not select a hash table, iterate over every probe and
8616 * invoke our callback for each one that matches our input probe key.
8617 */
8618 if (hash == NULL) {
8619 for (i = 0; i < dtrace_nprobes; i++) {
8620 if ((probe = dtrace_probes[i]) == NULL ||
8621 dtrace_match_probe(probe, pkp, priv, uid,
8622 zoneid) <= 0)
8623 continue;
8624
8625 nmatched++;
8626
8627 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
8628 break;
8629 }
8630
8631 return (nmatched);
8632 }
8633
8634 /*
8635 * If we selected a hash table, iterate over each probe of the same key
8636 * name and invoke the callback for every probe that matches the other
8637 * attributes of our input probe key.
8638 */
8639 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8640 probe = *(DTRACE_HASHNEXT(hash, probe))) {
8641
8642 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8643 continue;
8644
8645 nmatched++;
8646
8647 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT)
8648 break;
8649 }
8650
8651 return (nmatched);
8652 }
8653
8654 /*
8655 * Return the function pointer that should be used to compare the
8656 * specified pattern with a string. For NULL or empty patterns, we select
8657 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
8658 * For non-empty non-glob strings, we use dtrace_match_string().
8659 */
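/*
 * For example, dtrace_probekey_func("syscall") returns &dtrace_match_string,
 * dtrace_probekey_func("sys*") returns &dtrace_match_glob, and
 * dtrace_probekey_func(NULL) or dtrace_probekey_func("") returns
 * &dtrace_match_nul.
 */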
8660 static dtrace_probekey_f *
8661 dtrace_probekey_func(const char *p)
8662 {
8663 char c;
8664
8665 if (p == NULL || *p == '\0')
8666 return (&dtrace_match_nul);
8667
8668 while ((c = *p++) != '\0') {
8669 if (c == '[' || c == '?' || c == '*' || c == '\\')
8670 return (&dtrace_match_glob);
8671 }
8672
8673 return (&dtrace_match_string);
8674 }
8675
8676 /*
8677 * Build a probe comparison key for use with dtrace_match_probe() from the
8678 * given probe description. By convention, a null key only matches anchored
8679 * probes: if each field is the empty string, reset dtpk_fmatch to
8680 * dtrace_match_nonzero().
8681 */
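/*
 * For instance, an entirely empty probe description (the ":::"-style
 * description a consumer can specify) produces a key whose every match
 * function is dtrace_match_nul(); the reset described above then makes
 * dtpk_fmatch require a non-empty function name, so such a key matches
 * only anchored probes.
 */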
8682 static void
8683 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8684 {
8685 pkp->dtpk_prov = pdp->dtpd_provider;
8686 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8687
8688 pkp->dtpk_mod = pdp->dtpd_mod;
8689 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8690
8691 pkp->dtpk_func = pdp->dtpd_func;
8692 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8693
8694 pkp->dtpk_name = pdp->dtpd_name;
8695 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8696
8697 pkp->dtpk_id = pdp->dtpd_id;
8698
8699 if (pkp->dtpk_id == DTRACE_IDNONE &&
8700 pkp->dtpk_pmatch == &dtrace_match_nul &&
8701 pkp->dtpk_mmatch == &dtrace_match_nul &&
8702 pkp->dtpk_fmatch == &dtrace_match_nul &&
8703 pkp->dtpk_nmatch == &dtrace_match_nul)
8704 pkp->dtpk_fmatch = &dtrace_match_nonzero;
8705 }
8706
8707 /*
8708 * DTrace Provider-to-Framework API Functions
8709 *
8710 * These functions implement much of the Provider-to-Framework API, as
8711 * described in <sys/dtrace.h>. The parts of the API not in this section are
8712 * the functions in the API for probe management (found below), and
8713 * dtrace_probe() itself (found above).
8714 */
8715
8716 /*
8717 * Register the calling provider with the DTrace framework. This should
8718 * generally be called by DTrace providers in their attach(9E) entry point.
8719 */
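/*
 * A sketch of a typical registration (the names here are hypothetical, not
 * part of this file): a provider fills in a dtrace_pops_t with at least
 * dtps_provide (or dtps_provide_module), dtps_enable, dtps_disable and
 * dtps_destroy, and then, from its attach routine, calls something like:
 *
 *	static dtrace_provider_id_t foo_id;
 *
 *	(void) dtrace_register("foo", &foo_attr, DTRACE_PRIV_USER,
 *	    NULL, &foo_pops, NULL, &foo_id);
 *
 * The returned identifier is subsequently passed to dtrace_probe_create(),
 * dtrace_probe_lookup() and, at detach time, dtrace_unregister().
 */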
8720 int
8721 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8722 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8723 {
8724 dtrace_provider_t *provider;
8725
8726 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8727 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8728 "arguments", name ? name : "<NULL>");
8729 return (EINVAL);
8730 }
8731
8732 if (name[0] == '\0' || dtrace_badname(name)) {
8733 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8734 "provider name", name);
8735 return (EINVAL);
8736 }
8737
8738 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8739 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8740 pops->dtps_destroy == NULL ||
8741 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8742 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8743 "provider ops", name);
8744 return (EINVAL);
8745 }
8746
8747 if (dtrace_badattr(&pap->dtpa_provider) ||
8748 dtrace_badattr(&pap->dtpa_mod) ||
8749 dtrace_badattr(&pap->dtpa_func) ||
8750 dtrace_badattr(&pap->dtpa_name) ||
8751 dtrace_badattr(&pap->dtpa_args)) {
8752 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8753 "provider attributes", name);
8754 return (EINVAL);
8755 }
8756
8757 if (priv & ~DTRACE_PRIV_ALL) {
8758 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8759 "privilege attributes", name);
8760 return (EINVAL);
8761 }
8762
8763 if ((priv & DTRACE_PRIV_KERNEL) &&
8764 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8765 pops->dtps_usermode == NULL) {
8766 cmn_err(CE_WARN, "failed to register provider '%s': need "
8767 "dtps_usermode() op for given privilege attributes", name);
8768 return (EINVAL);
8769 }
8770
8771 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8772 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8773 (void) strcpy(provider->dtpv_name, name);
8774
8775 provider->dtpv_attr = *pap;
8776 provider->dtpv_priv.dtpp_flags = priv;
8777 if (cr != NULL) {
8778 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8779 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8780 }
8781 provider->dtpv_pops = *pops;
8782
8783 if (pops->dtps_provide == NULL) {
8784 ASSERT(pops->dtps_provide_module != NULL);
8785 provider->dtpv_pops.dtps_provide =
8786 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
8787 }
8788
8789 if (pops->dtps_provide_module == NULL) {
8790 ASSERT(pops->dtps_provide != NULL);
8791 provider->dtpv_pops.dtps_provide_module =
8792 (void (*)(void *, modctl_t *))dtrace_nullop;
8793 }
8794
8795 if (pops->dtps_suspend == NULL) {
8796 ASSERT(pops->dtps_resume == NULL);
8797 provider->dtpv_pops.dtps_suspend =
8798 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8799 provider->dtpv_pops.dtps_resume =
8800 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
8801 }
8802
8803 provider->dtpv_arg = arg;
8804 *idp = (dtrace_provider_id_t)provider;
8805
8806 if (pops == &dtrace_provider_ops) {
8807 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8808 ASSERT(MUTEX_HELD(&dtrace_lock));
8809 ASSERT(dtrace_anon.dta_enabling == NULL);
8810
8811 /*
8812 * We make sure that the DTrace provider is at the head of
8813 * the provider chain.
8814 */
8815 provider->dtpv_next = dtrace_provider;
8816 dtrace_provider = provider;
8817 return (0);
8818 }
8819
8820 mutex_enter(&dtrace_provider_lock);
8821 mutex_enter(&dtrace_lock);
8822
8823 /*
8824 * If there is at least one provider registered, we'll add this
8825 * provider after the first provider.
8826 */
8827 if (dtrace_provider != NULL) {
8828 provider->dtpv_next = dtrace_provider->dtpv_next;
8829 dtrace_provider->dtpv_next = provider;
8830 } else {
8831 dtrace_provider = provider;
8832 }
8833
8834 if (dtrace_retained != NULL) {
8835 dtrace_enabling_provide(provider);
8836
8837 /*
8838 * Now we need to call dtrace_enabling_matchall() -- which
8839 * will acquire cpu_lock and dtrace_lock. We therefore need
8840 * to drop all of our locks before calling into it...
8841 */
8842 mutex_exit(&dtrace_lock);
8843 mutex_exit(&dtrace_provider_lock);
8844 dtrace_enabling_matchall();
8845
8846 return (0);
8847 }
8848
8849 mutex_exit(&dtrace_lock);
8850 mutex_exit(&dtrace_provider_lock);
8851
8852 return (0);
8853 }
8854
8855 /*
8856 * Unregister the specified provider from the DTrace framework. This should
8857 * generally be called by DTrace providers in their detach(9E) entry point.
8858 */
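/*
 * As the logic below spells out, unregistration can fail: EBUSY is returned
 * if consumers still have /dev/dtrace open (or anonymous state has ECBs) and
 * the provider has not been invalidated, or if enabled probes remain and
 * their enablings are not eligible for reaping; EAGAIN is returned when
 * enabled probes remain but the provider went defunct recently enough that
 * an asynchronous reap has been dispatched, so the caller should simply
 * retry later.
 */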
8859 int
8860 dtrace_unregister(dtrace_provider_id_t id)
8861 {
8862 dtrace_provider_t *old = (dtrace_provider_t *)id;
8863 dtrace_provider_t *prev = NULL;
8864 int i, self = 0, noreap = 0;
8865 dtrace_probe_t *probe, *first = NULL;
8866
8867 if (old->dtpv_pops.dtps_enable ==
8868 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
8869 /*
8870 * If DTrace itself is the provider, we're called with locks
8871 * already held.
8872 */
8873 ASSERT(old == dtrace_provider);
8874 #ifdef illumos
8875 ASSERT(dtrace_devi != NULL);
8876 #endif
8877 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
8878 ASSERT(MUTEX_HELD(&dtrace_lock));
8879 self = 1;
8880
8881 if (dtrace_provider->dtpv_next != NULL) {
8882 /*
8883 * There's another provider here; return failure.
8884 */
8885 return (EBUSY);
8886 }
8887 } else {
8888 mutex_enter(&dtrace_provider_lock);
8889 #ifdef illumos
8890 mutex_enter(&mod_lock);
8891 #endif
8892 mutex_enter(&dtrace_lock);
8893 }
8894
8895 /*
8896 * If anyone has /dev/dtrace open, or if there are anonymous enabled
8897 * probes, we refuse to let providers slither away, unless this
8898 * provider has already been explicitly invalidated.
8899 */
8900 if (!old->dtpv_defunct &&
8901 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
8902 dtrace_anon.dta_state->dts_necbs > 0))) {
8903 if (!self) {
8904 mutex_exit(&dtrace_lock);
8905 #ifdef illumos
8906 mutex_exit(&mod_lock);
8907 #endif
8908 mutex_exit(&dtrace_provider_lock);
8909 }
8910 return (EBUSY);
8911 }
8912
8913 /*
8914 * Attempt to destroy the probes associated with this provider.
8915 */
8916 for (i = 0; i < dtrace_nprobes; i++) {
8917 if ((probe = dtrace_probes[i]) == NULL)
8918 continue;
8919
8920 if (probe->dtpr_provider != old)
8921 continue;
8922
8923 if (probe->dtpr_ecb == NULL)
8924 continue;
8925
8926 /*
8927 * If we are trying to unregister a defunct provider, and the
8928 * provider was made defunct within the interval dictated by
8929 * dtrace_unregister_defunct_reap, we'll (asynchronously)
8930 * attempt to reap our enablings. To denote that the provider
8931 * should reattempt to unregister itself at some point in the
8932 * future, we will return a differentiable error code (EAGAIN
8933 * instead of EBUSY) in this case.
8934 */
8935 if (dtrace_gethrtime() - old->dtpv_defunct >
8936 dtrace_unregister_defunct_reap)
8937 noreap = 1;
8938
8939 if (!self) {
8940 mutex_exit(&dtrace_lock);
8941 #ifdef illumos
8942 mutex_exit(&mod_lock);
8943 #endif
8944 mutex_exit(&dtrace_provider_lock);
8945 }
8946
8947 if (noreap)
8948 return (EBUSY);
8949
8950 (void) taskq_dispatch(dtrace_taskq,
8951 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
8952
8953 return (EAGAIN);
8954 }
8955
8956 /*
8957 * All of the probes for this provider are disabled; we can safely
8958 * remove all of them from their hash chains and from the probe array.
8959 */
8960 for (i = 0; i < dtrace_nprobes; i++) {
8961 if ((probe = dtrace_probes[i]) == NULL)
8962 continue;
8963
8964 if (probe->dtpr_provider != old)
8965 continue;
8966
8967 dtrace_probes[i] = NULL;
8968
8969 dtrace_hash_remove(dtrace_bymod, probe);
8970 dtrace_hash_remove(dtrace_byfunc, probe);
8971 dtrace_hash_remove(dtrace_byname, probe);
8972
8973 if (first == NULL) {
8974 first = probe;
8975 probe->dtpr_nextmod = NULL;
8976 } else {
8977 probe->dtpr_nextmod = first;
8978 first = probe;
8979 }
8980 }
8981
8982 /*
8983 * The provider's probes have been removed from the hash chains and
8984 * from the probe array. Now issue a dtrace_sync() to be sure that
8985 * everyone has cleared out from any probe array processing.
8986 */
8987 dtrace_sync();
8988
8989 for (probe = first; probe != NULL; probe = first) {
8990 first = probe->dtpr_nextmod;
8991
8992 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
8993 probe->dtpr_arg);
8994 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
8995 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
8996 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
8997 #ifdef illumos
8998 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
8999 #else
9000 free_unr(dtrace_arena, probe->dtpr_id);
9001 #endif
9002 kmem_free(probe, sizeof (dtrace_probe_t));
9003 }
9004
9005 if ((prev = dtrace_provider) == old) {
9006 #ifdef illumos
9007 ASSERT(self || dtrace_devi == NULL);
9008 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
9009 #endif
9010 dtrace_provider = old->dtpv_next;
9011 } else {
9012 while (prev != NULL && prev->dtpv_next != old)
9013 prev = prev->dtpv_next;
9014
9015 if (prev == NULL) {
9016 panic("attempt to unregister non-existent "
9017 "dtrace provider %p\n", (void *)id);
9018 }
9019
9020 prev->dtpv_next = old->dtpv_next;
9021 }
9022
9023 if (!self) {
9024 mutex_exit(&dtrace_lock);
9025 #ifdef illumos
9026 mutex_exit(&mod_lock);
9027 #endif
9028 mutex_exit(&dtrace_provider_lock);
9029 }
9030
9031 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
9032 kmem_free(old, sizeof (dtrace_provider_t));
9033
9034 return (0);
9035 }
9036
9037 /*
9038 * Invalidate the specified provider. All subsequent probe lookups for the
9039 * specified provider will fail, but its probes will not be removed.
9040 */
9041 void
9042 dtrace_invalidate(dtrace_provider_id_t id)
9043 {
9044 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
9045
9046 ASSERT(pvp->dtpv_pops.dtps_enable !=
9047 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
9048
9049 mutex_enter(&dtrace_provider_lock);
9050 mutex_enter(&dtrace_lock);
9051
9052 pvp->dtpv_defunct = dtrace_gethrtime();
9053
9054 mutex_exit(&dtrace_lock);
9055 mutex_exit(&dtrace_provider_lock);
9056 }
9057
9058 /*
9059 * Indicate whether or not DTrace has attached.
9060 */
9061 int
9062 dtrace_attached(void)
9063 {
9064 /*
9065 * dtrace_provider will be non-NULL iff the DTrace driver has
9066 * attached. (It's non-NULL because DTrace is always itself a
9067 * provider.)
9068 */
9069 return (dtrace_provider != NULL);
9070 }
9071
9072 /*
9073 * Remove all the unenabled probes for the given provider. This function is
9074 * not unlike dtrace_unregister(), except that it doesn't remove the provider
9075 * -- just as many of its associated probes as it can.
9076 */
9077 int
9078 dtrace_condense(dtrace_provider_id_t id)
9079 {
9080 dtrace_provider_t *prov = (dtrace_provider_t *)id;
9081 int i;
9082 dtrace_probe_t *probe;
9083
9084 /*
9085 * Make sure this isn't the dtrace provider itself.
9086 */
9087 ASSERT(prov->dtpv_pops.dtps_enable !=
9088 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop);
9089
9090 mutex_enter(&dtrace_provider_lock);
9091 mutex_enter(&dtrace_lock);
9092
9093 /*
9094 * Attempt to destroy the probes associated with this provider.
9095 */
9096 for (i = 0; i < dtrace_nprobes; i++) {
9097 if ((probe = dtrace_probes[i]) == NULL)
9098 continue;
9099
9100 if (probe->dtpr_provider != prov)
9101 continue;
9102
9103 if (probe->dtpr_ecb != NULL)
9104 continue;
9105
9106 dtrace_probes[i] = NULL;
9107
9108 dtrace_hash_remove(dtrace_bymod, probe);
9109 dtrace_hash_remove(dtrace_byfunc, probe);
9110 dtrace_hash_remove(dtrace_byname, probe);
9111
9112 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
9113 probe->dtpr_arg);
9114 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
9115 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
9116 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
9117 kmem_free(probe, sizeof (dtrace_probe_t));
9118 #ifdef illumos
9119 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
9120 #else
9121 free_unr(dtrace_arena, i + 1);
9122 #endif
9123 }
9124
9125 mutex_exit(&dtrace_lock);
9126 mutex_exit(&dtrace_provider_lock);
9127
9128 return (0);
9129 }
9130
9131 /*
9132 * DTrace Probe Management Functions
9133 *
9134 * The functions in this section perform the DTrace probe management,
9135 * including functions to create probes, look up probes, and call into the
9136 * providers to request that probes be provided. Some of these functions are
9137 * in the Provider-to-Framework API; these functions can be identified by the
9138 * fact that they are not declared "static".
9139 */
9140
9141 /*
9142 * Create a probe with the specified module name, function name, and name.
9143 */
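/*
 * A provider typically calls this from its dtps_provide() entry point; a
 * hypothetical sketch (the names are illustrative only):
 *
 *	(void) dtrace_probe_create(foo_id, "foomod", "foofunc", "entry",
 *	    0, foo_private_arg);
 *
 * where the trailing arguments are the number of artificial frames to skip
 * when walking the stack from this probe and an opaque per-probe argument
 * that is later handed back through the provider's dtps_* entry points.
 */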
9144 dtrace_id_t
9145 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
9146 const char *func, const char *name, int aframes, void *arg)
9147 {
9148 dtrace_probe_t *probe, **probes;
9149 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
9150 dtrace_id_t id;
9151
9152 if (provider == dtrace_provider) {
9153 ASSERT(MUTEX_HELD(&dtrace_lock));
9154 } else {
9155 mutex_enter(&dtrace_lock);
9156 }
9157
9158 #ifdef illumos
9159 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
9160 VM_BESTFIT | VM_SLEEP);
9161 #else
9162 id = alloc_unr(dtrace_arena);
9163 #endif
9164 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
9165
9166 probe->dtpr_id = id;
9167 probe->dtpr_gen = dtrace_probegen++;
9168 probe->dtpr_mod = dtrace_strdup(mod);
9169 probe->dtpr_func = dtrace_strdup(func);
9170 probe->dtpr_name = dtrace_strdup(name);
9171 probe->dtpr_arg = arg;
9172 probe->dtpr_aframes = aframes;
9173 probe->dtpr_provider = provider;
9174
9175 dtrace_hash_add(dtrace_bymod, probe);
9176 dtrace_hash_add(dtrace_byfunc, probe);
9177 dtrace_hash_add(dtrace_byname, probe);
9178
9179 if (id - 1 >= dtrace_nprobes) {
9180 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
9181 size_t nsize = osize << 1;
9182
9183 if (nsize == 0) {
9184 ASSERT(osize == 0);
9185 ASSERT(dtrace_probes == NULL);
9186 nsize = sizeof (dtrace_probe_t *);
9187 }
9188
9189 probes = kmem_zalloc(nsize, KM_SLEEP);
9190
9191 if (dtrace_probes == NULL) {
9192 ASSERT(osize == 0);
9193 dtrace_probes = probes;
9194 dtrace_nprobes = 1;
9195 } else {
9196 dtrace_probe_t **oprobes = dtrace_probes;
9197
9198 bcopy(oprobes, probes, osize);
9199 dtrace_membar_producer();
9200 dtrace_probes = probes;
9201
9202 dtrace_sync();
9203
9204 /*
9205 * All CPUs are now seeing the new probes array; we can
9206 * safely free the old array.
9207 */
9208 kmem_free(oprobes, osize);
9209 dtrace_nprobes <<= 1;
9210 }
9211
9212 ASSERT(id - 1 < dtrace_nprobes);
9213 }
9214
9215 ASSERT(dtrace_probes[id - 1] == NULL);
9216 dtrace_probes[id - 1] = probe;
9217
9218 if (provider != dtrace_provider)
9219 mutex_exit(&dtrace_lock);
9220
9221 return (id);
9222 }
9223
9224 static dtrace_probe_t *
9225 dtrace_probe_lookup_id(dtrace_id_t id)
9226 {
9227 ASSERT(MUTEX_HELD(&dtrace_lock));
9228
9229 if (id == 0 || id > dtrace_nprobes)
9230 return (NULL);
9231
9232 return (dtrace_probes[id - 1]);
9233 }
9234
9235 static int
9236 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
9237 {
9238 *((dtrace_id_t *)arg) = probe->dtpr_id;
9239
9240 return (DTRACE_MATCH_DONE);
9241 }
9242
9243 /*
9244 * Look up a probe based on provider and one or more of module name, function
9245 * name and probe name.
9246 */
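/*
 * For example (hypothetical names, following the common provider idiom), a
 * dtps_provide() implementation will often bail out early if its probes
 * already exist:
 *
 *	if (dtrace_probe_lookup(foo_id, "foomod", "foofunc", "entry") != 0)
 *		return;
 */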
9247 dtrace_id_t
9248 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
9249 char *func, char *name)
9250 {
9251 dtrace_probekey_t pkey;
9252 dtrace_id_t id;
9253 int match;
9254
9255 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
9256 pkey.dtpk_pmatch = &dtrace_match_string;
9257 pkey.dtpk_mod = mod;
9258 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
9259 pkey.dtpk_func = func;
9260 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
9261 pkey.dtpk_name = name;
9262 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
9263 pkey.dtpk_id = DTRACE_IDNONE;
9264
9265 mutex_enter(&dtrace_lock);
9266 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
9267 dtrace_probe_lookup_match, &id);
9268 mutex_exit(&dtrace_lock);
9269
9270 ASSERT(match == 1 || match == 0);
9271 return (match ? id : 0);
9272 }
9273
9274 /*
9275 * Returns the probe argument associated with the specified probe.
9276 */
9277 void *
9278 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
9279 {
9280 dtrace_probe_t *probe;
9281 void *rval = NULL;
9282
9283 mutex_enter(&dtrace_lock);
9284
9285 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
9286 probe->dtpr_provider == (dtrace_provider_t *)id)
9287 rval = probe->dtpr_arg;
9288
9289 mutex_exit(&dtrace_lock);
9290
9291 return (rval);
9292 }
9293
9294 /*
9295 * Copy a probe into a probe description.
9296 */
9297 static void
9298 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
9299 {
9300 bzero(pdp, sizeof (dtrace_probedesc_t));
9301 pdp->dtpd_id = prp->dtpr_id;
9302
9303 (void) strncpy(pdp->dtpd_provider,
9304 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
9305
9306 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
9307 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
9308 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
9309 }
9310
9311 /*
9312 * Called to indicate that a probe -- or probes -- should be provided by a
9313 * specified provider. If the specified description is NULL, the provider will
9314 * be told to provide all of its probes. (This is done whenever a new
9315 * consumer comes along, or whenever a retained enabling is to be matched.) If
9316 * the specified description is non-NULL, the provider is given the
9317 * opportunity to dynamically provide the specified probe, allowing providers
9318 * to support the creation of probes on-the-fly. (So-called _autocreated_
9319 * probes.) If the provider is NULL, the operations will be applied to all
9320 * providers; if the provider is non-NULL the operations will only be applied
9321 * to the specified provider. The dtrace_provider_lock must be held, and the
9322 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
9323 * will need to grab the dtrace_lock when it reenters the framework through
9324 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
9325 */
9326 static void
9327 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
9328 {
9329 #ifdef illumos
9330 modctl_t *ctl;
9331 #endif
9332 int all = 0;
9333
9334 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9335
9336 if (prv == NULL) {
9337 all = 1;
9338 prv = dtrace_provider;
9339 }
9340
9341 do {
9342 /*
9343 * First, call the blanket provide operation.
9344 */
9345 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
9346
9347 #ifdef illumos
9348 /*
9349 * Now call the per-module provide operation. We will grab
9350 * mod_lock to prevent the list from being modified. Note
9351 * that this also prevents the mod_busy bits from changing.
9352 * (mod_busy can only be changed with mod_lock held.)
9353 */
9354 mutex_enter(&mod_lock);
9355
9356 ctl = &modules;
9357 do {
9358 if (ctl->mod_busy || ctl->mod_mp == NULL)
9359 continue;
9360
9361 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
9362
9363 } while ((ctl = ctl->mod_next) != &modules);
9364
9365 mutex_exit(&mod_lock);
9366 #endif
9367 } while (all && (prv = prv->dtpv_next) != NULL);
9368 }
9369
9370 #ifdef illumos
9371 /*
9372 * Iterate over each probe, and call the Framework-to-Provider API function
9373 * denoted by offs.
9374 */
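/*
 * The offs argument is a byte offset into the dtrace_pops_t; the expected
 * callers are the framework's suspend/resume paths elsewhere in this file,
 * invoking this as, e.g.,
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 *
 * to call each enabled probe's provider suspend (or resume) entry point.
 */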
9375 static void
9376 dtrace_probe_foreach(uintptr_t offs)
9377 {
9378 dtrace_provider_t *prov;
9379 void (*func)(void *, dtrace_id_t, void *);
9380 dtrace_probe_t *probe;
9381 dtrace_icookie_t cookie;
9382 int i;
9383
9384 /*
9385 * We disable interrupts to walk through the probe array. This is
9386 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
9387 * won't see stale data.
9388 */
9389 cookie = dtrace_interrupt_disable();
9390
9391 for (i = 0; i < dtrace_nprobes; i++) {
9392 if ((probe = dtrace_probes[i]) == NULL)
9393 continue;
9394
9395 if (probe->dtpr_ecb == NULL) {
9396 /*
9397 * This probe isn't enabled -- don't call the function.
9398 */
9399 continue;
9400 }
9401
9402 prov = probe->dtpr_provider;
9403 func = *((void(**)(void *, dtrace_id_t, void *))
9404 ((uintptr_t)&prov->dtpv_pops + offs));
9405
9406 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
9407 }
9408
9409 dtrace_interrupt_enable(cookie);
9410 }
9411 #endif
9412
9413 static int
9414 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
9415 {
9416 dtrace_probekey_t pkey;
9417 uint32_t priv;
9418 uid_t uid;
9419 zoneid_t zoneid;
9420
9421 ASSERT(MUTEX_HELD(&dtrace_lock));
9422 dtrace_ecb_create_cache = NULL;
9423
9424 if (desc == NULL) {
9425 /*
9426 * If we're passed a NULL description, we're being asked to
9427 * create an ECB with a NULL probe.
9428 */
9429 (void) dtrace_ecb_create_enable(NULL, enab);
9430 return (0);
9431 }
9432
9433 dtrace_probekey(desc, &pkey);
9434 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
9435 &priv, &uid, &zoneid);
9436
9437 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
9438 enab));
9439 }
9440
9441 /*
9442 * DTrace Helper Provider Functions
9443 */
9444 static void
9445 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
9446 {
9447 attr->dtat_name = DOF_ATTR_NAME(dofattr);
9448 attr->dtat_data = DOF_ATTR_DATA(dofattr);
9449 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
9450 }
9451
9452 static void
9453 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
9454 const dof_provider_t *dofprov, char *strtab)
9455 {
9456 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
9457 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
9458 dofprov->dofpv_provattr);
9459 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
9460 dofprov->dofpv_modattr);
9461 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
9462 dofprov->dofpv_funcattr);
9463 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
9464 dofprov->dofpv_nameattr);
9465 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
9466 dofprov->dofpv_argsattr);
9467 }
9468
9469 static void
9470 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9471 {
9472 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9473 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9474 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
9475 dof_provider_t *provider;
9476 dof_probe_t *probe;
9477 uint32_t *off, *enoff;
9478 uint8_t *arg;
9479 char *strtab;
9480 uint_t i, nprobes;
9481 dtrace_helper_provdesc_t dhpv;
9482 dtrace_helper_probedesc_t dhpb;
9483 dtrace_meta_t *meta = dtrace_meta_pid;
9484 dtrace_mops_t *mops = &meta->dtm_mops;
9485 void *parg;
9486
9487 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9488 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9489 provider->dofpv_strtab * dof->dofh_secsize);
9490 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9491 provider->dofpv_probes * dof->dofh_secsize);
9492 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9493 provider->dofpv_prargs * dof->dofh_secsize);
9494 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9495 provider->dofpv_proffs * dof->dofh_secsize);
9496
9497 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9498 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
9499 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
9500 enoff = NULL;
9501
9502 /*
9503 * See dtrace_helper_provider_validate().
9504 */
9505 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
9506 provider->dofpv_prenoffs != DOF_SECT_NONE) {
9507 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9508 provider->dofpv_prenoffs * dof->dofh_secsize);
9509 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
9510 }
9511
9512 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
9513
9514 /*
9515 * Create the provider.
9516 */
9517 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9518
9519 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
9520 return;
9521
9522 meta->dtm_count++;
9523
9524 /*
9525 * Create the probes.
9526 */
9527 for (i = 0; i < nprobes; i++) {
9528 probe = (dof_probe_t *)(uintptr_t)(daddr +
9529 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
9530
9531 /* See the check in dtrace_helper_provider_validate(). */
9532 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN)
9533 continue;
9534
9535 dhpb.dthpb_mod = dhp->dofhp_mod;
9536 dhpb.dthpb_func = strtab + probe->dofpr_func;
9537 dhpb.dthpb_name = strtab + probe->dofpr_name;
9538 dhpb.dthpb_base = probe->dofpr_addr;
9539 dhpb.dthpb_offs = off + probe->dofpr_offidx;
9540 dhpb.dthpb_noffs = probe->dofpr_noffs;
9541 if (enoff != NULL) {
9542 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
9543 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
9544 } else {
9545 dhpb.dthpb_enoffs = NULL;
9546 dhpb.dthpb_nenoffs = 0;
9547 }
9548 dhpb.dthpb_args = arg + probe->dofpr_argidx;
9549 dhpb.dthpb_nargc = probe->dofpr_nargc;
9550 dhpb.dthpb_xargc = probe->dofpr_xargc;
9551 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
9552 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
9553
9554 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
9555 }
9556 }
9557
9558 static void
9559 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
9560 {
9561 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9562 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9563 int i;
9564
9565 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9566
9567 for (i = 0; i < dof->dofh_secnum; i++) {
9568 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9569 dof->dofh_secoff + i * dof->dofh_secsize);
9570
9571 if (sec->dofs_type != DOF_SECT_PROVIDER)
9572 continue;
9573
9574 dtrace_helper_provide_one(dhp, sec, pid);
9575 }
9576
9577 /*
9578 * We may have just created probes, so we must now rematch against
9579 * any retained enablings. Note that this call will acquire both
9580 * cpu_lock and dtrace_lock; the fact that we are holding
9581 * dtrace_meta_lock now is what defines the ordering with respect to
9582 * these three locks.
9583 */
9584 dtrace_enabling_matchall();
9585 }
9586
9587 static void
9588 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9589 {
9590 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9591 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9592 dof_sec_t *str_sec;
9593 dof_provider_t *provider;
9594 char *strtab;
9595 dtrace_helper_provdesc_t dhpv;
9596 dtrace_meta_t *meta = dtrace_meta_pid;
9597 dtrace_mops_t *mops = &meta->dtm_mops;
9598
9599 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9600 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9601 provider->dofpv_strtab * dof->dofh_secsize);
9602
9603 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9604
9605 /*
9606 * Create the provider.
9607 */
9608 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9609
9610 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9611
9612 meta->dtm_count--;
9613 }
9614
9615 static void
9616 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9617 {
9618 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9619 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9620 int i;
9621
9622 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9623
9624 for (i = 0; i < dof->dofh_secnum; i++) {
9625 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9626 dof->dofh_secoff + i * dof->dofh_secsize);
9627
9628 if (sec->dofs_type != DOF_SECT_PROVIDER)
9629 continue;
9630
9631 dtrace_helper_provider_remove_one(dhp, sec, pid);
9632 }
9633 }
9634
9635 /*
9636 * DTrace Meta Provider-to-Framework API Functions
9637 *
9638 * These functions implement the Meta Provider-to-Framework API, as described
9639 * in <sys/dtrace.h>.
9640 */
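/*
 * In practice the expected consumer of this API is the user-land (pid/USDT)
 * meta-provider, which registers itself along the lines of (sketch only;
 * the names are hypothetical):
 *
 *	static dtrace_meta_provider_id_t foo_meta_id;
 *
 *	(void) dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id);
 *
 * supplying dtms_create_probe, dtms_provide_pid and dtms_remove_pid in the
 * dtrace_mops_t, as dtrace_meta_register() below requires.
 */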
9641 int
9642 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9643 dtrace_meta_provider_id_t *idp)
9644 {
9645 dtrace_meta_t *meta;
9646 dtrace_helpers_t *help, *next;
9647 int i;
9648
9649 *idp = DTRACE_METAPROVNONE;
9650
9651 /*
9652 * We strictly don't need the name, but we hold onto it for
9653 * debuggability. All hail error queues!
9654 */
9655 if (name == NULL) {
9656 cmn_err(CE_WARN, "failed to register meta-provider: "
9657 "invalid name");
9658 return (EINVAL);
9659 }
9660
9661 if (mops == NULL ||
9662 mops->dtms_create_probe == NULL ||
9663 mops->dtms_provide_pid == NULL ||
9664 mops->dtms_remove_pid == NULL) {
9665 cmn_err(CE_WARN, "failed to register meta-provider %s: "
9666 "invalid ops", name);
9667 return (EINVAL);
9668 }
9669
9670 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9671 meta->dtm_mops = *mops;
9672 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9673 (void) strcpy(meta->dtm_name, name);
9674 meta->dtm_arg = arg;
9675
9676 mutex_enter(&dtrace_meta_lock);
9677 mutex_enter(&dtrace_lock);
9678
9679 if (dtrace_meta_pid != NULL) {
9680 mutex_exit(&dtrace_lock);
9681 mutex_exit(&dtrace_meta_lock);
9682 cmn_err(CE_WARN, "failed to register meta-provider %s: "
9683 "user-land meta-provider exists", name);
9684 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9685 kmem_free(meta, sizeof (dtrace_meta_t));
9686 return (EINVAL);
9687 }
9688
9689 dtrace_meta_pid = meta;
9690 *idp = (dtrace_meta_provider_id_t)meta;
9691
9692 /*
9693 * If there are providers and probes ready to go, pass them
9694 * off to the new meta provider now.
9695 */
9696
9697 help = dtrace_deferred_pid;
9698 dtrace_deferred_pid = NULL;
9699
9700 mutex_exit(&dtrace_lock);
9701
9702 while (help != NULL) {
9703 for (i = 0; i < help->dthps_nprovs; i++) {
9704 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9705 help->dthps_pid);
9706 }
9707
9708 next = help->dthps_next;
9709 help->dthps_next = NULL;
9710 help->dthps_prev = NULL;
9711 help->dthps_deferred = 0;
9712 help = next;
9713 }
9714
9715 mutex_exit(&dtrace_meta_lock);
9716
9717 return (0);
9718 }
9719
9720 int
9721 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9722 {
9723 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9724
9725 mutex_enter(&dtrace_meta_lock);
9726 mutex_enter(&dtrace_lock);
9727
9728 if (old == dtrace_meta_pid) {
9729 pp = &dtrace_meta_pid;
9730 } else {
9731 panic("attempt to unregister non-existent "
9732 "dtrace meta-provider %p\n", (void *)old);
9733 }
9734
9735 if (old->dtm_count != 0) {
9736 mutex_exit(&dtrace_lock);
9737 mutex_exit(&dtrace_meta_lock);
9738 return (EBUSY);
9739 }
9740
9741 *pp = NULL;
9742
9743 mutex_exit(&dtrace_lock);
9744 mutex_exit(&dtrace_meta_lock);
9745
9746 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9747 kmem_free(old, sizeof (dtrace_meta_t));
9748
9749 return (0);
9750 }
9751
9752
9753 /*
9754 * DTrace DIF Object Functions
9755 */
9756 static int
9757 dtrace_difo_err(uint_t pc, const char *format, ...)
9758 {
9759 if (dtrace_err_verbose) {
9760 va_list alist;
9761
9762 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
9763 va_start(alist, format);
9764 (void) vuprintf(format, alist);
9765 va_end(alist);
9766 }
9767
9768 #ifdef DTRACE_ERRDEBUG
9769 dtrace_errdebug(format);
9770 #endif
9771 return (1);
9772 }
9773
9774 /*
9775 * Validate a DTrace DIF object by checking the IR instructions. The following
9776 * rules are currently enforced by dtrace_difo_validate():
9777 *
9778 * 1. Each instruction must have a valid opcode
9779 * 2. Each register, string, variable, or subroutine reference must be valid
9780 * 3. No instruction can modify register %r0 (must be zero)
9781 * 4. All instruction reserved bits must be set to zero
9782 * 5. The last instruction must be a "ret" instruction
9783 * 6. All branch targets must reference a valid instruction _after_ the branch
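 *
 * As a rough illustration, a short body that satisfies rules 1-6 looks
 * like the following (shown roughly in "dtrace -S" style, and assuming a
 * single entry in the integer table):
 *
 *	setx	DT_INTEGER[0], %r1
 *	ret	%r1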
9784 */
9785 static int
9786 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
9787 cred_t *cr)
9788 {
9789 int err = 0, i;
9790 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
9791 int kcheckload;
9792 uint_t pc;
9793 int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
9794
9795 kcheckload = cr == NULL ||
9796 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
9797
9798 dp->dtdo_destructive = 0;
9799
9800 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
9801 dif_instr_t instr = dp->dtdo_buf[pc];
9802
9803 uint_t r1 = DIF_INSTR_R1(instr);
9804 uint_t r2 = DIF_INSTR_R2(instr);
9805 uint_t rd = DIF_INSTR_RD(instr);
9806 uint_t rs = DIF_INSTR_RS(instr);
9807 uint_t label = DIF_INSTR_LABEL(instr);
9808 uint_t v = DIF_INSTR_VAR(instr);
9809 uint_t subr = DIF_INSTR_SUBR(instr);
9810 uint_t type = DIF_INSTR_TYPE(instr);
9811 uint_t op = DIF_INSTR_OP(instr);
9812
9813 switch (op) {
9814 case DIF_OP_OR:
9815 case DIF_OP_XOR:
9816 case DIF_OP_AND:
9817 case DIF_OP_SLL:
9818 case DIF_OP_SRL:
9819 case DIF_OP_SRA:
9820 case DIF_OP_SUB:
9821 case DIF_OP_ADD:
9822 case DIF_OP_MUL:
9823 case DIF_OP_SDIV:
9824 case DIF_OP_UDIV:
9825 case DIF_OP_SREM:
9826 case DIF_OP_UREM:
9827 case DIF_OP_COPYS:
9828 if (r1 >= nregs)
9829 err += efunc(pc, "invalid register %u\n", r1);
9830 if (r2 >= nregs)
9831 err += efunc(pc, "invalid register %u\n", r2);
9832 if (rd >= nregs)
9833 err += efunc(pc, "invalid register %u\n", rd);
9834 if (rd == 0)
9835 err += efunc(pc, "cannot write to %%r0\n");
9836 break;
9837 case DIF_OP_NOT:
9838 case DIF_OP_MOV:
9839 case DIF_OP_ALLOCS:
9840 if (r1 >= nregs)
9841 err += efunc(pc, "invalid register %u\n", r1);
9842 if (r2 != 0)
9843 err += efunc(pc, "non-zero reserved bits\n");
9844 if (rd >= nregs)
9845 err += efunc(pc, "invalid register %u\n", rd);
9846 if (rd == 0)
9847 err += efunc(pc, "cannot write to %%r0\n");
9848 break;
9849 case DIF_OP_LDSB:
9850 case DIF_OP_LDSH:
9851 case DIF_OP_LDSW:
9852 case DIF_OP_LDUB:
9853 case DIF_OP_LDUH:
9854 case DIF_OP_LDUW:
9855 case DIF_OP_LDX:
9856 if (r1 >= nregs)
9857 err += efunc(pc, "invalid register %u\n", r1);
9858 if (r2 != 0)
9859 err += efunc(pc, "non-zero reserved bits\n");
9860 if (rd >= nregs)
9861 err += efunc(pc, "invalid register %u\n", rd);
9862 if (rd == 0)
9863 err += efunc(pc, "cannot write to %%r0\n");
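/*
 * When the consumer cannot read kernel memory directly, rewrite the
 * load into its checked (RLD*) counterpart so that the address is
 * validated when the DIF is later emulated.
 */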
9864 if (kcheckload)
9865 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
9866 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
9867 break;
9868 case DIF_OP_RLDSB:
9869 case DIF_OP_RLDSH:
9870 case DIF_OP_RLDSW:
9871 case DIF_OP_RLDUB:
9872 case DIF_OP_RLDUH:
9873 case DIF_OP_RLDUW:
9874 case DIF_OP_RLDX:
9875 if (r1 >= nregs)
9876 err += efunc(pc, "invalid register %u\n", r1);
9877 if (r2 != 0)
9878 err += efunc(pc, "non-zero reserved bits\n");
9879 if (rd >= nregs)
9880 err += efunc(pc, "invalid register %u\n", rd);
9881 if (rd == 0)
9882 err += efunc(pc, "cannot write to %%r0\n");
9883 break;
9884 case DIF_OP_ULDSB:
9885 case DIF_OP_ULDSH:
9886 case DIF_OP_ULDSW:
9887 case DIF_OP_ULDUB:
9888 case DIF_OP_ULDUH:
9889 case DIF_OP_ULDUW:
9890 case DIF_OP_ULDX:
9891 if (r1 >= nregs)
9892 err += efunc(pc, "invalid register %u\n", r1);
9893 if (r2 != 0)
9894 err += efunc(pc, "non-zero reserved bits\n");
9895 if (rd >= nregs)
9896 err += efunc(pc, "invalid register %u\n", rd);
9897 if (rd == 0)
9898 err += efunc(pc, "cannot write to %%r0\n");
9899 break;
9900 case DIF_OP_STB:
9901 case DIF_OP_STH:
9902 case DIF_OP_STW:
9903 case DIF_OP_STX:
9904 if (r1 >= nregs)
9905 err += efunc(pc, "invalid register %u\n", r1);
9906 if (r2 != 0)
9907 err += efunc(pc, "non-zero reserved bits\n");
9908 if (rd >= nregs)
9909 err += efunc(pc, "invalid register %u\n", rd);
9910 if (rd == 0)
9911 err += efunc(pc, "cannot write to 0 address\n");
9912 break;
9913 case DIF_OP_CMP:
9914 case DIF_OP_SCMP:
9915 if (r1 >= nregs)
9916 err += efunc(pc, "invalid register %u\n", r1);
9917 if (r2 >= nregs)
9918 err += efunc(pc, "invalid register %u\n", r2);
9919 if (rd != 0)
9920 err += efunc(pc, "non-zero reserved bits\n");
9921 break;
9922 case DIF_OP_TST:
9923 if (r1 >= nregs)
9924 err += efunc(pc, "invalid register %u\n", r1);
9925 if (r2 != 0 || rd != 0)
9926 err += efunc(pc, "non-zero reserved bits\n");
9927 break;
9928 case DIF_OP_BA:
9929 case DIF_OP_BE:
9930 case DIF_OP_BNE:
9931 case DIF_OP_BG:
9932 case DIF_OP_BGU:
9933 case DIF_OP_BGE:
9934 case DIF_OP_BGEU:
9935 case DIF_OP_BL:
9936 case DIF_OP_BLU:
9937 case DIF_OP_BLE:
9938 case DIF_OP_BLEU:
9939 if (label >= dp->dtdo_len) {
9940 err += efunc(pc, "invalid branch target %u\n",
9941 label);
9942 }
9943 if (label <= pc) {
9944 err += efunc(pc, "backward branch to %u\n",
9945 label);
9946 }
9947 break;
9948 case DIF_OP_RET:
9949 if (r1 != 0 || r2 != 0)
9950 err += efunc(pc, "non-zero reserved bits\n");
9951 if (rd >= nregs)
9952 err += efunc(pc, "invalid register %u\n", rd);
9953 break;
9954 case DIF_OP_NOP:
9955 case DIF_OP_POPTS:
9956 case DIF_OP_FLUSHTS:
9957 if (r1 != 0 || r2 != 0 || rd != 0)
9958 err += efunc(pc, "non-zero reserved bits\n");
9959 break;
9960 case DIF_OP_SETX:
9961 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
9962 err += efunc(pc, "invalid integer ref %u\n",
9963 DIF_INSTR_INTEGER(instr));
9964 }
9965 if (rd >= nregs)
9966 err += efunc(pc, "invalid register %u\n", rd);
9967 if (rd == 0)
9968 err += efunc(pc, "cannot write to %%r0\n");
9969 break;
9970 case DIF_OP_SETS:
9971 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
9972 err += efunc(pc, "invalid string ref %u\n",
9973 DIF_INSTR_STRING(instr));
9974 }
9975 if (rd >= nregs)
9976 err += efunc(pc, "invalid register %u\n", rd);
9977 if (rd == 0)
9978 err += efunc(pc, "cannot write to %%r0\n");
9979 break;
9980 case DIF_OP_LDGA:
9981 case DIF_OP_LDTA:
9982 if (r1 > DIF_VAR_ARRAY_MAX)
9983 err += efunc(pc, "invalid array %u\n", r1);
9984 if (r2 >= nregs)
9985 err += efunc(pc, "invalid register %u\n", r2);
9986 if (rd >= nregs)
9987 err += efunc(pc, "invalid register %u\n", rd);
9988 if (rd == 0)
9989 err += efunc(pc, "cannot write to %%r0\n");
9990 break;
9991 case DIF_OP_LDGS:
9992 case DIF_OP_LDTS:
9993 case DIF_OP_LDLS:
9994 case DIF_OP_LDGAA:
9995 case DIF_OP_LDTAA:
9996 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
9997 err += efunc(pc, "invalid variable %u\n", v);
9998 if (rd >= nregs)
9999 err += efunc(pc, "invalid register %u\n", rd);
10000 if (rd == 0)
10001 err += efunc(pc, "cannot write to %%r0\n");
10002 break;
10003 case DIF_OP_STGS:
10004 case DIF_OP_STTS:
10005 case DIF_OP_STLS:
10006 case DIF_OP_STGAA:
10007 case DIF_OP_STTAA:
10008 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
10009 err += efunc(pc, "invalid variable %u\n", v);
10010 if (rs >= nregs)
10011 err += efunc(pc, "invalid register %u\n", rs);
10012 break;
10013 case DIF_OP_CALL:
10014 if (subr > DIF_SUBR_MAX)
10015 err += efunc(pc, "invalid subr %u\n", subr);
10016 if (rd >= nregs)
10017 err += efunc(pc, "invalid register %u\n", rd);
10018 if (rd == 0)
10019 err += efunc(pc, "cannot write to %%r0\n");
10020
10021 if (subr == DIF_SUBR_COPYOUT ||
10022 subr == DIF_SUBR_COPYOUTSTR) {
10023 dp->dtdo_destructive = 1;
10024 }
10025
10026 if (subr == DIF_SUBR_GETF) {
10027 #ifdef __FreeBSD__
10028 err += efunc(pc, "getf() not supported");
10029 #else
10030 /*
10031 * If we have a getf() we need to record that
10032 * in our state. Note that our state can be
10033 * NULL if this is a helper -- but in that
10034 * case, the call to getf() is itself illegal,
10035 * and will be caught (slightly later) when
10036 * the helper is validated.
10037 */
10038 if (vstate->dtvs_state != NULL)
10039 vstate->dtvs_state->dts_getf++;
10040 #endif
10041 }
10042
10043 break;
10044 case DIF_OP_PUSHTR:
10045 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
10046 err += efunc(pc, "invalid ref type %u\n", type);
10047 if (r2 >= nregs)
10048 err += efunc(pc, "invalid register %u\n", r2);
10049 if (rs >= nregs)
10050 err += efunc(pc, "invalid register %u\n", rs);
10051 break;
10052 case DIF_OP_PUSHTV:
10053 if (type != DIF_TYPE_CTF)
10054 err += efunc(pc, "invalid val type %u\n", type);
10055 if (r2 >= nregs)
10056 err += efunc(pc, "invalid register %u\n", r2);
10057 if (rs >= nregs)
10058 err += efunc(pc, "invalid register %u\n", rs);
10059 break;
10060 default:
10061 err += efunc(pc, "invalid opcode %u\n",
10062 DIF_INSTR_OP(instr));
10063 }
10064 }
10065
10066 if (dp->dtdo_len != 0 &&
10067 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
10068 err += efunc(dp->dtdo_len - 1,
10069 "expected 'ret' as last DIF instruction\n");
10070 }
10071
10072 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
10073 /*
10074 * If we're not returning by reference, the size must be either
10075 * 0 or the size of one of the base types.
10076 */
10077 switch (dp->dtdo_rtype.dtdt_size) {
10078 case 0:
10079 case sizeof (uint8_t):
10080 case sizeof (uint16_t):
10081 case sizeof (uint32_t):
10082 case sizeof (uint64_t):
10083 break;
10084
10085 default:
10086 err += efunc(dp->dtdo_len - 1, "bad return size\n");
10087 }
10088 }
10089
10090 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
10091 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
10092 dtrace_diftype_t *vt, *et;
10093 uint_t id, ndx;
10094
10095 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
10096 v->dtdv_scope != DIFV_SCOPE_THREAD &&
10097 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
10098 err += efunc(i, "unrecognized variable scope %d\n",
10099 v->dtdv_scope);
10100 break;
10101 }
10102
10103 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
10104 v->dtdv_kind != DIFV_KIND_SCALAR) {
10105 err += efunc(i, "unrecognized variable type %d\n",
10106 v->dtdv_kind);
10107 break;
10108 }
10109
10110 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
10111 err += efunc(i, "%d exceeds variable id limit\n", id);
10112 break;
10113 }
10114
10115 if (id < DIF_VAR_OTHER_UBASE)
10116 continue;
10117
10118 /*
10119 * For user-defined variables, we need to check that this
10120 * definition is identical to any previous definition that we
10121 * encountered.
10122 */
10123 ndx = id - DIF_VAR_OTHER_UBASE;
10124
10125 switch (v->dtdv_scope) {
10126 case DIFV_SCOPE_GLOBAL:
10127 if (maxglobal == -1 || ndx > maxglobal)
10128 maxglobal = ndx;
10129
10130 if (ndx < vstate->dtvs_nglobals) {
10131 dtrace_statvar_t *svar;
10132
10133 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
10134 existing = &svar->dtsv_var;
10135 }
10136
10137 break;
10138
10139 case DIFV_SCOPE_THREAD:
10140 if (maxtlocal == -1 || ndx > maxtlocal)
10141 maxtlocal = ndx;
10142
10143 if (ndx < vstate->dtvs_ntlocals)
10144 existing = &vstate->dtvs_tlocals[ndx];
10145 break;
10146
10147 case DIFV_SCOPE_LOCAL:
10148 if (maxlocal == -1 || ndx > maxlocal)
10149 maxlocal = ndx;
10150
10151 if (ndx < vstate->dtvs_nlocals) {
10152 dtrace_statvar_t *svar;
10153
10154 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
10155 existing = &svar->dtsv_var;
10156 }
10157
10158 break;
10159 }
10160
10161 vt = &v->dtdv_type;
10162
10163 if (vt->dtdt_flags & DIF_TF_BYREF) {
10164 if (vt->dtdt_size == 0) {
10165 err += efunc(i, "zero-sized variable\n");
10166 break;
10167 }
10168
10169 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
10170 v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
10171 vt->dtdt_size > dtrace_statvar_maxsize) {
10172 err += efunc(i, "oversized by-ref static\n");
10173 break;
10174 }
10175 }
10176
10177 if (existing == NULL || existing->dtdv_id == 0)
10178 continue;
10179
10180 ASSERT(existing->dtdv_id == v->dtdv_id);
10181 ASSERT(existing->dtdv_scope == v->dtdv_scope);
10182
10183 if (existing->dtdv_kind != v->dtdv_kind)
10184 err += efunc(i, "%d changed variable kind\n", id);
10185
10186 et = &existing->dtdv_type;
10187
10188 if (vt->dtdt_flags != et->dtdt_flags) {
10189 err += efunc(i, "%d changed variable type flags\n", id);
10190 break;
10191 }
10192
10193 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
10194 err += efunc(i, "%d changed variable type size\n", id);
10195 break;
10196 }
10197 }
10198
10199 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
10200 dif_instr_t instr = dp->dtdo_buf[pc];
10201
10202 uint_t v = DIF_INSTR_VAR(instr);
10203 uint_t op = DIF_INSTR_OP(instr);
10204
10205 switch (op) {
10206 case DIF_OP_LDGS:
10207 case DIF_OP_LDGAA:
10208 case DIF_OP_STGS:
10209 case DIF_OP_STGAA:
10210 if (v > DIF_VAR_OTHER_UBASE + maxglobal)
10211 err += efunc(pc, "invalid variable %u\n", v);
10212 break;
10213 case DIF_OP_LDTS:
10214 case DIF_OP_LDTAA:
10215 case DIF_OP_STTS:
10216 case DIF_OP_STTAA:
10217 if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
10218 err += efunc(pc, "invalid variable %u\n", v);
10219 break;
10220 case DIF_OP_LDLS:
10221 case DIF_OP_STLS:
10222 if (v > DIF_VAR_OTHER_UBASE + maxlocal)
10223 err += efunc(pc, "invalid variable %u\n", v);
10224 break;
10225 default:
10226 break;
10227 }
10228 }
10229
10230 return (err);
10231 }
10232
10233 /*
10234 * Validate a DTrace DIF object that is to be used as a helper. Helpers
10235 * are much more constrained than normal DIFOs. Specifically, they may
10236 * not:
10237 *
10238 * 1. Make calls to subroutines other than copyin(), copyinstr() or
10239 * miscellaneous string routines
10240 * 2. Access DTrace variables other than the args[] array, and the
10241 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
10242 * 3. Have thread-local variables.
10243 * 4. Have dynamic variables.
10244 */
10245 static int
10246 dtrace_difo_validate_helper(dtrace_difo_t *dp)
10247 {
10248 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
10249 int err = 0;
10250 uint_t pc;
10251
10252 for (pc = 0; pc < dp->dtdo_len; pc++) {
10253 dif_instr_t instr = dp->dtdo_buf[pc];
10254
10255 uint_t v = DIF_INSTR_VAR(instr);
10256 uint_t subr = DIF_INSTR_SUBR(instr);
10257 uint_t op = DIF_INSTR_OP(instr);
10258
10259 switch (op) {
10260 case DIF_OP_OR:
10261 case DIF_OP_XOR:
10262 case DIF_OP_AND:
10263 case DIF_OP_SLL:
10264 case DIF_OP_SRL:
10265 case DIF_OP_SRA:
10266 case DIF_OP_SUB:
10267 case DIF_OP_ADD:
10268 case DIF_OP_MUL:
10269 case DIF_OP_SDIV:
10270 case DIF_OP_UDIV:
10271 case DIF_OP_SREM:
10272 case DIF_OP_UREM:
10273 case DIF_OP_COPYS:
10274 case DIF_OP_NOT:
10275 case DIF_OP_MOV:
10276 case DIF_OP_RLDSB:
10277 case DIF_OP_RLDSH:
10278 case DIF_OP_RLDSW:
10279 case DIF_OP_RLDUB:
10280 case DIF_OP_RLDUH:
10281 case DIF_OP_RLDUW:
10282 case DIF_OP_RLDX:
10283 case DIF_OP_ULDSB:
10284 case DIF_OP_ULDSH:
10285 case DIF_OP_ULDSW:
10286 case DIF_OP_ULDUB:
10287 case DIF_OP_ULDUH:
10288 case DIF_OP_ULDUW:
10289 case DIF_OP_ULDX:
10290 case DIF_OP_STB:
10291 case DIF_OP_STH:
10292 case DIF_OP_STW:
10293 case DIF_OP_STX:
10294 case DIF_OP_ALLOCS:
10295 case DIF_OP_CMP:
10296 case DIF_OP_SCMP:
10297 case DIF_OP_TST:
10298 case DIF_OP_BA:
10299 case DIF_OP_BE:
10300 case DIF_OP_BNE:
10301 case DIF_OP_BG:
10302 case DIF_OP_BGU:
10303 case DIF_OP_BGE:
10304 case DIF_OP_BGEU:
10305 case DIF_OP_BL:
10306 case DIF_OP_BLU:
10307 case DIF_OP_BLE:
10308 case DIF_OP_BLEU:
10309 case DIF_OP_RET:
10310 case DIF_OP_NOP:
10311 case DIF_OP_POPTS:
10312 case DIF_OP_FLUSHTS:
10313 case DIF_OP_SETX:
10314 case DIF_OP_SETS:
10315 case DIF_OP_LDGA:
10316 case DIF_OP_LDLS:
10317 case DIF_OP_STGS:
10318 case DIF_OP_STLS:
10319 case DIF_OP_PUSHTR:
10320 case DIF_OP_PUSHTV:
10321 break;
10322
10323 case DIF_OP_LDGS:
10324 if (v >= DIF_VAR_OTHER_UBASE)
10325 break;
10326
10327 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
10328 break;
10329
10330 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
10331 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
10332 v == DIF_VAR_EXECARGS ||
10333 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
10334 v == DIF_VAR_UID || v == DIF_VAR_GID)
10335 break;
10336
10337 err += efunc(pc, "illegal variable %u\n", v);
10338 break;
10339
10340 case DIF_OP_LDTA:
10341 case DIF_OP_LDTS:
10342 case DIF_OP_LDGAA:
10343 case DIF_OP_LDTAA:
10344 err += efunc(pc, "illegal dynamic variable load\n");
10345 break;
10346
10347 case DIF_OP_STTS:
10348 case DIF_OP_STGAA:
10349 case DIF_OP_STTAA:
10350 err += efunc(pc, "illegal dynamic variable store\n");
10351 break;
10352
10353 case DIF_OP_CALL:
10354 if (subr == DIF_SUBR_ALLOCA ||
10355 subr == DIF_SUBR_BCOPY ||
10356 subr == DIF_SUBR_COPYIN ||
10357 subr == DIF_SUBR_COPYINTO ||
10358 subr == DIF_SUBR_COPYINSTR ||
10359 subr == DIF_SUBR_INDEX ||
10360 subr == DIF_SUBR_INET_NTOA ||
10361 subr == DIF_SUBR_INET_NTOA6 ||
10362 subr == DIF_SUBR_INET_NTOP ||
10363 subr == DIF_SUBR_JSON ||
10364 subr == DIF_SUBR_LLTOSTR ||
10365 subr == DIF_SUBR_STRTOLL ||
10366 subr == DIF_SUBR_RINDEX ||
10367 subr == DIF_SUBR_STRCHR ||
10368 subr == DIF_SUBR_STRJOIN ||
10369 subr == DIF_SUBR_STRRCHR ||
10370 subr == DIF_SUBR_STRSTR ||
10371 subr == DIF_SUBR_HTONS ||
10372 subr == DIF_SUBR_HTONL ||
10373 subr == DIF_SUBR_HTONLL ||
10374 subr == DIF_SUBR_NTOHS ||
10375 subr == DIF_SUBR_NTOHL ||
10376 subr == DIF_SUBR_NTOHLL ||
10377 subr == DIF_SUBR_MEMREF)
10378 break;
10379 #ifdef __FreeBSD__
10380 if (subr == DIF_SUBR_MEMSTR)
10381 break;
10382 #endif
10383
10384 err += efunc(pc, "invalid subr %u\n", subr);
10385 break;
10386
10387 default:
10388 err += efunc(pc, "invalid opcode %u\n",
10389 DIF_INSTR_OP(instr));
10390 }
10391 }
10392
10393 return (err);
10394 }
10395
10396 /*
10397 * Returns 1 if the expression in the DIF object can be cached on a per-thread
10398 * basis; 0 if not.
10399 */
10400 static int
10401 dtrace_difo_cacheable(dtrace_difo_t *dp)
10402 {
10403 int i;
10404
10405 if (dp == NULL)
10406 return (0);
10407
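/*
 * First pass: the DIFO can be cached per-thread only if every global
 * variable it references is one of the built-ins enumerated below.
 */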
10408 for (i = 0; i < dp->dtdo_varlen; i++) {
10409 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10410
10411 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
10412 continue;
10413
10414 switch (v->dtdv_id) {
10415 case DIF_VAR_CURTHREAD:
10416 case DIF_VAR_PID:
10417 case DIF_VAR_TID:
10418 case DIF_VAR_EXECARGS:
10419 case DIF_VAR_EXECNAME:
10420 case DIF_VAR_ZONENAME:
10421 break;
10422
10423 default:
10424 return (0);
10425 }
10426 }
10427
10428 /*
10429 * This DIF object may be cacheable. Now we need to look for any
10430 * array loading instructions, any memory loading instructions, or
10431 * any stores to thread-local variables.
10432 */
10433 for (i = 0; i < dp->dtdo_len; i++) {
10434 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
10435
10436 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
10437 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
10438 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
10439 op == DIF_OP_LDGA || op == DIF_OP_STTS)
10440 return (0);
10441 }
10442
10443 return (1);
10444 }
10445
10446 static void
10447 dtrace_difo_hold(dtrace_difo_t *dp)
10448 {
10449 int i;
10450
10451 ASSERT(MUTEX_HELD(&dtrace_lock));
10452
10453 dp->dtdo_refcnt++;
10454 ASSERT(dp->dtdo_refcnt != 0);
10455
10456 /*
10457 * We need to check this DIF object for references to the variable
10458 * DIF_VAR_VTIMESTAMP.
10459 */
10460 for (i = 0; i < dp->dtdo_varlen; i++) {
10461 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10462
10463 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10464 continue;
10465
10466 if (dtrace_vtime_references++ == 0)
10467 dtrace_vtime_enable();
10468 }
10469 }
10470
10471 /*
10472 * This routine calculates the dynamic variable chunksize for a given DIF
10473 * object. The calculation is not fool-proof, and can probably be tricked by
10474 * malicious DIF -- but it works for all compiler-generated DIF. Because this
10475 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
10476 * if a dynamic variable size exceeds the chunksize.
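 *
 * Roughly, each dynamic variable store requires sizeof (dtrace_dynvar_t)
 * plus (nkeys - 1) * sizeof (dtrace_key_t), plus the rounded-up sizes of
 * its keys, plus the size of the stored value -- all rounded up to a
 * multiple of sizeof (uint64_t).  The chunksize is raised to the largest
 * such value found.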
10477 */
10478 static void
10479 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10480 {
10481 uint64_t sval = 0;
10482 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
10483 const dif_instr_t *text = dp->dtdo_buf;
10484 uint_t pc, srd = 0;
10485 uint_t ttop = 0;
10486 size_t size, ksize;
10487 uint_t id, i;
10488
10489 for (pc = 0; pc < dp->dtdo_len; pc++) {
10490 dif_instr_t instr = text[pc];
10491 uint_t op = DIF_INSTR_OP(instr);
10492 uint_t rd = DIF_INSTR_RD(instr);
10493 uint_t r1 = DIF_INSTR_R1(instr);
10494 uint_t nkeys = 0;
10495 uchar_t scope = 0;
10496
10497 dtrace_key_t *key = tupregs;
10498
10499 switch (op) {
10500 case DIF_OP_SETX:
10501 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
10502 srd = rd;
10503 continue;
10504
10505 case DIF_OP_STTS:
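/*
 * A thread-local store carries two implicit, zero-sized keys
 * (per the tupregs comment above: the thread and the id).
 */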
10506 key = &tupregs[DIF_DTR_NREGS];
10507 key[0].dttk_size = 0;
10508 key[1].dttk_size = 0;
10509 nkeys = 2;
10510 scope = DIFV_SCOPE_THREAD;
10511 break;
10512
10513 case DIF_OP_STGAA:
10514 case DIF_OP_STTAA:
10515 nkeys = ttop;
10516
10517 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
10518 key[nkeys++].dttk_size = 0;
10519
10520 key[nkeys++].dttk_size = 0;
10521
10522 if (op == DIF_OP_STTAA) {
10523 scope = DIFV_SCOPE_THREAD;
10524 } else {
10525 scope = DIFV_SCOPE_GLOBAL;
10526 }
10527
10528 break;
10529
10530 case DIF_OP_PUSHTR:
10531 if (ttop == DIF_DTR_NREGS)
10532 return;
10533
10534 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
10535 /*
10536 * If the register for the size of the "pushtr"
10537 * is %r0 (or the value is 0) and the type is
10538 * a string, we'll use the system-wide default
10539 * string size.
10540 */
10541 tupregs[ttop++].dttk_size =
10542 dtrace_strsize_default;
10543 } else {
10544 if (srd == 0)
10545 return;
10546
10547 if (sval > LONG_MAX)
10548 return;
10549
10550 tupregs[ttop++].dttk_size = sval;
10551 }
10552
10553 break;
10554
10555 case DIF_OP_PUSHTV:
10556 if (ttop == DIF_DTR_NREGS)
10557 return;
10558
10559 tupregs[ttop++].dttk_size = 0;
10560 break;
10561
10562 case DIF_OP_FLUSHTS:
10563 ttop = 0;
10564 break;
10565
10566 case DIF_OP_POPTS:
10567 if (ttop != 0)
10568 ttop--;
10569 break;
10570 }
10571
10572 sval = 0;
10573 srd = 0;
10574
10575 if (nkeys == 0)
10576 continue;
10577
10578 /*
10579 * We have a dynamic variable allocation; calculate its size.
10580 */
10581 for (ksize = 0, i = 0; i < nkeys; i++)
10582 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10583
10584 size = sizeof (dtrace_dynvar_t);
10585 size += sizeof (dtrace_key_t) * (nkeys - 1);
10586 size += ksize;
10587
10588 /*
10589 * Now we need to determine the size of the stored data.
10590 */
10591 id = DIF_INSTR_VAR(instr);
10592
10593 for (i = 0; i < dp->dtdo_varlen; i++) {
10594 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10595
10596 if (v->dtdv_id == id && v->dtdv_scope == scope) {
10597 size += v->dtdv_type.dtdt_size;
10598 break;
10599 }
10600 }
10601
10602 if (i == dp->dtdo_varlen)
10603 return;
10604
10605 /*
10606 * We have the size. If this is larger than the chunk size
10607 * for our dynamic variable state, reset the chunk size.
10608 */
10609 size = P2ROUNDUP(size, sizeof (uint64_t));
10610
10611 /*
10612 * Before setting the chunk size, check that we're not going
10613 * to set it to a negative value...
10614 */
10615 if (size > LONG_MAX)
10616 return;
10617
10618 /*
10619 * ...and make certain that we didn't badly overflow.
10620 */
10621 if (size < ksize || size < sizeof (dtrace_dynvar_t))
10622 return;
10623
10624 if (size > vstate->dtvs_dynvars.dtds_chunksize)
10625 vstate->dtvs_dynvars.dtds_chunksize = size;
10626 }
10627 }
10628
10629 static void
10630 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10631 {
10632 int i, oldsvars, osz, nsz, otlocals, ntlocals;
10633 uint_t id;
10634
10635 ASSERT(MUTEX_HELD(&dtrace_lock));
10636 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10637
10638 for (i = 0; i < dp->dtdo_varlen; i++) {
10639 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10640 dtrace_statvar_t *svar, ***svarp = NULL;
10641 size_t dsize = 0;
10642 uint8_t scope = v->dtdv_scope;
10643 int *np = NULL;
10644
10645 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10646 continue;
10647
10648 id -= DIF_VAR_OTHER_UBASE;
10649
10650 switch (scope) {
10651 case DIFV_SCOPE_THREAD:
10652 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10653 dtrace_difv_t *tlocals;
10654
10655 if ((ntlocals = (otlocals << 1)) == 0)
10656 ntlocals = 1;
10657
10658 osz = otlocals * sizeof (dtrace_difv_t);
10659 nsz = ntlocals * sizeof (dtrace_difv_t);
10660
10661 tlocals = kmem_zalloc(nsz, KM_SLEEP);
10662
10663 if (osz != 0) {
10664 bcopy(vstate->dtvs_tlocals,
10665 tlocals, osz);
10666 kmem_free(vstate->dtvs_tlocals, osz);
10667 }
10668
10669 vstate->dtvs_tlocals = tlocals;
10670 vstate->dtvs_ntlocals = ntlocals;
10671 }
10672
10673 vstate->dtvs_tlocals[id] = *v;
10674 continue;
10675
10676 case DIFV_SCOPE_LOCAL:
10677 np = &vstate->dtvs_nlocals;
10678 svarp = &vstate->dtvs_locals;
10679
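/*
 * Clause-local variables are allocated per-CPU: one 64-bit slot per
 * CPU, or, for by-ref variables, the declared size plus an extra
 * 64-bit word per CPU.
 */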
10680 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10681 dsize = NCPU * (v->dtdv_type.dtdt_size +
10682 sizeof (uint64_t));
10683 else
10684 dsize = NCPU * sizeof (uint64_t);
10685
10686 break;
10687
10688 case DIFV_SCOPE_GLOBAL:
10689 np = &vstate->dtvs_nglobals;
10690 svarp = &vstate->dtvs_globals;
10691
10692 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10693 dsize = v->dtdv_type.dtdt_size +
10694 sizeof (uint64_t);
10695
10696 break;
10697
10698 default:
10699 ASSERT(0);
10700 }
10701
10702 while (id >= (oldsvars = *np)) {
10703 dtrace_statvar_t **statics;
10704 int newsvars, oldsize, newsize;
10705
10706 if ((newsvars = (oldsvars << 1)) == 0)
10707 newsvars = 1;
10708
10709 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10710 newsize = newsvars * sizeof (dtrace_statvar_t *);
10711
10712 statics = kmem_zalloc(newsize, KM_SLEEP);
10713
10714 if (oldsize != 0) {
10715 bcopy(*svarp, statics, oldsize);
10716 kmem_free(*svarp, oldsize);
10717 }
10718
10719 *svarp = statics;
10720 *np = newsvars;
10721 }
10722
10723 if ((svar = (*svarp)[id]) == NULL) {
10724 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10725 svar->dtsv_var = *v;
10726
10727 if ((svar->dtsv_size = dsize) != 0) {
10728 svar->dtsv_data = (uint64_t)(uintptr_t)
10729 kmem_zalloc(dsize, KM_SLEEP);
10730 }
10731
10732 (*svarp)[id] = svar;
10733 }
10734
10735 svar->dtsv_refcnt++;
10736 }
10737
10738 dtrace_difo_chunksize(dp, vstate);
10739 dtrace_difo_hold(dp);
10740 }
10741
10742 static dtrace_difo_t *
10743 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10744 {
10745 dtrace_difo_t *new;
10746 size_t sz;
10747
10748 ASSERT(dp->dtdo_buf != NULL);
10749 ASSERT(dp->dtdo_refcnt != 0);
10750
10751 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10752
10753 ASSERT(dp->dtdo_buf != NULL);
10754 sz = dp->dtdo_len * sizeof (dif_instr_t);
10755 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10756 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10757 new->dtdo_len = dp->dtdo_len;
10758
10759 if (dp->dtdo_strtab != NULL) {
10760 ASSERT(dp->dtdo_strlen != 0);
10761 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10762 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10763 new->dtdo_strlen = dp->dtdo_strlen;
10764 }
10765
10766 if (dp->dtdo_inttab != NULL) {
10767 ASSERT(dp->dtdo_intlen != 0);
10768 sz = dp->dtdo_intlen * sizeof (uint64_t);
10769 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
10770 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
10771 new->dtdo_intlen = dp->dtdo_intlen;
10772 }
10773
10774 if (dp->dtdo_vartab != NULL) {
10775 ASSERT(dp->dtdo_varlen != 0);
10776 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
10777 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
10778 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
10779 new->dtdo_varlen = dp->dtdo_varlen;
10780 }
10781
10782 dtrace_difo_init(new, vstate);
10783 return (new);
10784 }
10785
10786 static void
10787 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10788 {
10789 int i;
10790
10791 ASSERT(dp->dtdo_refcnt == 0);
10792
10793 for (i = 0; i < dp->dtdo_varlen; i++) {
10794 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10795 dtrace_statvar_t *svar, **svarp = NULL;
10796 uint_t id;
10797 uint8_t scope = v->dtdv_scope;
10798 int *np = NULL;
10799
10800 switch (scope) {
10801 case DIFV_SCOPE_THREAD:
10802 continue;
10803
10804 case DIFV_SCOPE_LOCAL:
10805 np = &vstate->dtvs_nlocals;
10806 svarp = vstate->dtvs_locals;
10807 break;
10808
10809 case DIFV_SCOPE_GLOBAL:
10810 np = &vstate->dtvs_nglobals;
10811 svarp = vstate->dtvs_globals;
10812 break;
10813
10814 default:
10815 ASSERT(0);
10816 }
10817
10818 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10819 continue;
10820
10821 id -= DIF_VAR_OTHER_UBASE;
10822 ASSERT(id < *np);
10823
10824 svar = svarp[id];
10825 ASSERT(svar != NULL);
10826 ASSERT(svar->dtsv_refcnt > 0);
10827
10828 if (--svar->dtsv_refcnt > 0)
10829 continue;
10830
10831 if (svar->dtsv_size != 0) {
10832 ASSERT(svar->dtsv_data != 0);
10833 kmem_free((void *)(uintptr_t)svar->dtsv_data,
10834 svar->dtsv_size);
10835 }
10836
10837 kmem_free(svar, sizeof (dtrace_statvar_t));
10838 svarp[id] = NULL;
10839 }
10840
10841 if (dp->dtdo_buf != NULL)
10842 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
10843 if (dp->dtdo_inttab != NULL)
10844 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
10845 if (dp->dtdo_strtab != NULL)
10846 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
10847 if (dp->dtdo_vartab != NULL)
10848 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
10849
10850 kmem_free(dp, sizeof (dtrace_difo_t));
10851 }
10852
10853 static void
10854 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10855 {
10856 int i;
10857
10858 ASSERT(MUTEX_HELD(&dtrace_lock));
10859 ASSERT(dp->dtdo_refcnt != 0);
10860
10861 for (i = 0; i < dp->dtdo_varlen; i++) {
10862 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10863
10864 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10865 continue;
10866
10867 ASSERT(dtrace_vtime_references > 0);
10868 if (--dtrace_vtime_references == 0)
10869 dtrace_vtime_disable();
10870 }
10871
10872 if (--dp->dtdo_refcnt == 0)
10873 dtrace_difo_destroy(dp, vstate);
10874 }
10875
10876 /*
10877 * DTrace Format Functions
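 *
 * Note that dtrace_format_add() hands back 1-based format indices; zero is
 * reserved to mean "no format", and dtrace_format_remove() expects the same
 * 1-based index.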
10878 */
10879 static uint16_t
10880 dtrace_format_add(dtrace_state_t *state, char *str)
10881 {
10882 char *fmt, **new;
10883 uint16_t ndx, len = strlen(str) + 1;
10884
10885 fmt = kmem_zalloc(len, KM_SLEEP);
10886 bcopy(str, fmt, len);
10887
10888 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
10889 if (state->dts_formats[ndx] == NULL) {
10890 state->dts_formats[ndx] = fmt;
10891 return (ndx + 1);
10892 }
10893 }
10894
10895 if (state->dts_nformats == USHRT_MAX) {
10896 /*
10897 * This is only likely if a denial-of-service attack is being
10898 * attempted. As such, it's okay to fail silently here.
10899 */
10900 kmem_free(fmt, len);
10901 return (0);
10902 }
10903
10904 /*
10905 * For simplicity, we always resize the formats array to be exactly the
10906 * number of formats.
10907 */
10908 ndx = state->dts_nformats++;
10909 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
10910
10911 if (state->dts_formats != NULL) {
10912 ASSERT(ndx != 0);
10913 bcopy(state->dts_formats, new, ndx * sizeof (char *));
10914 kmem_free(state->dts_formats, ndx * sizeof (char *));
10915 }
10916
10917 state->dts_formats = new;
10918 state->dts_formats[ndx] = fmt;
10919
10920 return (ndx + 1);
10921 }
10922
10923 static void
10924 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
10925 {
10926 char *fmt;
10927
10928 ASSERT(state->dts_formats != NULL);
10929 ASSERT(format <= state->dts_nformats);
10930 ASSERT(state->dts_formats[format - 1] != NULL);
10931
10932 fmt = state->dts_formats[format - 1];
10933 kmem_free(fmt, strlen(fmt) + 1);
10934 state->dts_formats[format - 1] = NULL;
10935 }
10936
10937 static void
10938 dtrace_format_destroy(dtrace_state_t *state)
10939 {
10940 int i;
10941
10942 if (state->dts_nformats == 0) {
10943 ASSERT(state->dts_formats == NULL);
10944 return;
10945 }
10946
10947 ASSERT(state->dts_formats != NULL);
10948
10949 for (i = 0; i < state->dts_nformats; i++) {
10950 char *fmt = state->dts_formats[i];
10951
10952 if (fmt == NULL)
10953 continue;
10954
10955 kmem_free(fmt, strlen(fmt) + 1);
10956 }
10957
10958 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
10959 state->dts_nformats = 0;
10960 state->dts_formats = NULL;
10961 }
10962
10963 /*
10964 * DTrace Predicate Functions
10965 */
10966 static dtrace_predicate_t *
10967 dtrace_predicate_create(dtrace_difo_t *dp)
10968 {
10969 dtrace_predicate_t *pred;
10970
10971 ASSERT(MUTEX_HELD(&dtrace_lock));
10972 ASSERT(dp->dtdo_refcnt != 0);
10973
10974 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
10975 pred->dtp_difo = dp;
10976 pred->dtp_refcnt = 1;
10977
10978 if (!dtrace_difo_cacheable(dp))
10979 return (pred);
10980
10981 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
10982 /*
10983 * This is only theoretically possible -- we have had 2^32
10984 * cacheable predicates on this machine. We cannot allow any
10985 * more predicates to become cacheable: as unlikely as it is,
10986 * there may be a thread caching a (now stale) predicate cache
10987 * ID. (N.B.: the temptation is being successfully resisted to
10988 * have this cmn_err() "Holy shit -- we executed this code!")
10989 */
10990 return (pred);
10991 }
10992
10993 pred->dtp_cacheid = dtrace_predcache_id++;
10994
10995 return (pred);
10996 }
10997
10998 static void
10999 dtrace_predicate_hold(dtrace_predicate_t *pred)
11000 {
11001 ASSERT(MUTEX_HELD(&dtrace_lock));
11002 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
11003 ASSERT(pred->dtp_refcnt > 0);
11004
11005 pred->dtp_refcnt++;
11006 }
11007
11008 static void
11009 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
11010 {
11011 dtrace_difo_t *dp = pred->dtp_difo;
11012
11013 ASSERT(MUTEX_HELD(&dtrace_lock));
11014 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
11015 ASSERT(pred->dtp_refcnt > 0);
11016
11017 if (--pred->dtp_refcnt == 0) {
11018 dtrace_difo_release(pred->dtp_difo, vstate);
11019 kmem_free(pred, sizeof (dtrace_predicate_t));
11020 }
11021 }
11022
11023 /*
11024 * DTrace Action Description Functions
11025 */
11026 static dtrace_actdesc_t *
11027 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
11028 uint64_t uarg, uint64_t arg)
11029 {
11030 dtrace_actdesc_t *act;
11031
11032 #ifdef illumos
11033 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
11034 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
11035 #endif
11036
11037 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
11038 act->dtad_kind = kind;
11039 act->dtad_ntuple = ntuple;
11040 act->dtad_uarg = uarg;
11041 act->dtad_arg = arg;
11042 act->dtad_refcnt = 1;
11043
11044 return (act);
11045 }
11046
11047 static void
11048 dtrace_actdesc_hold(dtrace_actdesc_t *act)
11049 {
11050 ASSERT(act->dtad_refcnt >= 1);
11051 act->dtad_refcnt++;
11052 }
11053
11054 static void
11055 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
11056 {
11057 dtrace_actkind_t kind = act->dtad_kind;
11058 dtrace_difo_t *dp;
11059
11060 ASSERT(act->dtad_refcnt >= 1);
11061
11062 if (--act->dtad_refcnt != 0)
11063 return;
11064
11065 if ((dp = act->dtad_difo) != NULL)
11066 dtrace_difo_release(dp, vstate);
11067
11068 if (DTRACEACT_ISPRINTFLIKE(kind)) {
11069 char *str = (char *)(uintptr_t)act->dtad_arg;
11070
11071 #ifdef illumos
11072 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
11073 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
11074 #endif
11075
11076 if (str != NULL)
11077 kmem_free(str, strlen(str) + 1);
11078 }
11079
11080 kmem_free(act, sizeof (dtrace_actdesc_t));
11081 }
11082
11083 /*
11084 * DTrace ECB Functions
11085 */
11086 static dtrace_ecb_t *
11087 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
11088 {
11089 dtrace_ecb_t *ecb;
11090 dtrace_epid_t epid;
11091
11092 ASSERT(MUTEX_HELD(&dtrace_lock));
11093
11094 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
11095 ecb->dte_predicate = NULL;
11096 ecb->dte_probe = probe;
11097
11098 /*
11099 * The default size is the size of the default action: recording
11100 * the header.
11101 */
11102 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
11103 ecb->dte_alignment = sizeof (dtrace_epid_t);
11104
11105 epid = state->dts_epid++;
11106
11107 if (epid - 1 >= state->dts_necbs) {
11108 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
11109 int necbs = state->dts_necbs << 1;
11110
11111 ASSERT(epid == state->dts_necbs + 1);
11112
11113 if (necbs == 0) {
11114 ASSERT(oecbs == NULL);
11115 necbs = 1;
11116 }
11117
11118 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
11119
11120 if (oecbs != NULL)
11121 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
11122
11123 dtrace_membar_producer();
11124 state->dts_ecbs = ecbs;
11125
11126 if (oecbs != NULL) {
11127 /*
11128 * If this state is active, we must dtrace_sync()
11129 * before we can free the old dts_ecbs array: we're
11130 * coming in hot, and there may be active ring
11131 * buffer processing (which indexes into the dts_ecbs
11132 * array) on another CPU.
11133 */
11134 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
11135 dtrace_sync();
11136
11137 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
11138 }
11139
11140 dtrace_membar_producer();
11141 state->dts_necbs = necbs;
11142 }
11143
11144 ecb->dte_state = state;
11145
11146 ASSERT(state->dts_ecbs[epid - 1] == NULL);
11147 dtrace_membar_producer();
11148 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
11149
11150 return (ecb);
11151 }
11152
11153 static void
11154 dtrace_ecb_enable(dtrace_ecb_t *ecb)
11155 {
11156 dtrace_probe_t *probe = ecb->dte_probe;
11157
11158 ASSERT(MUTEX_HELD(&cpu_lock));
11159 ASSERT(MUTEX_HELD(&dtrace_lock));
11160 ASSERT(ecb->dte_next == NULL);
11161
11162 if (probe == NULL) {
11163 /*
11164 * This is the NULL probe -- there's nothing to do.
11165 */
11166 return;
11167 }
11168
11169 if (probe->dtpr_ecb == NULL) {
11170 dtrace_provider_t *prov = probe->dtpr_provider;
11171
11172 /*
11173 * We're the first ECB on this probe.
11174 */
11175 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
11176
11177 if (ecb->dte_predicate != NULL)
11178 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
11179
11180 prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
11181 probe->dtpr_id, probe->dtpr_arg);
11182 } else {
11183 /*
11184 * This probe is already active. Swing the last pointer to
11185 * point to the new ECB, and issue a dtrace_sync() to assure
11186 * that all CPUs have seen the change.
11187 */
11188 ASSERT(probe->dtpr_ecb_last != NULL);
11189 probe->dtpr_ecb_last->dte_next = ecb;
11190 probe->dtpr_ecb_last = ecb;
11191 probe->dtpr_predcache = 0;
11192
11193 dtrace_sync();
11194 }
11195 }
11196
11197 static int
11198 dtrace_ecb_resize(dtrace_ecb_t *ecb)
11199 {
11200 dtrace_action_t *act;
11201 uint32_t curneeded = UINT32_MAX;
11202 uint32_t aggbase = UINT32_MAX;
11203
11204 /*
11205 * If we record anything, we always record the dtrace_rechdr_t. (And
11206 * we always record it first.)
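 *
 * Roughly speaking, dte_size accumulates what non-aggregating records
 * consume in the principal buffer, while dte_needed additionally covers
 * the tuple records feeding an aggregation; dte_needed is what feeds the
 * state's dts_needed below.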
11207 */
11208 ecb->dte_size = sizeof (dtrace_rechdr_t);
11209 ecb->dte_alignment = sizeof (dtrace_epid_t);
11210
11211 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11212 dtrace_recdesc_t *rec = &act->dta_rec;
11213 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
11214
11215 ecb->dte_alignment = MAX(ecb->dte_alignment,
11216 rec->dtrd_alignment);
11217
11218 if (DTRACEACT_ISAGG(act->dta_kind)) {
11219 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11220
11221 ASSERT(rec->dtrd_size != 0);
11222 ASSERT(agg->dtag_first != NULL);
11223 ASSERT(act->dta_prev->dta_intuple);
11224 ASSERT(aggbase != UINT32_MAX);
11225 ASSERT(curneeded != UINT32_MAX);
11226
11227 agg->dtag_base = aggbase;
11228
11229 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11230 rec->dtrd_offset = curneeded;
11231 if (curneeded + rec->dtrd_size < curneeded)
11232 return (EINVAL);
11233 curneeded += rec->dtrd_size;
11234 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
11235
11236 aggbase = UINT32_MAX;
11237 curneeded = UINT32_MAX;
11238 } else if (act->dta_intuple) {
11239 if (curneeded == UINT32_MAX) {
11240 /*
11241 * This is the first record in a tuple. Align
11242 * curneeded to be at offset 4 in an 8-byte
11243 * aligned block.
11244 */
11245 ASSERT(act->dta_prev == NULL ||
11246 !act->dta_prev->dta_intuple);
11247 ASSERT3U(aggbase, ==, UINT32_MAX);
11248 curneeded = P2PHASEUP(ecb->dte_size,
11249 sizeof (uint64_t), sizeof (dtrace_aggid_t));
11250
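/*
 * aggbase marks where the aggregation ID is expected to land,
 * immediately ahead of the tuple's first record; the P2PHASEUP()
 * above leaves exactly sizeof (dtrace_aggid_t) for it while
 * keeping aggbase 64-bit aligned (asserted below).
 */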
11251 aggbase = curneeded - sizeof (dtrace_aggid_t);
11252 ASSERT(IS_P2ALIGNED(aggbase,
11253 sizeof (uint64_t)));
11254 }
11255 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11256 rec->dtrd_offset = curneeded;
11257 if (curneeded + rec->dtrd_size < curneeded)
11258 return (EINVAL);
11259 curneeded += rec->dtrd_size;
11260 } else {
11261 /* tuples must be followed by an aggregation */
11262 ASSERT(act->dta_prev == NULL ||
11263 !act->dta_prev->dta_intuple);
11264
11265 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
11266 rec->dtrd_alignment);
11267 rec->dtrd_offset = ecb->dte_size;
11268 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
11269 return (EINVAL);
11270 ecb->dte_size += rec->dtrd_size;
11271 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
11272 }
11273 }
11274
11275 if ((act = ecb->dte_action) != NULL &&
11276 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
11277 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
11278 /*
11279 * If the size is still sizeof (dtrace_rechdr_t), then all
11280 * actions store no data; set the size to 0.
11281 */
11282 ecb->dte_size = 0;
11283 }
11284
11285 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
11286 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
11287 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
11288 ecb->dte_needed);
11289 return (0);
11290 }
11291
11292 static dtrace_action_t *
11293 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11294 {
11295 dtrace_aggregation_t *agg;
11296 size_t size = sizeof (uint64_t);
11297 int ntuple = desc->dtad_ntuple;
11298 dtrace_action_t *act;
11299 dtrace_recdesc_t *frec;
11300 dtrace_aggid_t aggid;
11301 dtrace_state_t *state = ecb->dte_state;
11302
11303 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
11304 agg->dtag_ecb = ecb;
11305
11306 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
11307
11308 switch (desc->dtad_kind) {
11309 case DTRACEAGG_MIN:
11310 agg->dtag_initial = INT64_MAX;
11311 agg->dtag_aggregate = dtrace_aggregate_min;
11312 break;
11313
11314 case DTRACEAGG_MAX:
11315 agg->dtag_initial = INT64_MIN;
11316 agg->dtag_aggregate = dtrace_aggregate_max;
11317 break;
11318
11319 case DTRACEAGG_COUNT:
11320 agg->dtag_aggregate = dtrace_aggregate_count;
11321 break;
11322
11323 case DTRACEAGG_QUANTIZE:
11324 agg->dtag_aggregate = dtrace_aggregate_quantize;
11325 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
11326 sizeof (uint64_t);
11327 break;
11328
11329 case DTRACEAGG_LQUANTIZE: {
11330 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
11331 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
11332
11333 agg->dtag_initial = desc->dtad_arg;
11334 agg->dtag_aggregate = dtrace_aggregate_lquantize;
11335
11336 if (step == 0 || levels == 0)
11337 goto err;
11338
11339 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
11340 break;
11341 }
11342
11343 case DTRACEAGG_LLQUANTIZE: {
11344 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
11345 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
11346 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
11347 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
11348 int64_t v;
11349
11350 agg->dtag_initial = desc->dtad_arg;
11351 agg->dtag_aggregate = dtrace_aggregate_llquantize;
11352
11353 if (factor < 2 || low >= high || nsteps < factor)
11354 goto err;
11355
11356 /*
11357 * Now check that the number of steps evenly divides a power
11358 * of the factor. (This assures both integer bucket size and
11359 * linearity within each magnitude.)
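 * For example, factor 10 with 20 steps is accepted (v ends at 100;
 * 100 % 20 == 0 and 20 % 10 == 0), while factor 10 with 15 steps is
 * rejected (100 % 15 != 0).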
11360 */
11361 for (v = factor; v < nsteps; v *= factor)
11362 continue;
11363
11364 if ((v % nsteps) || (nsteps % factor))
11365 goto err;
11366
11367 size = (dtrace_aggregate_llquantize_bucket(factor,
11368 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
11369 break;
11370 }
11371
11372 case DTRACEAGG_AVG:
11373 agg->dtag_aggregate = dtrace_aggregate_avg;
11374 size = sizeof (uint64_t) * 2;
11375 break;
11376
11377 case DTRACEAGG_STDDEV:
11378 agg->dtag_aggregate = dtrace_aggregate_stddev;
11379 size = sizeof (uint64_t) * 4;
11380 break;
11381
11382 case DTRACEAGG_SUM:
11383 agg->dtag_aggregate = dtrace_aggregate_sum;
11384 break;
11385
11386 default:
11387 goto err;
11388 }
11389
11390 agg->dtag_action.dta_rec.dtrd_size = size;
11391
11392 if (ntuple == 0)
11393 goto err;
11394
11395 /*
11396 * We must make sure that we have enough actions for the n-tuple.
11397 */
11398 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
11399 if (DTRACEACT_ISAGG(act->dta_kind))
11400 break;
11401
11402 if (--ntuple == 0) {
11403 /*
11404 * This is the action with which our n-tuple begins.
11405 */
11406 agg->dtag_first = act;
11407 goto success;
11408 }
11409 }
11410
11411 /*
11412 * This n-tuple is short by ntuple elements. Return failure.
11413 */
11414 ASSERT(ntuple != 0);
11415 err:
11416 kmem_free(agg, sizeof (dtrace_aggregation_t));
11417 return (NULL);
11418
11419 success:
11420 /*
11421 * If the last action in the tuple has a size of zero, it's actually
11422 * an expression argument for the aggregating action.
11423 */
11424 ASSERT(ecb->dte_action_last != NULL);
11425 act = ecb->dte_action_last;
11426
11427 if (act->dta_kind == DTRACEACT_DIFEXPR) {
11428 ASSERT(act->dta_difo != NULL);
11429
11430 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
11431 agg->dtag_hasarg = 1;
11432 }
11433
11434 /*
11435 * We need to allocate an id for this aggregation.
11436 */
11437 #ifdef illumos
11438 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
11439 VM_BESTFIT | VM_SLEEP);
11440 #else
11441 aggid = alloc_unr(state->dts_aggid_arena);
11442 #endif
11443
11444 if (aggid - 1 >= state->dts_naggregations) {
11445 dtrace_aggregation_t **oaggs = state->dts_aggregations;
11446 dtrace_aggregation_t **aggs;
11447 int naggs = state->dts_naggregations << 1;
11448 int onaggs = state->dts_naggregations;
11449
11450 ASSERT(aggid == state->dts_naggregations + 1);
11451
11452 if (naggs == 0) {
11453 ASSERT(oaggs == NULL);
11454 naggs = 1;
11455 }
11456
11457 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
11458
11459 if (oaggs != NULL) {
11460 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
11461 kmem_free(oaggs, onaggs * sizeof (*aggs));
11462 }
11463
11464 state->dts_aggregations = aggs;
11465 state->dts_naggregations = naggs;
11466 }
11467
11468 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
11469 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
11470
11471 frec = &agg->dtag_first->dta_rec;
11472 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
11473 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
11474
11475 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
11476 ASSERT(!act->dta_intuple);
11477 act->dta_intuple = 1;
11478 }
11479
11480 return (&agg->dtag_action);
11481 }
11482
11483 static void
11484 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
11485 {
11486 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11487 dtrace_state_t *state = ecb->dte_state;
11488 dtrace_aggid_t aggid = agg->dtag_id;
11489
11490 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
11491 #ifdef illumos
11492 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
11493 #else
11494 free_unr(state->dts_aggid_arena, aggid);
11495 #endif
11496
11497 ASSERT(state->dts_aggregations[aggid - 1] == agg);
11498 state->dts_aggregations[aggid - 1] = NULL;
11499
11500 kmem_free(agg, sizeof (dtrace_aggregation_t));
11501 }
11502
11503 static int
11504 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11505 {
11506 dtrace_action_t *action, *last;
11507 dtrace_difo_t *dp = desc->dtad_difo;
11508 uint32_t size = 0, align = sizeof (uint8_t), mask;
11509 uint16_t format = 0;
11510 dtrace_recdesc_t *rec;
11511 dtrace_state_t *state = ecb->dte_state;
11512 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
11513 uint64_t arg = desc->dtad_arg;
11514
11515 ASSERT(MUTEX_HELD(&dtrace_lock));
11516 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
11517
11518 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
11519 /*
11520 * If this is an aggregating action, there must be neither
11521 * a speculate nor a commit on the action chain.
11522 */
11523 dtrace_action_t *act;
11524
11525 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11526 if (act->dta_kind == DTRACEACT_COMMIT)
11527 return (EINVAL);
11528
11529 if (act->dta_kind == DTRACEACT_SPECULATE)
11530 return (EINVAL);
11531 }
11532
11533 action = dtrace_ecb_aggregation_create(ecb, desc);
11534
11535 if (action == NULL)
11536 return (EINVAL);
11537 } else {
11538 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
11539 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
11540 dp != NULL && dp->dtdo_destructive)) {
11541 state->dts_destructive = 1;
11542 }
11543
11544 switch (desc->dtad_kind) {
11545 case DTRACEACT_PRINTF:
11546 case DTRACEACT_PRINTA:
11547 case DTRACEACT_SYSTEM:
11548 case DTRACEACT_FREOPEN:
11549 case DTRACEACT_DIFEXPR:
11550 /*
11551 * We know that our arg is a string -- turn it into a
11552 * format.
11553 */
11554 if (arg == 0) {
11555 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
11556 desc->dtad_kind == DTRACEACT_DIFEXPR);
11557 format = 0;
11558 } else {
11559 ASSERT(arg != 0);
11560 #ifdef illumos
11561 ASSERT(arg > KERNELBASE);
11562 #endif
11563 format = dtrace_format_add(state,
11564 (char *)(uintptr_t)arg);
11565 }
11566
11567 /*FALLTHROUGH*/
11568 case DTRACEACT_LIBACT:
11569 case DTRACEACT_TRACEMEM:
11570 case DTRACEACT_TRACEMEM_DYNSIZE:
11571 if (dp == NULL)
11572 return (EINVAL);
11573
11574 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11575 break;
11576
11577 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11578 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11579 return (EINVAL);
11580
11581 size = opt[DTRACEOPT_STRSIZE];
11582 }
11583
11584 break;
11585
11586 case DTRACEACT_STACK:
11587 if ((nframes = arg) == 0) {
11588 nframes = opt[DTRACEOPT_STACKFRAMES];
11589 ASSERT(nframes > 0);
11590 arg = nframes;
11591 }
11592
11593 size = nframes * sizeof (pc_t);
11594 break;
11595
11596 case DTRACEACT_JSTACK:
11597 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11598 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11599
11600 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11601 nframes = opt[DTRACEOPT_JSTACKFRAMES];
11602
11603 arg = DTRACE_USTACK_ARG(nframes, strsize);
11604
11605 /*FALLTHROUGH*/
11606 case DTRACEACT_USTACK:
11607 if (desc->dtad_kind != DTRACEACT_JSTACK &&
11608 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11609 strsize = DTRACE_USTACK_STRSIZE(arg);
11610 nframes = opt[DTRACEOPT_USTACKFRAMES];
11611 ASSERT(nframes > 0);
11612 arg = DTRACE_USTACK_ARG(nframes, strsize);
11613 }
11614
11615 /*
11616 * Save a slot for the pid.
11617 */
11618 size = (nframes + 1) * sizeof (uint64_t);
11619 size += DTRACE_USTACK_STRSIZE(arg);
11620 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11621
11622 break;
11623
11624 case DTRACEACT_SYM:
11625 case DTRACEACT_MOD:
11626 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11627 sizeof (uint64_t)) ||
11628 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11629 return (EINVAL);
11630 break;
11631
11632 case DTRACEACT_USYM:
11633 case DTRACEACT_UMOD:
11634 case DTRACEACT_UADDR:
11635 if (dp == NULL ||
11636 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11637 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11638 return (EINVAL);
11639
11640 /*
11641 * We have a slot for the pid, plus a slot for the
11642 * argument. To keep things simple (aligned with
11643 * bitness-neutral sizing), we store each as a 64-bit
11644 * quantity.
11645 */
11646 size = 2 * sizeof (uint64_t);
11647 break;
11648
11649 case DTRACEACT_STOP:
11650 case DTRACEACT_BREAKPOINT:
11651 case DTRACEACT_PANIC:
11652 break;
11653
11654 case DTRACEACT_CHILL:
11655 case DTRACEACT_DISCARD:
11656 case DTRACEACT_RAISE:
11657 if (dp == NULL)
11658 return (EINVAL);
11659 break;
11660
11661 case DTRACEACT_EXIT:
11662 if (dp == NULL ||
11663 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11664 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11665 return (EINVAL);
11666 break;
11667
11668 case DTRACEACT_SPECULATE:
11669 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11670 return (EINVAL);
11671
11672 if (dp == NULL)
11673 return (EINVAL);
11674
11675 state->dts_speculates = 1;
11676 break;
11677
11678 case DTRACEACT_PRINTM:
11679 size = dp->dtdo_rtype.dtdt_size;
11680 break;
11681
11682 case DTRACEACT_COMMIT: {
11683 dtrace_action_t *act = ecb->dte_action;
11684
11685 for (; act != NULL; act = act->dta_next) {
11686 if (act->dta_kind == DTRACEACT_COMMIT)
11687 return (EINVAL);
11688 }
11689
11690 if (dp == NULL)
11691 return (EINVAL);
11692 break;
11693 }
11694
11695 default:
11696 return (EINVAL);
11697 }
11698
11699 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11700 /*
11701 * If this is a data-storing action or a speculate,
11702 * we must be sure that there isn't a commit on the
11703 * action chain.
11704 */
11705 dtrace_action_t *act = ecb->dte_action;
11706
11707 for (; act != NULL; act = act->dta_next) {
11708 if (act->dta_kind == DTRACEACT_COMMIT)
11709 return (EINVAL);
11710 }
11711 }
11712
11713 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11714 action->dta_rec.dtrd_size = size;
11715 }
11716
11717 action->dta_refcnt = 1;
11718 rec = &action->dta_rec;
11719 size = rec->dtrd_size;
11720
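/*
 * Derive the record alignment from its size: use the largest power of
 * two (capped at sizeof (uint64_t)) that evenly divides the size,
 * falling back to byte alignment.
 */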
11721 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11722 if (!(size & mask)) {
11723 align = mask + 1;
11724 break;
11725 }
11726 }
11727
11728 action->dta_kind = desc->dtad_kind;
11729
11730 if ((action->dta_difo = dp) != NULL)
11731 dtrace_difo_hold(dp);
11732
11733 rec->dtrd_action = action->dta_kind;
11734 rec->dtrd_arg = arg;
11735 rec->dtrd_uarg = desc->dtad_uarg;
11736 rec->dtrd_alignment = (uint16_t)align;
11737 rec->dtrd_format = format;
11738
11739 if ((last = ecb->dte_action_last) != NULL) {
11740 ASSERT(ecb->dte_action != NULL);
11741 action->dta_prev = last;
11742 last->dta_next = action;
11743 } else {
11744 ASSERT(ecb->dte_action == NULL);
11745 ecb->dte_action = action;
11746 }
11747
11748 ecb->dte_action_last = action;
11749
11750 return (0);
11751 }
11752
11753 static void
11754 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
11755 {
11756 dtrace_action_t *act = ecb->dte_action, *next;
11757 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
11758 dtrace_difo_t *dp;
11759 uint16_t format;
11760
11761 if (act != NULL && act->dta_refcnt > 1) {
11762 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
11763 act->dta_refcnt--;
11764 } else {
11765 for (; act != NULL; act = next) {
11766 next = act->dta_next;
11767 ASSERT(next != NULL || act == ecb->dte_action_last);
11768 ASSERT(act->dta_refcnt == 1);
11769
11770 if ((format = act->dta_rec.dtrd_format) != 0)
11771 dtrace_format_remove(ecb->dte_state, format);
11772
11773 if ((dp = act->dta_difo) != NULL)
11774 dtrace_difo_release(dp, vstate);
11775
11776 if (DTRACEACT_ISAGG(act->dta_kind)) {
11777 dtrace_ecb_aggregation_destroy(ecb, act);
11778 } else {
11779 kmem_free(act, sizeof (dtrace_action_t));
11780 }
11781 }
11782 }
11783
11784 ecb->dte_action = NULL;
11785 ecb->dte_action_last = NULL;
11786 ecb->dte_size = 0;
11787 }
11788
11789 static void
11790 dtrace_ecb_disable(dtrace_ecb_t *ecb)
11791 {
11792 /*
11793 * We disable the ECB by removing it from its probe.
11794 */
11795 dtrace_ecb_t *pecb, *prev = NULL;
11796 dtrace_probe_t *probe = ecb->dte_probe;
11797
11798 ASSERT(MUTEX_HELD(&dtrace_lock));
11799
11800 if (probe == NULL) {
11801 /*
11802 * This is the NULL probe; there is nothing to disable.
11803 */
11804 return;
11805 }
11806
11807 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
11808 if (pecb == ecb)
11809 break;
11810 prev = pecb;
11811 }
11812
11813 ASSERT(pecb != NULL);
11814
11815 if (prev == NULL) {
11816 probe->dtpr_ecb = ecb->dte_next;
11817 } else {
11818 prev->dte_next = ecb->dte_next;
11819 }
11820
11821 if (ecb == probe->dtpr_ecb_last) {
11822 ASSERT(ecb->dte_next == NULL);
11823 probe->dtpr_ecb_last = prev;
11824 }
11825
11826 /*
11827 * The ECB has been disconnected from the probe; now sync to assure
11828 * that all CPUs have seen the change before returning.
11829 */
11830 dtrace_sync();
11831
11832 if (probe->dtpr_ecb == NULL) {
11833 /*
11834 * That was the last ECB on the probe; clear the predicate
11835 * cache ID for the probe, disable it and sync one more time
11836 * to assure that we'll never hit it again.
11837 */
11838 dtrace_provider_t *prov = probe->dtpr_provider;
11839
11840 ASSERT(ecb->dte_next == NULL);
11841 ASSERT(probe->dtpr_ecb_last == NULL);
11842 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
11843 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
11844 probe->dtpr_id, probe->dtpr_arg);
11845 dtrace_sync();
11846 } else {
11847 /*
11848 * There is at least one ECB remaining on the probe. If there
11849 * is _exactly_ one, set the probe's predicate cache ID to be
11850 * the predicate cache ID of the remaining ECB.
11851 */
11852 ASSERT(probe->dtpr_ecb_last != NULL);
11853 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
11854
11855 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
11856 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
11857
11858 ASSERT(probe->dtpr_ecb->dte_next == NULL);
11859
11860 if (p != NULL)
11861 probe->dtpr_predcache = p->dtp_cacheid;
11862 }
11863
11864 ecb->dte_next = NULL;
11865 }
11866 }
11867
11868 static void
11869 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
11870 {
11871 dtrace_state_t *state = ecb->dte_state;
11872 dtrace_vstate_t *vstate = &state->dts_vstate;
11873 dtrace_predicate_t *pred;
11874 dtrace_epid_t epid = ecb->dte_epid;
11875
11876 ASSERT(MUTEX_HELD(&dtrace_lock));
11877 ASSERT(ecb->dte_next == NULL);
11878 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
11879
11880 if ((pred = ecb->dte_predicate) != NULL)
11881 dtrace_predicate_release(pred, vstate);
11882
11883 dtrace_ecb_action_remove(ecb);
11884
11885 ASSERT(state->dts_ecbs[epid - 1] == ecb);
11886 state->dts_ecbs[epid - 1] = NULL;
11887
11888 kmem_free(ecb, sizeof (dtrace_ecb_t));
11889 }
11890
11891 static dtrace_ecb_t *
11892 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
11893 dtrace_enabling_t *enab)
11894 {
11895 dtrace_ecb_t *ecb;
11896 dtrace_predicate_t *pred;
11897 dtrace_actdesc_t *act;
11898 dtrace_provider_t *prov;
11899 dtrace_ecbdesc_t *desc = enab->dten_current;
11900
11901 ASSERT(MUTEX_HELD(&dtrace_lock));
11902 ASSERT(state != NULL);
11903
11904 ecb = dtrace_ecb_add(state, probe);
11905 ecb->dte_uarg = desc->dted_uarg;
11906
11907 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
11908 dtrace_predicate_hold(pred);
11909 ecb->dte_predicate = pred;
11910 }
11911
11912 if (probe != NULL) {
11913 /*
11914 * If the provider shows more leg than the consumer is old
11915 * enough to see, we need to enable the appropriate implicit
11916 * predicate bits to prevent the ecb from activating at
11917 * revealing times.
11918 *
11919 * Providers specifying DTRACE_PRIV_USER at register time
11920 * are stating that they need the /proc-style privilege
11921 * model to be enforced, and this is what DTRACE_COND_OWNER
11922 * and DTRACE_COND_ZONEOWNER will then do at probe time.
11923 */
11924 prov = probe->dtpr_provider;
11925 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
11926 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11927 ecb->dte_cond |= DTRACE_COND_OWNER;
11928
11929 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
11930 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
11931 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
11932
11933 /*
11934 * If the provider shows us kernel innards and the user
11935 * is lacking sufficient privilege, enable the
11936 * DTRACE_COND_USERMODE implicit predicate.
11937 */
11938 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
11939 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
11940 ecb->dte_cond |= DTRACE_COND_USERMODE;
11941 }
11942
11943 if (dtrace_ecb_create_cache != NULL) {
11944 /*
11945 * If we have a cached ecb, we'll use its action list instead
11946 * of creating our own (saving both time and space).
11947 */
11948 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
11949 dtrace_action_t *act = cached->dte_action;
11950
11951 if (act != NULL) {
11952 ASSERT(act->dta_refcnt > 0);
11953 act->dta_refcnt++;
11954 ecb->dte_action = act;
11955 ecb->dte_action_last = cached->dte_action_last;
11956 ecb->dte_needed = cached->dte_needed;
11957 ecb->dte_size = cached->dte_size;
11958 ecb->dte_alignment = cached->dte_alignment;
11959 }
11960
11961 return (ecb);
11962 }
11963
11964 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
11965 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
11966 dtrace_ecb_destroy(ecb);
11967 return (NULL);
11968 }
11969 }
11970
11971 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
11972 dtrace_ecb_destroy(ecb);
11973 return (NULL);
11974 }
11975
11976 return (dtrace_ecb_create_cache = ecb);
11977 }
11978
11979 static int
11980 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
11981 {
11982 dtrace_ecb_t *ecb;
11983 dtrace_enabling_t *enab = arg;
11984 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
11985
11986 ASSERT(state != NULL);
11987
11988 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
11989 /*
11990 * This probe was created in a generation for which this
11991 * enabling has previously created ECBs; we don't want to
11992 * enable it again, so just kick out.
11993 */
11994 return (DTRACE_MATCH_NEXT);
11995 }
11996
11997 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
11998 return (DTRACE_MATCH_DONE);
11999
12000 dtrace_ecb_enable(ecb);
12001 return (DTRACE_MATCH_NEXT);
12002 }
12003
12004 static dtrace_ecb_t *
12005 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
12006 {
12007 dtrace_ecb_t *ecb;
12008
12009 ASSERT(MUTEX_HELD(&dtrace_lock));
12010
12011 if (id == 0 || id > state->dts_necbs)
12012 return (NULL);
12013
12014 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
12015 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
12016
12017 return (state->dts_ecbs[id - 1]);
12018 }
12019
12020 static dtrace_aggregation_t *
12021 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
12022 {
12023 dtrace_aggregation_t *agg;
12024
12025 ASSERT(MUTEX_HELD(&dtrace_lock));
12026
12027 if (id == 0 || id > state->dts_naggregations)
12028 return (NULL);
12029
12030 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
12031 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
12032 agg->dtag_id == id);
12033
12034 return (state->dts_aggregations[id - 1]);
12035 }
12036
12037 /*
12038 * DTrace Buffer Functions
12039 *
12040 * The following functions manipulate DTrace buffers. Most of these functions
12041 * are called in the context of establishing or processing consumer state;
12042 * exceptions are explicitly noted.
12043 */
12044
12045 /*
12046 * Note: called from cross call context. This function switches the two
12047 * buffers on a given CPU. The atomicity of this operation is assured by
12048 * disabling interrupts while the actual switch takes place; the disabling of
12049 * interrupts serializes the execution with any execution of dtrace_probe() on
12050 * the same CPU.
12051 */
12052 static void
12053 dtrace_buffer_switch(dtrace_buffer_t *buf)
12054 {
12055 caddr_t tomax = buf->dtb_tomax;
12056 caddr_t xamot = buf->dtb_xamot;
12057 dtrace_icookie_t cookie;
12058 hrtime_t now;
12059
12060 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12061 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
12062
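	/*
	 * Swap the active (tomax) and inactive (xamot) buffers, snapshotting
	 * the active buffer's offset, drop and error counts into the xamot
	 * fields for the consumer, and reset the newly-active buffer.
	 */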
12063 cookie = dtrace_interrupt_disable();
12064 now = dtrace_gethrtime();
12065 buf->dtb_tomax = xamot;
12066 buf->dtb_xamot = tomax;
12067 buf->dtb_xamot_drops = buf->dtb_drops;
12068 buf->dtb_xamot_offset = buf->dtb_offset;
12069 buf->dtb_xamot_errors = buf->dtb_errors;
12070 buf->dtb_xamot_flags = buf->dtb_flags;
12071 buf->dtb_offset = 0;
12072 buf->dtb_drops = 0;
12073 buf->dtb_errors = 0;
12074 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
12075 buf->dtb_interval = now - buf->dtb_switched;
12076 buf->dtb_switched = now;
12077 dtrace_interrupt_enable(cookie);
12078 }
12079
12080 /*
12081 * Note: called from cross call context. This function activates a buffer
12082 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
12083 * is guaranteed by the disabling of interrupts.
12084 */
12085 static void
12086 dtrace_buffer_activate(dtrace_state_t *state)
12087 {
12088 dtrace_buffer_t *buf;
12089 dtrace_icookie_t cookie = dtrace_interrupt_disable();
12090
12091 buf = &state->dts_buffer[curcpu];
12092
12093 if (buf->dtb_tomax != NULL) {
12094 /*
12095 * We might like to assert that the buffer is marked inactive,
12096 * but this isn't necessarily true: the buffer for the CPU
12097 * that processes the BEGIN probe has its buffer activated
12098 * manually. In this case, we take the (harmless) action
12099 		 * of re-clearing the INACTIVE bit.
12100 */
12101 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
12102 }
12103
12104 dtrace_interrupt_enable(cookie);
12105 }
12106
12107 #ifdef __FreeBSD__
12108 /*
12109 * Activate the specified per-CPU buffer. This is used instead of
12110 * dtrace_buffer_activate() when APs have not yet started, i.e. when
12111 * activating anonymous state.
12112 */
12113 static void
12114 dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu)
12115 {
12116
12117 if (state->dts_buffer[cpu].dtb_tomax != NULL)
12118 state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
12119 }
12120 #endif
12121
12122 static int
12123 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
12124 processorid_t cpu, int *factor)
12125 {
12126 #ifdef illumos
12127 cpu_t *cp;
12128 #endif
12129 dtrace_buffer_t *buf;
12130 int allocated = 0, desired = 0;
12131
12132 #ifdef illumos
12133 ASSERT(MUTEX_HELD(&cpu_lock));
12134 ASSERT(MUTEX_HELD(&dtrace_lock));
12135
12136 *factor = 1;
12137
12138 if (size > dtrace_nonroot_maxsize &&
12139 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
12140 return (EFBIG);
12141
12142 cp = cpu_list;
12143
12144 do {
12145 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12146 continue;
12147
12148 buf = &bufs[cp->cpu_id];
12149
12150 /*
12151 * If there is already a buffer allocated for this CPU, it
12152 		 * is only possible that this is a DR event. In this case,
		 * the buffer size must match our specified size.
12153 		 */
12154 if (buf->dtb_tomax != NULL) {
12155 ASSERT(buf->dtb_size == size);
12156 continue;
12157 }
12158
12159 ASSERT(buf->dtb_xamot == NULL);
12160
12161 if ((buf->dtb_tomax = kmem_zalloc(size,
12162 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12163 goto err;
12164
12165 buf->dtb_size = size;
12166 buf->dtb_flags = flags;
12167 buf->dtb_offset = 0;
12168 buf->dtb_drops = 0;
12169
12170 if (flags & DTRACEBUF_NOSWITCH)
12171 continue;
12172
12173 if ((buf->dtb_xamot = kmem_zalloc(size,
12174 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12175 goto err;
12176 } while ((cp = cp->cpu_next) != cpu_list);
12177
12178 return (0);
12179
12180 err:
12181 cp = cpu_list;
12182
12183 do {
12184 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12185 continue;
12186
12187 buf = &bufs[cp->cpu_id];
12188 desired += 2;
12189
12190 if (buf->dtb_xamot != NULL) {
12191 ASSERT(buf->dtb_tomax != NULL);
12192 ASSERT(buf->dtb_size == size);
12193 kmem_free(buf->dtb_xamot, size);
12194 allocated++;
12195 }
12196
12197 if (buf->dtb_tomax != NULL) {
12198 ASSERT(buf->dtb_size == size);
12199 kmem_free(buf->dtb_tomax, size);
12200 allocated++;
12201 }
12202
12203 buf->dtb_tomax = NULL;
12204 buf->dtb_xamot = NULL;
12205 buf->dtb_size = 0;
12206 } while ((cp = cp->cpu_next) != cpu_list);
12207 #else
12208 int i;
12209
12210 *factor = 1;
12211 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
12212 defined(__mips__) || defined(__powerpc__) || defined(__riscv)
12213 /*
12214 * FreeBSD isn't good at limiting the amount of memory we
12215 * ask to malloc, so let's place a limit here before trying
12216 * to do something that might well end in tears at bedtime.
12217 */
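	/*
	 * The cap below amounts to a per-CPU limit of physical memory divided
	 * by (dtrace_bufsize_max_frac * mp_ncpus); e.g. with a fraction of
	 * 128 and 8 CPUs (hypothetical values), each CPU may use at most
	 * 1/1024th of physical memory.
	 */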
12218 int bufsize_percpu_frac = dtrace_bufsize_max_frac * mp_ncpus;
12219 if (size > physmem * PAGE_SIZE / bufsize_percpu_frac)
12220 return (ENOMEM);
12221 #endif
12222
12223 ASSERT(MUTEX_HELD(&dtrace_lock));
12224 CPU_FOREACH(i) {
12225 if (cpu != DTRACE_CPUALL && cpu != i)
12226 continue;
12227
12228 buf = &bufs[i];
12229
12230 /*
12231 * If there is already a buffer allocated for this CPU, it
12232 * is only possible that this is a DR event. In this case,
12233 * the buffer size must match our specified size.
12234 */
12235 if (buf->dtb_tomax != NULL) {
12236 ASSERT(buf->dtb_size == size);
12237 continue;
12238 }
12239
12240 ASSERT(buf->dtb_xamot == NULL);
12241
12242 if ((buf->dtb_tomax = kmem_zalloc(size,
12243 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12244 goto err;
12245
12246 buf->dtb_size = size;
12247 buf->dtb_flags = flags;
12248 buf->dtb_offset = 0;
12249 buf->dtb_drops = 0;
12250
12251 if (flags & DTRACEBUF_NOSWITCH)
12252 continue;
12253
12254 if ((buf->dtb_xamot = kmem_zalloc(size,
12255 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12256 goto err;
12257 }
12258
12259 return (0);
12260
12261 err:
12262 /*
12263 * Error allocating memory, so free the buffers that were
12264 * allocated before the failed allocation.
12265 */
12266 CPU_FOREACH(i) {
12267 if (cpu != DTRACE_CPUALL && cpu != i)
12268 continue;
12269
12270 buf = &bufs[i];
12271 desired += 2;
12272
12273 if (buf->dtb_xamot != NULL) {
12274 ASSERT(buf->dtb_tomax != NULL);
12275 ASSERT(buf->dtb_size == size);
12276 kmem_free(buf->dtb_xamot, size);
12277 allocated++;
12278 }
12279
12280 if (buf->dtb_tomax != NULL) {
12281 ASSERT(buf->dtb_size == size);
12282 kmem_free(buf->dtb_tomax, size);
12283 allocated++;
12284 }
12285
12286 buf->dtb_tomax = NULL;
12287 buf->dtb_xamot = NULL;
12288 buf->dtb_size = 0;
12289
12290 }
12291 #endif
12292 *factor = desired / (allocated > 0 ? allocated : 1);
12293
12294 return (ENOMEM);
12295 }
12296
12297 /*
12298 * Note: called from probe context. This function just increments the drop
12299 * count on a buffer. It has been made a function to allow for the
12300 * possibility of understanding the source of mysterious drop counts. (A
12301 * problem for which one may be particularly disappointed that DTrace cannot
12302 * be used to understand DTrace.)
12303 */
12304 static void
12305 dtrace_buffer_drop(dtrace_buffer_t *buf)
12306 {
12307 buf->dtb_drops++;
12308 }
12309
12310 /*
12311 * Note: called from probe context. This function is called to reserve space
12312 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
12313 * mstate. Returns the new offset in the buffer, or a negative value if an
12314 * error has occurred.
12315 */
12316 static intptr_t
12317 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
12318 dtrace_state_t *state, dtrace_mstate_t *mstate)
12319 {
12320 intptr_t offs = buf->dtb_offset, soffs;
12321 intptr_t woffs;
12322 caddr_t tomax;
12323 size_t total;
12324
12325 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
12326 return (-1);
12327
12328 if ((tomax = buf->dtb_tomax) == NULL) {
12329 dtrace_buffer_drop(buf);
12330 return (-1);
12331 }
12332
12333 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
12334 while (offs & (align - 1)) {
12335 /*
12336 * Assert that our alignment is off by a number which
12337 * is itself sizeof (uint32_t) aligned.
12338 */
12339 ASSERT(!((align - (offs & (align - 1))) &
12340 (sizeof (uint32_t) - 1)));
12341 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12342 offs += sizeof (uint32_t);
12343 }
12344
12345 if ((soffs = offs + needed) > buf->dtb_size) {
12346 dtrace_buffer_drop(buf);
12347 return (-1);
12348 }
12349
12350 if (mstate == NULL)
12351 return (offs);
12352
12353 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
12354 mstate->dtms_scratch_size = buf->dtb_size - soffs;
12355 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12356
12357 return (offs);
12358 }
12359
12360 if (buf->dtb_flags & DTRACEBUF_FILL) {
12361 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
12362 (buf->dtb_flags & DTRACEBUF_FULL))
12363 return (-1);
12364 goto out;
12365 }
12366
12367 total = needed + (offs & (align - 1));
12368
12369 /*
12370 * For a ring buffer, life is quite a bit more complicated. Before
12371 * we can store any padding, we need to adjust our wrapping offset.
12372 * (If we've never before wrapped or we're not about to, no adjustment
12373 * is required.)
12374 */
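	/*
	 * To illustrate with hypothetical numbers: consider a 64-byte ring
	 * buffer that has already wrapped, with offs at 56, woffs at 16 and a
	 * 24-byte reservation.  The record can't fit in the remaining 8
	 * bytes, so the code below zeroes the tail of the buffer, resets offs
	 * to 0, and then advances woffs over old records until at least 24
	 * bytes are free at the top of the buffer.
	 */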
12375 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
12376 offs + total > buf->dtb_size) {
12377 woffs = buf->dtb_xamot_offset;
12378
12379 if (offs + total > buf->dtb_size) {
12380 /*
12381 * We can't fit in the end of the buffer. First, a
12382 * sanity check that we can fit in the buffer at all.
12383 */
12384 if (total > buf->dtb_size) {
12385 dtrace_buffer_drop(buf);
12386 return (-1);
12387 }
12388
12389 /*
12390 * We're going to be storing at the top of the buffer,
12391 * so now we need to deal with the wrapped offset. We
12392 * only reset our wrapped offset to 0 if it is
12393 * currently greater than the current offset. If it
12394 * is less than the current offset, it is because a
12395 * previous allocation induced a wrap -- but the
12396 * allocation didn't subsequently take the space due
12397 * to an error or false predicate evaluation. In this
12398 * case, we'll just leave the wrapped offset alone: if
12399 * the wrapped offset hasn't been advanced far enough
12400 * for this allocation, it will be adjusted in the
12401 * lower loop.
12402 */
12403 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
12404 if (woffs >= offs)
12405 woffs = 0;
12406 } else {
12407 woffs = 0;
12408 }
12409
12410 /*
12411 * Now we know that we're going to be storing to the
12412 * top of the buffer and that there is room for us
12413 * there. We need to clear the buffer from the current
12414 * offset to the end (there may be old gunk there).
12415 */
12416 while (offs < buf->dtb_size)
12417 tomax[offs++] = 0;
12418
12419 /*
12420 * We need to set our offset to zero. And because we
12421 * are wrapping, we need to set the bit indicating as
12422 * much. We can also adjust our needed space back
12423 * down to the space required by the ECB -- we know
12424 * that the top of the buffer is aligned.
12425 */
12426 offs = 0;
12427 total = needed;
12428 buf->dtb_flags |= DTRACEBUF_WRAPPED;
12429 } else {
12430 /*
12431 * There is room for us in the buffer, so we simply
12432 * need to check the wrapped offset.
12433 */
12434 if (woffs < offs) {
12435 /*
12436 * The wrapped offset is less than the offset.
12437 * This can happen if we allocated buffer space
12438 * that induced a wrap, but then we didn't
12439 * subsequently take the space due to an error
12440 * or false predicate evaluation. This is
12441 * okay; we know that _this_ allocation isn't
12442 * going to induce a wrap. We still can't
12443 * reset the wrapped offset to be zero,
12444 * however: the space may have been trashed in
12445 * the previous failed probe attempt. But at
12446 * least the wrapped offset doesn't need to
12447 * be adjusted at all...
12448 */
12449 goto out;
12450 }
12451 }
12452
12453 while (offs + total > woffs) {
12454 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
12455 size_t size;
12456
12457 if (epid == DTRACE_EPIDNONE) {
12458 size = sizeof (uint32_t);
12459 } else {
12460 ASSERT3U(epid, <=, state->dts_necbs);
12461 ASSERT(state->dts_ecbs[epid - 1] != NULL);
12462
12463 size = state->dts_ecbs[epid - 1]->dte_size;
12464 }
12465
12466 ASSERT(woffs + size <= buf->dtb_size);
12467 ASSERT(size != 0);
12468
12469 if (woffs + size == buf->dtb_size) {
12470 /*
12471 * We've reached the end of the buffer; we want
12472 * to set the wrapped offset to 0 and break
12473 * out. However, if the offs is 0, then we're
12474 * in a strange edge-condition: the amount of
12475 * space that we want to reserve plus the size
12476 * of the record that we're overwriting is
12477 * greater than the size of the buffer. This
12478 * is problematic because if we reserve the
12479 * space but subsequently don't consume it (due
12480 * to a failed predicate or error) the wrapped
12481 * offset will be 0 -- yet the EPID at offset 0
12482 * will not be committed. This situation is
12483 * relatively easy to deal with: if we're in
12484 * this case, the buffer is indistinguishable
12485 * from one that hasn't wrapped; we need only
12486 * finish the job by clearing the wrapped bit,
12487 * explicitly setting the offset to be 0, and
12488 * zero'ing out the old data in the buffer.
12489 */
12490 if (offs == 0) {
12491 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
12492 buf->dtb_offset = 0;
12493 woffs = total;
12494
12495 while (woffs < buf->dtb_size)
12496 tomax[woffs++] = 0;
12497 }
12498
12499 woffs = 0;
12500 break;
12501 }
12502
12503 woffs += size;
12504 }
12505
12506 /*
12507 * We have a wrapped offset. It may be that the wrapped offset
12508 * has become zero -- that's okay.
12509 */
12510 buf->dtb_xamot_offset = woffs;
12511 }
12512
12513 out:
12514 /*
12515 * Now we can plow the buffer with any necessary padding.
12516 */
12517 while (offs & (align - 1)) {
12518 /*
12519 * Assert that our alignment is off by a number which
12520 * is itself sizeof (uint32_t) aligned.
12521 */
12522 ASSERT(!((align - (offs & (align - 1))) &
12523 (sizeof (uint32_t) - 1)));
12524 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12525 offs += sizeof (uint32_t);
12526 }
12527
12528 if (buf->dtb_flags & DTRACEBUF_FILL) {
12529 if (offs + needed > buf->dtb_size - state->dts_reserve) {
12530 buf->dtb_flags |= DTRACEBUF_FULL;
12531 return (-1);
12532 }
12533 }
12534
12535 if (mstate == NULL)
12536 return (offs);
12537
12538 /*
12539 * For ring buffers and fill buffers, the scratch space is always
12540 * the inactive buffer.
12541 */
12542 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
12543 mstate->dtms_scratch_size = buf->dtb_size;
12544 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12545
12546 return (offs);
12547 }
12548
12549 static void
12550 dtrace_buffer_polish(dtrace_buffer_t *buf)
12551 {
12552 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
12553 ASSERT(MUTEX_HELD(&dtrace_lock));
12554
12555 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
12556 return;
12557
12558 /*
12559 * We need to polish the ring buffer. There are three cases:
12560 *
12561 * - The first (and presumably most common) is that there is no gap
12562 * between the buffer offset and the wrapped offset. In this case,
12563 * there is nothing in the buffer that isn't valid data; we can
12564 * mark the buffer as polished and return.
12565 *
12566 * - The second (less common than the first but still more common
12567 * than the third) is that there is a gap between the buffer offset
12568 * and the wrapped offset, and the wrapped offset is larger than the
12569 * buffer offset. This can happen because of an alignment issue, or
12570 * can happen because of a call to dtrace_buffer_reserve() that
12571 * didn't subsequently consume the buffer space. In this case,
12572 * we need to zero the data from the buffer offset to the wrapped
12573 * offset.
12574 *
12575 * - The third (and least common) is that there is a gap between the
12576 * buffer offset and the wrapped offset, but the wrapped offset is
12577 * _less_ than the buffer offset. This can only happen because a
12578 * call to dtrace_buffer_reserve() induced a wrap, but the space
12579 * was not subsequently consumed. In this case, we need to zero the
12580 * space from the offset to the end of the buffer _and_ from the
12581 * top of the buffer to the wrapped offset.
12582 */
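	/*
	 * The first case requires no work; the first block below handles the
	 * second case and the second block handles the third.
	 */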
12583 if (buf->dtb_offset < buf->dtb_xamot_offset) {
12584 bzero(buf->dtb_tomax + buf->dtb_offset,
12585 buf->dtb_xamot_offset - buf->dtb_offset);
12586 }
12587
12588 if (buf->dtb_offset > buf->dtb_xamot_offset) {
12589 bzero(buf->dtb_tomax + buf->dtb_offset,
12590 buf->dtb_size - buf->dtb_offset);
12591 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
12592 }
12593 }
12594
12595 /*
12596 * This routine determines if data generated at the specified time has likely
12597 * been entirely consumed at user-level. This routine is called to determine
12598 * if an ECB on a defunct probe (but for an active enabling) can be safely
12599 * disabled and destroyed.
12600 */
12601 static int
12602 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
12603 {
12604 int i;
12605
12606 for (i = 0; i < NCPU; i++) {
12607 dtrace_buffer_t *buf = &bufs[i];
12608
12609 if (buf->dtb_size == 0)
12610 continue;
12611
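		/*
		 * Ring buffers are never switched out to the consumer, so we
		 * can never conclude that their contents have been consumed.
		 */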
12612 if (buf->dtb_flags & DTRACEBUF_RING)
12613 return (0);
12614
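		/*
		 * A buffer that holds data but has never been switched
		 * clearly hasn't been consumed.
		 */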
12615 if (!buf->dtb_switched && buf->dtb_offset != 0)
12616 return (0);
12617
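		/*
		 * The buffer must have been switched twice since the
		 * specified time: the start of the interval covered by the
		 * most recent switch (dtb_switched - dtb_interval) must not
		 * predate it.
		 */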
12618 if (buf->dtb_switched - buf->dtb_interval < when)
12619 return (0);
12620 }
12621
12622 return (1);
12623 }
12624
12625 static void
12626 dtrace_buffer_free(dtrace_buffer_t *bufs)
12627 {
12628 int i;
12629
12630 for (i = 0; i < NCPU; i++) {
12631 dtrace_buffer_t *buf = &bufs[i];
12632
12633 if (buf->dtb_tomax == NULL) {
12634 ASSERT(buf->dtb_xamot == NULL);
12635 ASSERT(buf->dtb_size == 0);
12636 continue;
12637 }
12638
12639 if (buf->dtb_xamot != NULL) {
12640 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12641 kmem_free(buf->dtb_xamot, buf->dtb_size);
12642 }
12643
12644 kmem_free(buf->dtb_tomax, buf->dtb_size);
12645 buf->dtb_size = 0;
12646 buf->dtb_tomax = NULL;
12647 buf->dtb_xamot = NULL;
12648 }
12649 }
12650
12651 /*
12652 * DTrace Enabling Functions
12653 */
12654 static dtrace_enabling_t *
12655 dtrace_enabling_create(dtrace_vstate_t *vstate)
12656 {
12657 dtrace_enabling_t *enab;
12658
12659 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
12660 enab->dten_vstate = vstate;
12661
12662 return (enab);
12663 }
12664
12665 static void
12666 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
12667 {
12668 dtrace_ecbdesc_t **ndesc;
12669 size_t osize, nsize;
12670
12671 /*
12672 * We can't add to enablings after we've enabled them, or after we've
12673 * retained them.
12674 */
12675 ASSERT(enab->dten_probegen == 0);
12676 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12677
12678 if (enab->dten_ndesc < enab->dten_maxdesc) {
12679 enab->dten_desc[enab->dten_ndesc++] = ecb;
12680 return;
12681 }
12682
12683 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12684
12685 if (enab->dten_maxdesc == 0) {
12686 enab->dten_maxdesc = 1;
12687 } else {
12688 enab->dten_maxdesc <<= 1;
12689 }
12690
12691 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12692
12693 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12694 ndesc = kmem_zalloc(nsize, KM_SLEEP);
12695 bcopy(enab->dten_desc, ndesc, osize);
12696 if (enab->dten_desc != NULL)
12697 kmem_free(enab->dten_desc, osize);
12698
12699 enab->dten_desc = ndesc;
12700 enab->dten_desc[enab->dten_ndesc++] = ecb;
12701 }
12702
12703 static void
12704 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12705 dtrace_probedesc_t *pd)
12706 {
12707 dtrace_ecbdesc_t *new;
12708 dtrace_predicate_t *pred;
12709 dtrace_actdesc_t *act;
12710
12711 /*
12712 * We're going to create a new ECB description that matches the
12713 * specified ECB in every way, but has the specified probe description.
12714 */
12715 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12716
12717 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12718 dtrace_predicate_hold(pred);
12719
12720 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12721 dtrace_actdesc_hold(act);
12722
12723 new->dted_action = ecb->dted_action;
12724 new->dted_pred = ecb->dted_pred;
12725 new->dted_probe = *pd;
12726 new->dted_uarg = ecb->dted_uarg;
12727
12728 dtrace_enabling_add(enab, new);
12729 }
12730
12731 static void
12732 dtrace_enabling_dump(dtrace_enabling_t *enab)
12733 {
12734 int i;
12735
12736 for (i = 0; i < enab->dten_ndesc; i++) {
12737 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
12738
12739 #ifdef __FreeBSD__
12740 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i,
12741 desc->dtpd_provider, desc->dtpd_mod,
12742 desc->dtpd_func, desc->dtpd_name);
12743 #else
12744 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
12745 desc->dtpd_provider, desc->dtpd_mod,
12746 desc->dtpd_func, desc->dtpd_name);
12747 #endif
12748 }
12749 }
12750
12751 static void
12752 dtrace_enabling_destroy(dtrace_enabling_t *enab)
12753 {
12754 int i;
12755 dtrace_ecbdesc_t *ep;
12756 dtrace_vstate_t *vstate = enab->dten_vstate;
12757
12758 ASSERT(MUTEX_HELD(&dtrace_lock));
12759
12760 for (i = 0; i < enab->dten_ndesc; i++) {
12761 dtrace_actdesc_t *act, *next;
12762 dtrace_predicate_t *pred;
12763
12764 ep = enab->dten_desc[i];
12765
12766 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
12767 dtrace_predicate_release(pred, vstate);
12768
12769 for (act = ep->dted_action; act != NULL; act = next) {
12770 next = act->dtad_next;
12771 dtrace_actdesc_release(act, vstate);
12772 }
12773
12774 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
12775 }
12776
12777 if (enab->dten_desc != NULL)
12778 kmem_free(enab->dten_desc,
12779 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
12780
12781 /*
12782 * If this was a retained enabling, decrement the dts_nretained count
12783 * and take it off of the dtrace_retained list.
12784 */
12785 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
12786 dtrace_retained == enab) {
12787 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12788 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
12789 enab->dten_vstate->dtvs_state->dts_nretained--;
12790 dtrace_retained_gen++;
12791 }
12792
12793 if (enab->dten_prev == NULL) {
12794 if (dtrace_retained == enab) {
12795 dtrace_retained = enab->dten_next;
12796
12797 if (dtrace_retained != NULL)
12798 dtrace_retained->dten_prev = NULL;
12799 }
12800 } else {
12801 ASSERT(enab != dtrace_retained);
12802 ASSERT(dtrace_retained != NULL);
12803 enab->dten_prev->dten_next = enab->dten_next;
12804 }
12805
12806 if (enab->dten_next != NULL) {
12807 ASSERT(dtrace_retained != NULL);
12808 enab->dten_next->dten_prev = enab->dten_prev;
12809 }
12810
12811 kmem_free(enab, sizeof (dtrace_enabling_t));
12812 }
12813
12814 static int
12815 dtrace_enabling_retain(dtrace_enabling_t *enab)
12816 {
12817 dtrace_state_t *state;
12818
12819 ASSERT(MUTEX_HELD(&dtrace_lock));
12820 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12821 ASSERT(enab->dten_vstate != NULL);
12822
12823 state = enab->dten_vstate->dtvs_state;
12824 ASSERT(state != NULL);
12825
12826 /*
12827 * We only allow each state to retain dtrace_retain_max enablings.
12828 */
12829 if (state->dts_nretained >= dtrace_retain_max)
12830 return (ENOSPC);
12831
12832 state->dts_nretained++;
12833 dtrace_retained_gen++;
12834
12835 if (dtrace_retained == NULL) {
12836 dtrace_retained = enab;
12837 return (0);
12838 }
12839
12840 enab->dten_next = dtrace_retained;
12841 dtrace_retained->dten_prev = enab;
12842 dtrace_retained = enab;
12843
12844 return (0);
12845 }
12846
12847 static int
12848 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
12849 dtrace_probedesc_t *create)
12850 {
12851 dtrace_enabling_t *new, *enab;
12852 int found = 0, err = ENOENT;
12853
12854 ASSERT(MUTEX_HELD(&dtrace_lock));
12855 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
12856 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
12857 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
12858 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
12859
12860 new = dtrace_enabling_create(&state->dts_vstate);
12861
12862 /*
12863 * Iterate over all retained enablings, looking for enablings that
12864 * match the specified state.
12865 */
12866 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
12867 int i;
12868
12869 /*
12870 * dtvs_state can only be NULL for helper enablings -- and
12871 * helper enablings can't be retained.
12872 */
12873 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12874
12875 if (enab->dten_vstate->dtvs_state != state)
12876 continue;
12877
12878 /*
12879 * Now iterate over each probe description; we're looking for
12880 * an exact match to the specified probe description.
12881 */
12882 for (i = 0; i < enab->dten_ndesc; i++) {
12883 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12884 dtrace_probedesc_t *pd = &ep->dted_probe;
12885
12886 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
12887 continue;
12888
12889 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
12890 continue;
12891
12892 if (strcmp(pd->dtpd_func, match->dtpd_func))
12893 continue;
12894
12895 if (strcmp(pd->dtpd_name, match->dtpd_name))
12896 continue;
12897
12898 /*
12899 * We have a winning probe! Add it to our growing
12900 * enabling.
12901 */
12902 found = 1;
12903 dtrace_enabling_addlike(new, ep, create);
12904 }
12905 }
12906
12907 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
12908 dtrace_enabling_destroy(new);
12909 return (err);
12910 }
12911
12912 return (0);
12913 }
12914
12915 static void
12916 dtrace_enabling_retract(dtrace_state_t *state)
12917 {
12918 dtrace_enabling_t *enab, *next;
12919
12920 ASSERT(MUTEX_HELD(&dtrace_lock));
12921
12922 /*
12923 	 * Iterate over all retained enablings, destroying those retained
12924 * for the specified state.
12925 */
12926 for (enab = dtrace_retained; enab != NULL; enab = next) {
12927 next = enab->dten_next;
12928
12929 /*
12930 * dtvs_state can only be NULL for helper enablings -- and
12931 * helper enablings can't be retained.
12932 */
12933 ASSERT(enab->dten_vstate->dtvs_state != NULL);
12934
12935 if (enab->dten_vstate->dtvs_state == state) {
12936 ASSERT(state->dts_nretained > 0);
12937 dtrace_enabling_destroy(enab);
12938 }
12939 }
12940
12941 ASSERT(state->dts_nretained == 0);
12942 }
12943
12944 static int
12945 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
12946 {
12947 int i = 0;
12948 int matched = 0;
12949
12950 ASSERT(MUTEX_HELD(&cpu_lock));
12951 ASSERT(MUTEX_HELD(&dtrace_lock));
12952
12953 for (i = 0; i < enab->dten_ndesc; i++) {
12954 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
12955
12956 enab->dten_current = ep;
12957 enab->dten_error = 0;
12958
12959 matched += dtrace_probe_enable(&ep->dted_probe, enab);
12960
12961 if (enab->dten_error != 0) {
12962 /*
12963 * If we get an error half-way through enabling the
12964 * probes, we kick out -- perhaps with some number of
12965 * them enabled. Leaving enabled probes enabled may
12966 * be slightly confusing for user-level, but we expect
12967 * that no one will attempt to actually drive on in
12968 * the face of such errors. If this is an anonymous
12969 * enabling (indicated with a NULL nmatched pointer),
12970 * we cmn_err() a message. We aren't expecting to
12971 			 * get such an error -- to the extent that such an
12972 			 * error can exist at all, it would be a result of
12973 			 * corrupted DOF in the driver properties.
12974 */
12975 if (nmatched == NULL) {
12976 cmn_err(CE_WARN, "dtrace_enabling_match() "
12977 "error on %p: %d", (void *)ep,
12978 enab->dten_error);
12979 }
12980
12981 return (enab->dten_error);
12982 }
12983 }
12984
12985 enab->dten_probegen = dtrace_probegen;
12986 if (nmatched != NULL)
12987 *nmatched = matched;
12988
12989 return (0);
12990 }
12991
12992 static void
12993 dtrace_enabling_matchall(void)
12994 {
12995 dtrace_enabling_t *enab;
12996
12997 mutex_enter(&cpu_lock);
12998 mutex_enter(&dtrace_lock);
12999
13000 /*
13001 * Iterate over all retained enablings to see if any probes match
13002 * against them. We only perform this operation on enablings for which
13003 * we have sufficient permissions by virtue of being in the global zone
13004 * or in the same zone as the DTrace client. Because we can be called
13005 * after dtrace_detach() has been called, we cannot assert that there
13006 * are retained enablings. We can safely load from dtrace_retained,
13007 * however: the taskq_destroy() at the end of dtrace_detach() will
13008 * block pending our completion.
13009 */
13010 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
13011 #ifdef illumos
13012 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
13013
13014 if (INGLOBALZONE(curproc) ||
13015 cr != NULL && getzoneid() == crgetzoneid(cr))
13016 #endif
13017 (void) dtrace_enabling_match(enab, NULL);
13018 }
13019
13020 mutex_exit(&dtrace_lock);
13021 mutex_exit(&cpu_lock);
13022 }
13023
13024 /*
13025 * If an enabling is to be enabled without having matched probes (that is, if
13026 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
13027 * enabling must be _primed_ by creating an ECB for every ECB description.
13028 * This must be done to assure that we know the number of speculations, the
13029 * number of aggregations, the minimum buffer size needed, etc. before we
13030 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
13031  * enabling any probes, we create ECBs for every ECB description, but with a
13032 * NULL probe -- which is exactly what this function does.
13033 */
13034 static void
13035 dtrace_enabling_prime(dtrace_state_t *state)
13036 {
13037 dtrace_enabling_t *enab;
13038 int i;
13039
13040 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
13041 ASSERT(enab->dten_vstate->dtvs_state != NULL);
13042
13043 if (enab->dten_vstate->dtvs_state != state)
13044 continue;
13045
13046 /*
13047 * We don't want to prime an enabling more than once, lest
13048 * we allow a malicious user to induce resource exhaustion.
13049 * (The ECBs that result from priming an enabling aren't
13050 * leaked -- but they also aren't deallocated until the
13051 * consumer state is destroyed.)
13052 */
13053 if (enab->dten_primed)
13054 continue;
13055
13056 for (i = 0; i < enab->dten_ndesc; i++) {
13057 enab->dten_current = enab->dten_desc[i];
13058 (void) dtrace_probe_enable(NULL, enab);
13059 }
13060
13061 enab->dten_primed = 1;
13062 }
13063 }
13064
13065 /*
13066 * Called to indicate that probes should be provided due to retained
13067 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
13068  * must take an initial lap through the enabling, calling the dtps_provide()
13069 * entry point explicitly to allow for autocreated probes.
13070 */
13071 static void
13072 dtrace_enabling_provide(dtrace_provider_t *prv)
13073 {
13074 int i, all = 0;
13075 dtrace_probedesc_t desc;
13076 dtrace_genid_t gen;
13077
13078 ASSERT(MUTEX_HELD(&dtrace_lock));
13079 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
13080
13081 if (prv == NULL) {
13082 all = 1;
13083 prv = dtrace_provider;
13084 }
13085
13086 do {
13087 dtrace_enabling_t *enab;
13088 void *parg = prv->dtpv_arg;
13089
13090 retry:
13091 gen = dtrace_retained_gen;
13092 for (enab = dtrace_retained; enab != NULL;
13093 enab = enab->dten_next) {
13094 for (i = 0; i < enab->dten_ndesc; i++) {
13095 desc = enab->dten_desc[i]->dted_probe;
13096 mutex_exit(&dtrace_lock);
13097 prv->dtpv_pops.dtps_provide(parg, &desc);
13098 mutex_enter(&dtrace_lock);
13099 /*
13100 * Process the retained enablings again if
13101 * they have changed while we weren't holding
13102 * dtrace_lock.
13103 */
13104 if (gen != dtrace_retained_gen)
13105 goto retry;
13106 }
13107 }
13108 } while (all && (prv = prv->dtpv_next) != NULL);
13109
13110 mutex_exit(&dtrace_lock);
13111 dtrace_probe_provide(NULL, all ? NULL : prv);
13112 mutex_enter(&dtrace_lock);
13113 }
13114
13115 /*
13116 * Called to reap ECBs that are attached to probes from defunct providers.
13117 */
13118 static void
13119 dtrace_enabling_reap(void)
13120 {
13121 dtrace_provider_t *prov;
13122 dtrace_probe_t *probe;
13123 dtrace_ecb_t *ecb;
13124 hrtime_t when;
13125 int i;
13126
13127 mutex_enter(&cpu_lock);
13128 mutex_enter(&dtrace_lock);
13129
13130 for (i = 0; i < dtrace_nprobes; i++) {
13131 if ((probe = dtrace_probes[i]) == NULL)
13132 continue;
13133
13134 if (probe->dtpr_ecb == NULL)
13135 continue;
13136
13137 prov = probe->dtpr_provider;
13138
13139 if ((when = prov->dtpv_defunct) == 0)
13140 continue;
13141
13142 /*
13143 * We have ECBs on a defunct provider: we want to reap these
13144 * ECBs to allow the provider to unregister. The destruction
13145 * of these ECBs must be done carefully: if we destroy the ECB
13146 * and the consumer later wishes to consume an EPID that
13147 * corresponds to the destroyed ECB (and if the EPID metadata
13148 * has not been previously consumed), the consumer will abort
13149 * processing on the unknown EPID. To reduce (but not, sadly,
13150 * eliminate) the possibility of this, we will only destroy an
13151 * ECB for a defunct provider if, for the state that
13152 * corresponds to the ECB:
13153 *
13154 * (a) There is no speculative tracing (which can effectively
13155 * cache an EPID for an arbitrary amount of time).
13156 *
13157 * (b) The principal buffers have been switched twice since the
13158 * provider became defunct.
13159 *
13160 * (c) The aggregation buffers are of zero size or have been
13161 * switched twice since the provider became defunct.
13162 *
13163 * We use dts_speculates to determine (a) and call a function
13164 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
13165 * that as soon as we've been unable to destroy one of the ECBs
13166 * associated with the probe, we quit trying -- reaping is only
13167 * fruitful in as much as we can destroy all ECBs associated
13168 * with the defunct provider's probes.
13169 */
13170 while ((ecb = probe->dtpr_ecb) != NULL) {
13171 dtrace_state_t *state = ecb->dte_state;
13172 dtrace_buffer_t *buf = state->dts_buffer;
13173 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
13174
13175 if (state->dts_speculates)
13176 break;
13177
13178 if (!dtrace_buffer_consumed(buf, when))
13179 break;
13180
13181 if (!dtrace_buffer_consumed(aggbuf, when))
13182 break;
13183
13184 dtrace_ecb_disable(ecb);
13185 ASSERT(probe->dtpr_ecb != ecb);
13186 dtrace_ecb_destroy(ecb);
13187 }
13188 }
13189
13190 mutex_exit(&dtrace_lock);
13191 mutex_exit(&cpu_lock);
13192 }
13193
13194 /*
13195 * DTrace DOF Functions
13196 */
13197 /*ARGSUSED*/
13198 static void
13199 dtrace_dof_error(dof_hdr_t *dof, const char *str)
13200 {
13201 if (dtrace_err_verbose)
13202 cmn_err(CE_WARN, "failed to process DOF: %s", str);
13203
13204 #ifdef DTRACE_ERRDEBUG
13205 dtrace_errdebug(str);
13206 #endif
13207 }
13208
13209 /*
13210 * Create DOF out of a currently enabled state. Right now, we only create
13211 * DOF containing the run-time options -- but this could be expanded to create
13212 * complete DOF representing the enabled state.
13213 */
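/*
 * Schematically, the DOF produced below is laid out as:
 *
 *	dof_hdr_t
 *	dof_sec_t	(DOF_SECT_OPTDESC, padded out to an 8-byte boundary)
 *	dof_optdesc_t	(DTRACEOPT_MAX entries, one per run-time option)
 */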
13214 static dof_hdr_t *
13215 dtrace_dof_create(dtrace_state_t *state)
13216 {
13217 dof_hdr_t *dof;
13218 dof_sec_t *sec;
13219 dof_optdesc_t *opt;
13220 int i, len = sizeof (dof_hdr_t) +
13221 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
13222 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
13223
13224 ASSERT(MUTEX_HELD(&dtrace_lock));
13225
13226 dof = kmem_zalloc(len, KM_SLEEP);
13227 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
13228 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
13229 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
13230 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
13231
13232 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
13233 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
13234 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
13235 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
13236 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
13237 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
13238
13239 dof->dofh_flags = 0;
13240 dof->dofh_hdrsize = sizeof (dof_hdr_t);
13241 dof->dofh_secsize = sizeof (dof_sec_t);
13242 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
13243 dof->dofh_secoff = sizeof (dof_hdr_t);
13244 dof->dofh_loadsz = len;
13245 dof->dofh_filesz = len;
13246 dof->dofh_pad = 0;
13247
13248 /*
13249 * Fill in the option section header...
13250 */
13251 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
13252 sec->dofs_type = DOF_SECT_OPTDESC;
13253 sec->dofs_align = sizeof (uint64_t);
13254 sec->dofs_flags = DOF_SECF_LOAD;
13255 sec->dofs_entsize = sizeof (dof_optdesc_t);
13256
13257 opt = (dof_optdesc_t *)((uintptr_t)sec +
13258 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
13259
13260 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
13261 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
13262
13263 for (i = 0; i < DTRACEOPT_MAX; i++) {
13264 opt[i].dofo_option = i;
13265 opt[i].dofo_strtab = DOF_SECIDX_NONE;
13266 opt[i].dofo_value = state->dts_options[i];
13267 }
13268
13269 return (dof);
13270 }
13271
13272 static dof_hdr_t *
13273 dtrace_dof_copyin(uintptr_t uarg, int *errp)
13274 {
13275 dof_hdr_t hdr, *dof;
13276
13277 ASSERT(!MUTEX_HELD(&dtrace_lock));
13278
13279 /*
13280 * First, we're going to copyin() the sizeof (dof_hdr_t).
13281 */
13282 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
13283 dtrace_dof_error(NULL, "failed to copyin DOF header");
13284 *errp = EFAULT;
13285 return (NULL);
13286 }
13287
13288 /*
13289 * Now we'll allocate the entire DOF and copy it in -- provided
13290 * that the length isn't outrageous.
13291 */
13292 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
13293 dtrace_dof_error(&hdr, "load size exceeds maximum");
13294 *errp = E2BIG;
13295 return (NULL);
13296 }
13297
13298 if (hdr.dofh_loadsz < sizeof (hdr)) {
13299 dtrace_dof_error(&hdr, "invalid load size");
13300 *errp = EINVAL;
13301 return (NULL);
13302 }
13303
13304 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
13305
13306 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
13307 dof->dofh_loadsz != hdr.dofh_loadsz) {
13308 kmem_free(dof, hdr.dofh_loadsz);
13309 *errp = EFAULT;
13310 return (NULL);
13311 }
13312
13313 return (dof);
13314 }
13315
13316 #ifdef __FreeBSD__
13317 static dof_hdr_t *
13318 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp)
13319 {
13320 dof_hdr_t hdr, *dof;
13321 struct thread *td;
13322 size_t loadsz;
13323
13324 ASSERT(!MUTEX_HELD(&dtrace_lock));
13325
13326 td = curthread;
13327
13328 /*
13329 * First, we're going to copyin() the sizeof (dof_hdr_t).
13330 */
13331 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) {
13332 dtrace_dof_error(NULL, "failed to copyin DOF header");
13333 *errp = EFAULT;
13334 return (NULL);
13335 }
13336
13337 /*
13338 * Now we'll allocate the entire DOF and copy it in -- provided
13339 * that the length isn't outrageous.
13340 */
13341 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
13342 dtrace_dof_error(&hdr, "load size exceeds maximum");
13343 *errp = E2BIG;
13344 return (NULL);
13345 }
13346 loadsz = (size_t)hdr.dofh_loadsz;
13347
13348 if (loadsz < sizeof (hdr)) {
13349 dtrace_dof_error(&hdr, "invalid load size");
13350 *errp = EINVAL;
13351 return (NULL);
13352 }
13353
13354 dof = kmem_alloc(loadsz, KM_SLEEP);
13355
13356 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz ||
13357 dof->dofh_loadsz != loadsz) {
13358 kmem_free(dof, hdr.dofh_loadsz);
13359 *errp = EFAULT;
13360 return (NULL);
13361 }
13362
13363 return (dof);
13364 }
13365
13366 static __inline uchar_t
13367 dtrace_dof_char(char c)
13368 {
13369
13370 switch (c) {
13371 case '0':
13372 case '1':
13373 case '2':
13374 case '3':
13375 case '4':
13376 case '5':
13377 case '6':
13378 case '7':
13379 case '8':
13380 case '9':
13381 return (c - '0');
13382 case 'A':
13383 case 'B':
13384 case 'C':
13385 case 'D':
13386 case 'E':
13387 case 'F':
13388 return (c - 'A' + 10);
13389 case 'a':
13390 case 'b':
13391 case 'c':
13392 case 'd':
13393 case 'e':
13394 case 'f':
13395 return (c - 'a' + 10);
13396 }
13397 /* Should not reach here. */
13398 return (UCHAR_MAX);
13399 }
13400 #endif /* __FreeBSD__ */
13401
13402 static dof_hdr_t *
13403 dtrace_dof_property(const char *name)
13404 {
13405 #ifdef __FreeBSD__
13406 uint8_t *dofbuf;
13407 u_char *data, *eol;
13408 caddr_t doffile;
13409 size_t bytes, len, i;
13410 dof_hdr_t *dof;
13411 u_char c1, c2;
13412
13413 dof = NULL;
13414
13415 doffile = preload_search_by_type("dtrace_dof");
13416 if (doffile == NULL)
13417 return (NULL);
13418
13419 data = preload_fetch_addr(doffile);
13420 len = preload_fetch_size(doffile);
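	/*
	 * The preloaded file consists of newline-terminated lines of the
	 * form "<property name>=<hex-encoded DOF>"; scan for the line whose
	 * key matches the requested name.
	 */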
13421 for (;;) {
13422 /* Look for the end of the line. All lines end in a newline. */
13423 eol = memchr(data, '\n', len);
13424 if (eol == NULL)
13425 return (NULL);
13426
13427 if (strncmp(name, data, strlen(name)) == 0)
13428 break;
13429
13430 eol++; /* skip past the newline */
13431 len -= eol - data;
13432 data = eol;
13433 }
13434
13435 /* We've found the data corresponding to the specified key. */
13436
13437 data += strlen(name) + 1; /* skip past the '=' */
13438 len = eol - data;
13439 if (len % 2 != 0) {
13440 dtrace_dof_error(NULL, "invalid DOF encoding length");
13441 goto doferr;
13442 }
13443 bytes = len / 2;
13444 if (bytes < sizeof(dof_hdr_t)) {
13445 dtrace_dof_error(NULL, "truncated header");
13446 goto doferr;
13447 }
13448
13449 /*
13450 * Each byte is represented by the two ASCII characters in its hex
13451 * representation.
13452 */
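	/*
	 * For example, the two characters "7f" decode to the single byte
	 * 0x7f (dtrace_dof_char('7') * 16 + dtrace_dof_char('f')).
	 */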
13453 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK);
13454 for (i = 0; i < bytes; i++) {
13455 c1 = dtrace_dof_char(data[i * 2]);
13456 c2 = dtrace_dof_char(data[i * 2 + 1]);
13457 if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) {
13458 dtrace_dof_error(NULL, "invalid hex char in DOF");
13459 goto doferr;
13460 }
13461 dofbuf[i] = c1 * 16 + c2;
13462 }
13463
13464 dof = (dof_hdr_t *)dofbuf;
13465 if (bytes < dof->dofh_loadsz) {
13466 dtrace_dof_error(NULL, "truncated DOF");
13467 goto doferr;
13468 }
13469
13470 if (dof->dofh_loadsz >= dtrace_dof_maxsize) {
13471 dtrace_dof_error(NULL, "oversized DOF");
13472 goto doferr;
13473 }
13474
13475 return (dof);
13476
13477 doferr:
13478 free(dof, M_SOLARIS);
13479 return (NULL);
13480 #else /* __FreeBSD__ */
13481 uchar_t *buf;
13482 uint64_t loadsz;
13483 unsigned int len, i;
13484 dof_hdr_t *dof;
13485
13486 /*
13487 	 * Unfortunately, arrays of values in .conf files are always (and
13488 	 * only) interpreted as integer arrays. We must read our DOF
13489 * as an integer array, and then squeeze it into a byte array.
13490 */
13491 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
13492 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
13493 return (NULL);
13494
13495 for (i = 0; i < len; i++)
13496 buf[i] = (uchar_t)(((int *)buf)[i]);
13497
13498 if (len < sizeof (dof_hdr_t)) {
13499 ddi_prop_free(buf);
13500 dtrace_dof_error(NULL, "truncated header");
13501 return (NULL);
13502 }
13503
13504 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
13505 ddi_prop_free(buf);
13506 dtrace_dof_error(NULL, "truncated DOF");
13507 return (NULL);
13508 }
13509
13510 if (loadsz >= dtrace_dof_maxsize) {
13511 ddi_prop_free(buf);
13512 dtrace_dof_error(NULL, "oversized DOF");
13513 return (NULL);
13514 }
13515
13516 dof = kmem_alloc(loadsz, KM_SLEEP);
13517 bcopy(buf, dof, loadsz);
13518 ddi_prop_free(buf);
13519
13520 return (dof);
13521 #endif /* !__FreeBSD__ */
13522 }
13523
13524 static void
13525 dtrace_dof_destroy(dof_hdr_t *dof)
13526 {
13527 kmem_free(dof, dof->dofh_loadsz);
13528 }
13529
13530 /*
13531 * Return the dof_sec_t pointer corresponding to a given section index. If the
13532 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
13533 * a type other than DOF_SECT_NONE is specified, the header is checked against
13534 * this type and NULL is returned if the types do not match.
13535 */
13536 static dof_sec_t *
13537 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
13538 {
13539 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
13540 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
13541
13542 if (i >= dof->dofh_secnum) {
13543 dtrace_dof_error(dof, "referenced section index is invalid");
13544 return (NULL);
13545 }
13546
13547 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
13548 dtrace_dof_error(dof, "referenced section is not loadable");
13549 return (NULL);
13550 }
13551
13552 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
13553 dtrace_dof_error(dof, "referenced section is the wrong type");
13554 return (NULL);
13555 }
13556
13557 return (sec);
13558 }
13559
13560 static dtrace_probedesc_t *
13561 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
13562 {
13563 dof_probedesc_t *probe;
13564 dof_sec_t *strtab;
13565 uintptr_t daddr = (uintptr_t)dof;
13566 uintptr_t str;
13567 size_t size;
13568
13569 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
13570 dtrace_dof_error(dof, "invalid probe section");
13571 return (NULL);
13572 }
13573
13574 if (sec->dofs_align != sizeof (dof_secidx_t)) {
13575 dtrace_dof_error(dof, "bad alignment in probe description");
13576 return (NULL);
13577 }
13578
13579 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
13580 dtrace_dof_error(dof, "truncated probe description");
13581 return (NULL);
13582 }
13583
13584 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
13585 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
13586
13587 if (strtab == NULL)
13588 return (NULL);
13589
13590 str = daddr + strtab->dofs_offset;
13591 size = strtab->dofs_size;
13592
13593 if (probe->dofp_provider >= strtab->dofs_size) {
13594 dtrace_dof_error(dof, "corrupt probe provider");
13595 return (NULL);
13596 }
13597
13598 (void) strncpy(desc->dtpd_provider,
13599 (char *)(str + probe->dofp_provider),
13600 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
13601
13602 if (probe->dofp_mod >= strtab->dofs_size) {
13603 dtrace_dof_error(dof, "corrupt probe module");
13604 return (NULL);
13605 }
13606
13607 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
13608 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
13609
13610 if (probe->dofp_func >= strtab->dofs_size) {
13611 dtrace_dof_error(dof, "corrupt probe function");
13612 return (NULL);
13613 }
13614
13615 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
13616 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
13617
13618 if (probe->dofp_name >= strtab->dofs_size) {
13619 dtrace_dof_error(dof, "corrupt probe name");
13620 return (NULL);
13621 }
13622
13623 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
13624 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
13625
13626 return (desc);
13627 }
13628
13629 static dtrace_difo_t *
13630 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13631 cred_t *cr)
13632 {
13633 dtrace_difo_t *dp;
13634 size_t ttl = 0;
13635 dof_difohdr_t *dofd;
13636 uintptr_t daddr = (uintptr_t)dof;
13637 size_t max = dtrace_difo_maxsize;
13638 int i, l, n;
13639
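	/*
	 * For each recognized DIFO sub-section type, this table records where
	 * the decoded buffer pointer and length live within the dtrace_difo_t,
	 * the expected entry size and alignment, and the message to emit if
	 * the section appears more than once.
	 */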
13640 static const struct {
13641 int section;
13642 int bufoffs;
13643 int lenoffs;
13644 int entsize;
13645 int align;
13646 const char *msg;
13647 } difo[] = {
13648 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
13649 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
13650 sizeof (dif_instr_t), "multiple DIF sections" },
13651
13652 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
13653 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
13654 sizeof (uint64_t), "multiple integer tables" },
13655
13656 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
13657 offsetof(dtrace_difo_t, dtdo_strlen), 0,
13658 sizeof (char), "multiple string tables" },
13659
13660 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
13661 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
13662 sizeof (uint_t), "multiple variable tables" },
13663
13664 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
13665 };
13666
13667 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
13668 dtrace_dof_error(dof, "invalid DIFO header section");
13669 return (NULL);
13670 }
13671
13672 if (sec->dofs_align != sizeof (dof_secidx_t)) {
13673 dtrace_dof_error(dof, "bad alignment in DIFO header");
13674 return (NULL);
13675 }
13676
13677 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
13678 sec->dofs_size % sizeof (dof_secidx_t)) {
13679 dtrace_dof_error(dof, "bad size in DIFO header");
13680 return (NULL);
13681 }
13682
13683 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
13684 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
13685
13686 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
13687 dp->dtdo_rtype = dofd->dofd_rtype;
13688
13689 for (l = 0; l < n; l++) {
13690 dof_sec_t *subsec;
13691 void **bufp;
13692 uint32_t *lenp;
13693
13694 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
13695 dofd->dofd_links[l])) == NULL)
13696 goto err; /* invalid section link */
13697
13698 if (ttl + subsec->dofs_size > max) {
13699 dtrace_dof_error(dof, "exceeds maximum size");
13700 goto err;
13701 }
13702
13703 ttl += subsec->dofs_size;
13704
13705 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
13706 if (subsec->dofs_type != difo[i].section)
13707 continue;
13708
13709 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
13710 dtrace_dof_error(dof, "section not loaded");
13711 goto err;
13712 }
13713
13714 if (subsec->dofs_align != difo[i].align) {
13715 dtrace_dof_error(dof, "bad alignment");
13716 goto err;
13717 }
13718
13719 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
13720 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
13721
13722 if (*bufp != NULL) {
13723 dtrace_dof_error(dof, difo[i].msg);
13724 goto err;
13725 }
13726
13727 if (difo[i].entsize != subsec->dofs_entsize) {
13728 dtrace_dof_error(dof, "entry size mismatch");
13729 goto err;
13730 }
13731
13732 if (subsec->dofs_entsize != 0 &&
13733 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
13734 dtrace_dof_error(dof, "corrupt entry size");
13735 goto err;
13736 }
13737
13738 *lenp = subsec->dofs_size;
13739 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
13740 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
13741 *bufp, subsec->dofs_size);
13742
13743 if (subsec->dofs_entsize != 0)
13744 *lenp /= subsec->dofs_entsize;
13745
13746 break;
13747 }
13748
13749 /*
13750 * If we encounter a loadable DIFO sub-section that is not
13751 * known to us, assume this is a broken program and fail.
13752 */
13753 if (difo[i].section == DOF_SECT_NONE &&
13754 (subsec->dofs_flags & DOF_SECF_LOAD)) {
13755 dtrace_dof_error(dof, "unrecognized DIFO subsection");
13756 goto err;
13757 }
13758 }
13759
13760 if (dp->dtdo_buf == NULL) {
13761 /*
13762 * We can't have a DIF object without DIF text.
13763 */
13764 dtrace_dof_error(dof, "missing DIF text");
13765 goto err;
13766 }
13767
13768 /*
	 * Before we validate the DIF object, run through the variable table
	 * looking for string variables -- if any of their sizes are unset
	 * (zero), we'll set them to the system-wide default string size.
	 * Note that
13772 * this should _not_ happen if the "strsize" option has been set --
13773 * in this case, the compiler should have set the size to reflect the
13774 * setting of the option.
13775 */
13776 for (i = 0; i < dp->dtdo_varlen; i++) {
13777 dtrace_difv_t *v = &dp->dtdo_vartab[i];
13778 dtrace_diftype_t *t = &v->dtdv_type;
13779
13780 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
13781 continue;
13782
13783 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
13784 t->dtdt_size = dtrace_strsize_default;
13785 }
13786
13787 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
13788 goto err;
13789
13790 dtrace_difo_init(dp, vstate);
13791 return (dp);
13792
13793 err:
13794 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
13795 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
13796 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
13797 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
13798
13799 kmem_free(dp, sizeof (dtrace_difo_t));
13800 return (NULL);
13801 }
13802
13803 static dtrace_predicate_t *
13804 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13805 cred_t *cr)
13806 {
13807 dtrace_difo_t *dp;
13808
13809 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
13810 return (NULL);
13811
13812 return (dtrace_predicate_create(dp));
13813 }
13814
13815 static dtrace_actdesc_t *
13816 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13817 cred_t *cr)
13818 {
13819 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
13820 dof_actdesc_t *desc;
13821 dof_sec_t *difosec;
13822 size_t offs;
13823 uintptr_t daddr = (uintptr_t)dof;
13824 uint64_t arg;
13825 dtrace_actkind_t kind;
13826
13827 if (sec->dofs_type != DOF_SECT_ACTDESC) {
13828 dtrace_dof_error(dof, "invalid action section");
13829 return (NULL);
13830 }
13831
13832 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
13833 dtrace_dof_error(dof, "truncated action description");
13834 return (NULL);
13835 }
13836
13837 if (sec->dofs_align != sizeof (uint64_t)) {
13838 dtrace_dof_error(dof, "bad alignment in action description");
13839 return (NULL);
13840 }
13841
13842 if (sec->dofs_size < sec->dofs_entsize) {
13843 dtrace_dof_error(dof, "section entry size exceeds total size");
13844 return (NULL);
13845 }
13846
13847 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
13848 dtrace_dof_error(dof, "bad entry size in action description");
13849 return (NULL);
13850 }
13851
13852 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
13853 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
13854 return (NULL);
13855 }
13856
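	/*
	 * Walk each action description entry, building a linked list of
	 * dtrace_actdesc_t structures in the order in which they appear.
	 */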
13857 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
13858 desc = (dof_actdesc_t *)(daddr +
13859 (uintptr_t)sec->dofs_offset + offs);
13860 kind = (dtrace_actkind_t)desc->dofa_kind;
13861
13862 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
13863 (kind != DTRACEACT_PRINTA ||
13864 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
13865 (kind == DTRACEACT_DIFEXPR &&
13866 desc->dofa_strtab != DOF_SECIDX_NONE)) {
13867 dof_sec_t *strtab;
13868 char *str, *fmt;
13869 uint64_t i;
13870
13871 /*
13872 * The argument to these actions is an index into the
13873 * DOF string table. For printf()-like actions, this
13874 * is the format string. For print(), this is the
13875 * CTF type of the expression result.
13876 */
13877 if ((strtab = dtrace_dof_sect(dof,
13878 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
13879 goto err;
13880
13881 str = (char *)((uintptr_t)dof +
13882 (uintptr_t)strtab->dofs_offset);
13883
13884 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
13885 if (str[i] == '\0')
13886 break;
13887 }
13888
13889 if (i >= strtab->dofs_size) {
13890 dtrace_dof_error(dof, "bogus format string");
13891 goto err;
13892 }
13893
13894 if (i == desc->dofa_arg) {
13895 dtrace_dof_error(dof, "empty format string");
13896 goto err;
13897 }
13898
13899 i -= desc->dofa_arg;
13900 fmt = kmem_alloc(i + 1, KM_SLEEP);
13901 bcopy(&str[desc->dofa_arg], fmt, i + 1);
13902 arg = (uint64_t)(uintptr_t)fmt;
13903 } else {
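			/*
			 * printa() without a format string carries no string
			 * argument; all other actions pass dofa_arg through
			 * unchanged.
			 */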
13904 if (kind == DTRACEACT_PRINTA) {
13905 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
13906 arg = 0;
13907 } else {
13908 arg = desc->dofa_arg;
13909 }
13910 }
13911
13912 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
13913 desc->dofa_uarg, arg);
13914
13915 if (last != NULL) {
13916 last->dtad_next = act;
13917 } else {
13918 first = act;
13919 }
13920
13921 last = act;
13922
13923 if (desc->dofa_difo == DOF_SECIDX_NONE)
13924 continue;
13925
13926 if ((difosec = dtrace_dof_sect(dof,
13927 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
13928 goto err;
13929
13930 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
13931
13932 if (act->dtad_difo == NULL)
13933 goto err;
13934 }
13935
13936 ASSERT(first != NULL);
13937 return (first);
13938
13939 err:
13940 for (act = first; act != NULL; act = next) {
13941 next = act->dtad_next;
13942 dtrace_actdesc_release(act, vstate);
13943 }
13944
13945 return (NULL);
13946 }
13947
13948 static dtrace_ecbdesc_t *
13949 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13950 cred_t *cr)
13951 {
13952 dtrace_ecbdesc_t *ep;
13953 dof_ecbdesc_t *ecb;
13954 dtrace_probedesc_t *desc;
13955 dtrace_predicate_t *pred = NULL;
13956
13957 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
13958 dtrace_dof_error(dof, "truncated ECB description");
13959 return (NULL);
13960 }
13961
13962 if (sec->dofs_align != sizeof (uint64_t)) {
13963 dtrace_dof_error(dof, "bad alignment in ECB description");
13964 return (NULL);
13965 }
13966
13967 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
13968 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
13969
13970 if (sec == NULL)
13971 return (NULL);
13972
13973 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
13974 ep->dted_uarg = ecb->dofe_uarg;
13975 desc = &ep->dted_probe;
13976
13977 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
13978 goto err;
13979
13980 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
13981 if ((sec = dtrace_dof_sect(dof,
13982 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
13983 goto err;
13984
13985 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
13986 goto err;
13987
13988 ep->dted_pred.dtpdd_predicate = pred;
13989 }
13990
13991 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
13992 if ((sec = dtrace_dof_sect(dof,
13993 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
13994 goto err;
13995
13996 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
13997
13998 if (ep->dted_action == NULL)
13999 goto err;
14000 }
14001
14002 return (ep);
14003
14004 err:
14005 if (pred != NULL)
14006 dtrace_predicate_release(pred, vstate);
14007 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
14008 return (NULL);
14009 }
14010
14011 /*
14012 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
14013 * specified DOF. SETX relocations are computed using 'ubase', the base load
14014 * address of the object containing the DOF, and DOFREL relocations are relative
14015 * to the relocation offset within the DOF.
14016 */
14017 static int
14018 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase,
14019 uint64_t udaddr)
14020 {
14021 uintptr_t daddr = (uintptr_t)dof;
14022 uintptr_t ts_end;
14023 dof_relohdr_t *dofr =
14024 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
14025 dof_sec_t *ss, *rs, *ts;
14026 dof_relodesc_t *r;
14027 uint_t i, n;
14028
14029 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
14030 sec->dofs_align != sizeof (dof_secidx_t)) {
14031 dtrace_dof_error(dof, "invalid relocation header");
14032 return (-1);
14033 }
14034
14035 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
14036 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
14037 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
14038 ts_end = (uintptr_t)ts + sizeof (dof_sec_t);
14039
14040 if (ss == NULL || rs == NULL || ts == NULL)
14041 return (-1); /* dtrace_dof_error() has been called already */
14042
14043 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
14044 rs->dofs_align != sizeof (uint64_t)) {
14045 dtrace_dof_error(dof, "invalid relocation section");
14046 return (-1);
14047 }
14048
14049 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
14050 n = rs->dofs_size / rs->dofs_entsize;
14051
14052 for (i = 0; i < n; i++) {
14053 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
14054
14055 switch (r->dofr_type) {
14056 case DOF_RELO_NONE:
14057 break;
14058 case DOF_RELO_SETX:
14059 case DOF_RELO_DOFREL:
14060 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
14061 sizeof (uint64_t) > ts->dofs_size) {
14062 dtrace_dof_error(dof, "bad relocation offset");
14063 return (-1);
14064 }
14065
14066 if (taddr >= (uintptr_t)ts && taddr < ts_end) {
14067 dtrace_dof_error(dof, "bad relocation offset");
14068 return (-1);
14069 }
14070
14071 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
14072 dtrace_dof_error(dof, "misaligned setx relo");
14073 return (-1);
14074 }
14075
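			/*
			 * SETX relocations add the base load address of the
			 * containing object; DOFREL relocations add the
			 * userland address of the relocated value within the
			 * DOF itself.
			 */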
14076 if (r->dofr_type == DOF_RELO_SETX)
14077 *(uint64_t *)taddr += ubase;
14078 else
14079 *(uint64_t *)taddr +=
14080 udaddr + ts->dofs_offset + r->dofr_offset;
14081 break;
14082 default:
14083 dtrace_dof_error(dof, "invalid relocation type");
14084 return (-1);
14085 }
14086
14087 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
14088 }
14089
14090 return (0);
14091 }
14092
14093 /*
14094 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
14095 * header: it should be at the front of a memory region that is at least
14096 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
14097 * size. It need not be validated in any other way.
14098 */
14099 static int
14100 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
14101 dtrace_enabling_t **enabp, uint64_t ubase, uint64_t udaddr, int noprobes)
14102 {
14103 uint64_t len = dof->dofh_loadsz, seclen;
14104 uintptr_t daddr = (uintptr_t)dof;
14105 dtrace_ecbdesc_t *ep;
14106 dtrace_enabling_t *enab;
14107 uint_t i;
14108
14109 ASSERT(MUTEX_HELD(&dtrace_lock));
14110 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
14111
14112 /*
14113 * Check the DOF header identification bytes. In addition to checking
14114 * valid settings, we also verify that unused bits/bytes are zeroed so
14115 * we can use them later without fear of regressing existing binaries.
14116 */
14117 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
14118 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
14119 dtrace_dof_error(dof, "DOF magic string mismatch");
14120 return (-1);
14121 }
14122
14123 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
14124 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
14125 dtrace_dof_error(dof, "DOF has invalid data model");
14126 return (-1);
14127 }
14128
14129 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
14130 dtrace_dof_error(dof, "DOF encoding mismatch");
14131 return (-1);
14132 }
14133
14134 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14135 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
14136 dtrace_dof_error(dof, "DOF version mismatch");
14137 return (-1);
14138 }
14139
14140 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
14141 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
14142 return (-1);
14143 }
14144
14145 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
14146 dtrace_dof_error(dof, "DOF uses too many integer registers");
14147 return (-1);
14148 }
14149
14150 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
14151 dtrace_dof_error(dof, "DOF uses too many tuple registers");
14152 return (-1);
14153 }
14154
14155 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
14156 if (dof->dofh_ident[i] != 0) {
14157 dtrace_dof_error(dof, "DOF has invalid ident byte set");
14158 return (-1);
14159 }
14160 }
14161
14162 if (dof->dofh_flags & ~DOF_FL_VALID) {
14163 dtrace_dof_error(dof, "DOF has invalid flag bits set");
14164 return (-1);
14165 }
14166
14167 if (dof->dofh_secsize == 0) {
14168 dtrace_dof_error(dof, "zero section header size");
14169 return (-1);
14170 }
14171
14172 /*
14173 * Check that the section headers don't exceed the amount of DOF
14174 * data. Note that we cast the section size and number of sections
14175 * to uint64_t's to prevent possible overflow in the multiplication.
14176 */
14177 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
14178
14179 if (dof->dofh_secoff > len || seclen > len ||
14180 dof->dofh_secoff + seclen > len) {
14181 dtrace_dof_error(dof, "truncated section headers");
14182 return (-1);
14183 }
14184
14185 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
14186 dtrace_dof_error(dof, "misaligned section headers");
14187 return (-1);
14188 }
14189
14190 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
14191 dtrace_dof_error(dof, "misaligned section size");
14192 return (-1);
14193 }
14194
14195 /*
14196 * Take an initial pass through the section headers to be sure that
14197 * the headers don't have stray offsets. If the 'noprobes' flag is
14198 * set, do not permit sections relating to providers, probes, or args.
14199 */
14200 for (i = 0; i < dof->dofh_secnum; i++) {
14201 dof_sec_t *sec = (dof_sec_t *)(daddr +
14202 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14203
14204 if (noprobes) {
14205 switch (sec->dofs_type) {
14206 case DOF_SECT_PROVIDER:
14207 case DOF_SECT_PROBES:
14208 case DOF_SECT_PRARGS:
14209 case DOF_SECT_PROFFS:
14210 dtrace_dof_error(dof, "illegal sections "
14211 "for enabling");
14212 return (-1);
14213 }
14214 }
14215
14216 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
14217 !(sec->dofs_flags & DOF_SECF_LOAD)) {
14218 dtrace_dof_error(dof, "loadable section with load "
14219 "flag unset");
14220 return (-1);
14221 }
14222
14223 if (!(sec->dofs_flags & DOF_SECF_LOAD))
14224 continue; /* just ignore non-loadable sections */
14225
14226 if (!ISP2(sec->dofs_align)) {
14227 dtrace_dof_error(dof, "bad section alignment");
14228 return (-1);
14229 }
14230
14231 if (sec->dofs_offset & (sec->dofs_align - 1)) {
14232 dtrace_dof_error(dof, "misaligned section");
14233 return (-1);
14234 }
14235
14236 if (sec->dofs_offset > len || sec->dofs_size > len ||
14237 sec->dofs_offset + sec->dofs_size > len) {
14238 dtrace_dof_error(dof, "corrupt section header");
14239 return (-1);
14240 }
14241
14242 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
14243 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
14244 dtrace_dof_error(dof, "non-terminating string table");
14245 return (-1);
14246 }
14247 }
14248
14249 /*
14250 * Take a second pass through the sections and locate and perform any
14251 * relocations that are present. We do this after the first pass to
14252 * be sure that all sections have had their headers validated.
14253 */
14254 for (i = 0; i < dof->dofh_secnum; i++) {
14255 dof_sec_t *sec = (dof_sec_t *)(daddr +
14256 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14257
14258 if (!(sec->dofs_flags & DOF_SECF_LOAD))
14259 continue; /* skip sections that are not loadable */
14260
14261 switch (sec->dofs_type) {
14262 case DOF_SECT_URELHDR:
14263 if (dtrace_dof_relocate(dof, sec, ubase, udaddr) != 0)
14264 return (-1);
14265 break;
14266 }
14267 }
14268
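	/*
	 * Finally, gather the ECB descriptions into an enabling, creating
	 * one if our caller didn't pass one in.
	 */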
14269 if ((enab = *enabp) == NULL)
14270 enab = *enabp = dtrace_enabling_create(vstate);
14271
14272 for (i = 0; i < dof->dofh_secnum; i++) {
14273 dof_sec_t *sec = (dof_sec_t *)(daddr +
14274 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14275
14276 if (sec->dofs_type != DOF_SECT_ECBDESC)
14277 continue;
14278
14279 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
14280 dtrace_enabling_destroy(enab);
14281 *enabp = NULL;
14282 return (-1);
14283 }
14284
14285 dtrace_enabling_add(enab, ep);
14286 }
14287
14288 return (0);
14289 }
14290
14291 /*
14292 * Process DOF for any options. This routine assumes that the DOF has been
14293 * at least processed by dtrace_dof_slurp().
14294 */
14295 static int
14296 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
14297 {
14298 int i, rval;
14299 uint32_t entsize;
14300 size_t offs;
14301 dof_optdesc_t *desc;
14302
14303 for (i = 0; i < dof->dofh_secnum; i++) {
14304 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
14305 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14306
14307 if (sec->dofs_type != DOF_SECT_OPTDESC)
14308 continue;
14309
14310 if (sec->dofs_align != sizeof (uint64_t)) {
14311 dtrace_dof_error(dof, "bad alignment in "
14312 "option description");
14313 return (EINVAL);
14314 }
14315
14316 if ((entsize = sec->dofs_entsize) == 0) {
14317 dtrace_dof_error(dof, "zeroed option entry size");
14318 return (EINVAL);
14319 }
14320
14321 if (entsize < sizeof (dof_optdesc_t)) {
14322 dtrace_dof_error(dof, "bad option entry size");
14323 return (EINVAL);
14324 }
14325
14326 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
14327 desc = (dof_optdesc_t *)((uintptr_t)dof +
14328 (uintptr_t)sec->dofs_offset + offs);
14329
14330 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
14331 dtrace_dof_error(dof, "non-zero option string");
14332 return (EINVAL);
14333 }
14334
14335 if (desc->dofo_value == DTRACEOPT_UNSET) {
14336 dtrace_dof_error(dof, "unset option");
14337 return (EINVAL);
14338 }
14339
14340 if ((rval = dtrace_state_option(state,
14341 desc->dofo_option, desc->dofo_value)) != 0) {
14342 dtrace_dof_error(dof, "rejected option");
14343 return (rval);
14344 }
14345 }
14346 }
14347
14348 return (0);
14349 }
14350
14351 /*
14352 * DTrace Consumer State Functions
14353 */
14354 static int
14355 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
14356 {
14357 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
14358 void *base;
14359 uintptr_t limit;
14360 dtrace_dynvar_t *dvar, *next, *start;
14361 int i;
14362
14363 ASSERT(MUTEX_HELD(&dtrace_lock));
14364 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
14365
14366 bzero(dstate, sizeof (dtrace_dstate_t));
14367
14368 if ((dstate->dtds_chunksize = chunksize) == 0)
14369 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
14370
14371 VERIFY(dstate->dtds_chunksize < LONG_MAX);
14372
14373 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
14374 size = min;
14375
14376 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
14377 return (ENOMEM);
14378
14379 dstate->dtds_size = size;
14380 dstate->dtds_base = base;
14381 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
14382 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
14383
14384 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
14385
14386 if (hashsize != 1 && (hashsize & 1))
14387 hashsize--;
14388
14389 dstate->dtds_hashsize = hashsize;
14390 dstate->dtds_hash = dstate->dtds_base;
14391
14392 /*
	 * Set all of our hash buckets to point to the single sink, and (if
	 * it hasn't already been set) set the sink's hash value to be the
14395 * sink sentinel value. The sink is needed for dynamic variable
14396 * lookups to know that they have iterated over an entire, valid hash
14397 * chain.
14398 */
14399 for (i = 0; i < hashsize; i++)
14400 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
14401
14402 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
14403 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
14404
14405 /*
14406 * Determine number of active CPUs. Divide free list evenly among
14407 * active CPUs.
14408 */
14409 start = (dtrace_dynvar_t *)
14410 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
14411 limit = (uintptr_t)base + size;
14412
14413 VERIFY((uintptr_t)start < limit);
14414 VERIFY((uintptr_t)start >= (uintptr_t)base);
14415
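	/*
	 * Each CPU gets an equal share of the remaining dynamic variable
	 * space, rounded down to a whole number of chunks.
	 */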
14416 maxper = (limit - (uintptr_t)start) / NCPU;
14417 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
14418
14419 #ifndef illumos
14420 CPU_FOREACH(i) {
14421 #else
14422 for (i = 0; i < NCPU; i++) {
14423 #endif
14424 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
14425
14426 /*
14427 * If we don't even have enough chunks to make it once through
14428 * NCPUs, we're just going to allocate everything to the first
14429 * CPU. And if we're on the last CPU, we're going to allocate
14430 * whatever is left over. In either case, we set the limit to
14431 * be the limit of the dynamic variable space.
14432 */
14433 if (maxper == 0 || i == NCPU - 1) {
14434 limit = (uintptr_t)base + size;
14435 start = NULL;
14436 } else {
14437 limit = (uintptr_t)start + maxper;
14438 start = (dtrace_dynvar_t *)limit;
14439 }
14440
14441 VERIFY(limit <= (uintptr_t)base + size);
14442
14443 for (;;) {
14444 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
14445 dstate->dtds_chunksize);
14446
14447 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
14448 break;
14449
14450 VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
14451 (uintptr_t)dvar <= (uintptr_t)base + size);
14452 dvar->dtdv_next = next;
14453 dvar = next;
14454 }
14455
14456 if (maxper == 0)
14457 break;
14458 }
14459
14460 return (0);
14461 }
14462
14463 static void
14464 dtrace_dstate_fini(dtrace_dstate_t *dstate)
14465 {
14466 ASSERT(MUTEX_HELD(&cpu_lock));
14467
14468 if (dstate->dtds_base == NULL)
14469 return;
14470
14471 kmem_free(dstate->dtds_base, dstate->dtds_size);
14472 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
14473 }
14474
14475 static void
14476 dtrace_vstate_fini(dtrace_vstate_t *vstate)
14477 {
14478 /*
14479 * Logical XOR, where are you?
14480 */
14481 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
14482
14483 if (vstate->dtvs_nglobals > 0) {
14484 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
14485 sizeof (dtrace_statvar_t *));
14486 }
14487
14488 if (vstate->dtvs_ntlocals > 0) {
14489 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
14490 sizeof (dtrace_difv_t));
14491 }
14492
14493 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
14494
14495 if (vstate->dtvs_nlocals > 0) {
14496 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
14497 sizeof (dtrace_statvar_t *));
14498 }
14499 }
14500
14501 #ifdef illumos
14502 static void
14503 dtrace_state_clean(dtrace_state_t *state)
14504 {
14505 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14506 return;
14507
14508 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14509 dtrace_speculation_clean(state);
14510 }
14511
14512 static void
14513 dtrace_state_deadman(dtrace_state_t *state)
14514 {
14515 hrtime_t now;
14516
14517 dtrace_sync();
14518
14519 now = dtrace_gethrtime();
14520
14521 if (state != dtrace_anon.dta_state &&
14522 now - state->dts_laststatus >= dtrace_deadman_user)
14523 return;
14524
14525 /*
14526 * We must be sure that dts_alive never appears to be less than the
14527 * value upon entry to dtrace_state_deadman(), and because we lack a
14528 * dtrace_cas64(), we cannot store to it atomically. We thus instead
14529 * store INT64_MAX to it, followed by a memory barrier, followed by
14530 * the new value. This assures that dts_alive never appears to be
14531 * less than its true value, regardless of the order in which the
14532 * stores to the underlying storage are issued.
14533 */
14534 state->dts_alive = INT64_MAX;
14535 dtrace_membar_producer();
14536 state->dts_alive = now;
14537 }
14538 #else /* !illumos */
14539 static void
14540 dtrace_state_clean(void *arg)
14541 {
14542 dtrace_state_t *state = arg;
14543 dtrace_optval_t *opt = state->dts_options;
14544
14545 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14546 return;
14547
14548 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14549 dtrace_speculation_clean(state);
14550
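	/*
	 * The cleaning rate option is expressed in nanoseconds; convert it
	 * to callout ticks before rescheduling ourselves.
	 */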
14551 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
14552 dtrace_state_clean, state);
14553 }
14554
14555 static void
14556 dtrace_state_deadman(void *arg)
14557 {
14558 dtrace_state_t *state = arg;
14559 hrtime_t now;
14560
14561 dtrace_sync();
14562
14563 dtrace_debug_output();
14564
14565 now = dtrace_gethrtime();
14566
14567 if (state != dtrace_anon.dta_state &&
14568 now - state->dts_laststatus >= dtrace_deadman_user)
14569 return;
14570
14571 /*
14572 * We must be sure that dts_alive never appears to be less than the
14573 * value upon entry to dtrace_state_deadman(), and because we lack a
14574 * dtrace_cas64(), we cannot store to it atomically. We thus instead
14575 * store INT64_MAX to it, followed by a memory barrier, followed by
14576 * the new value. This assures that dts_alive never appears to be
14577 * less than its true value, regardless of the order in which the
14578 * stores to the underlying storage are issued.
14579 */
14580 state->dts_alive = INT64_MAX;
14581 dtrace_membar_producer();
14582 state->dts_alive = now;
14583
14584 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
14585 dtrace_state_deadman, state);
14586 }
14587 #endif /* illumos */
14588
14589 static dtrace_state_t *
14590 #ifdef illumos
14591 dtrace_state_create(dev_t *devp, cred_t *cr)
14592 #else
14593 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused)
14594 #endif
14595 {
14596 #ifdef illumos
14597 minor_t minor;
14598 major_t major;
14599 #else
14600 cred_t *cr = NULL;
14601 int m = 0;
14602 #endif
14603 char c[30];
14604 dtrace_state_t *state;
14605 dtrace_optval_t *opt;
14606 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
14607 int cpu_it;
14608
14609 ASSERT(MUTEX_HELD(&dtrace_lock));
14610 ASSERT(MUTEX_HELD(&cpu_lock));
14611
14612 #ifdef illumos
14613 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
14614 VM_BESTFIT | VM_SLEEP);
14615
14616 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
14617 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14618 return (NULL);
14619 }
14620
14621 state = ddi_get_soft_state(dtrace_softstate, minor);
14622 #else
14623 if (dev != NULL) {
14624 cr = dev->si_cred;
14625 m = dev2unit(dev);
14626 }
14627
14628 /* Allocate memory for the state. */
14629 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
14630 #endif
14631
14632 state->dts_epid = DTRACE_EPIDNONE + 1;
14633
14634 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
14635 #ifdef illumos
14636 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
14637 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14638
14639 if (devp != NULL) {
14640 major = getemajor(*devp);
14641 } else {
14642 major = ddi_driver_major(dtrace_devi);
14643 }
14644
14645 state->dts_dev = makedevice(major, minor);
14646
14647 if (devp != NULL)
14648 *devp = state->dts_dev;
14649 #else
14650 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
14651 state->dts_dev = dev;
14652 #endif
14653
14654 /*
14655 * We allocate NCPU buffers. On the one hand, this can be quite
14656 * a bit of memory per instance (nearly 36K on a Starcat). On the
14657 * other hand, it saves an additional memory reference in the probe
14658 * path.
14659 */
14660 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
14661 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
14662
14663 /*
14664 * Allocate and initialise the per-process per-CPU random state.
	 * Because SI_SUB_RANDOM < SI_SUB_DTRACE_ANON, the entropy device is
	 * assumed to be seeded at this point (if seeded from a Fortuna seed
	 * file).
14667 */
14668 arc4random_buf(&state->dts_rstate[0], 2 * sizeof(uint64_t));
14669 for (cpu_it = 1; cpu_it < NCPU; cpu_it++) {
14670 /*
14671 * Each CPU is assigned a 2^64 period, non-overlapping
14672 * subsequence.
14673 */
14674 dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1],
14675 state->dts_rstate[cpu_it]);
14676 }
14677
14678 #ifdef illumos
14679 state->dts_cleaner = CYCLIC_NONE;
14680 state->dts_deadman = CYCLIC_NONE;
14681 #else
14682 callout_init(&state->dts_cleaner, 1);
14683 callout_init(&state->dts_deadman, 1);
14684 #endif
14685 state->dts_vstate.dtvs_state = state;
14686
14687 for (i = 0; i < DTRACEOPT_MAX; i++)
14688 state->dts_options[i] = DTRACEOPT_UNSET;
14689
14690 /*
14691 * Set the default options.
14692 */
14693 opt = state->dts_options;
14694 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
14695 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
14696 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
14697 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
14698 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
14699 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
14700 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
14701 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
14702 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
14703 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
14704 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
14705 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
14706 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
14707 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
14708
14709 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
14710
14711 /*
14712 * Depending on the user credentials, we set flag bits which alter probe
14713 * visibility or the amount of destructiveness allowed. In the case of
14714 * actual anonymous tracing, or the possession of all privileges, all of
14715 * the normal checks are bypassed.
14716 */
14717 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
14718 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
14719 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
14720 } else {
14721 /*
14722 * Set up the credentials for this instantiation. We take a
14723 * hold on the credential to prevent it from disappearing on
14724 * us; this in turn prevents the zone_t referenced by this
14725 * credential from disappearing. This means that we can
14726 * examine the credential and the zone from probe context.
14727 */
14728 crhold(cr);
14729 state->dts_cred.dcr_cred = cr;
14730
14731 /*
14732 * CRA_PROC means "we have *some* privilege for dtrace" and
14733 * unlocks the use of variables like pid, zonename, etc.
14734 */
14735 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
14736 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14737 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
14738 }
14739
14740 /*
14741 * dtrace_user allows use of syscall and profile providers.
14742 * If the user also has proc_owner and/or proc_zone, we
14743 * extend the scope to include additional visibility and
14744 * destructive power.
14745 */
14746 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
14747 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
14748 state->dts_cred.dcr_visible |=
14749 DTRACE_CRV_ALLPROC;
14750
14751 state->dts_cred.dcr_action |=
14752 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14753 }
14754
14755 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
14756 state->dts_cred.dcr_visible |=
14757 DTRACE_CRV_ALLZONE;
14758
14759 state->dts_cred.dcr_action |=
14760 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14761 }
14762
14763 /*
14764 * If we have all privs in whatever zone this is,
14765 * we can do destructive things to processes which
14766 * have altered credentials.
14767 */
14768 #ifdef illumos
14769 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14770 cr->cr_zone->zone_privset)) {
14771 state->dts_cred.dcr_action |=
14772 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14773 }
14774 #endif
14775 }
14776
14777 /*
14778 * Holding the dtrace_kernel privilege also implies that
14779 * the user has the dtrace_user privilege from a visibility
14780 * perspective. But without further privileges, some
14781 * destructive actions are not available.
14782 */
14783 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
14784 /*
14785 * Make all probes in all zones visible. However,
14786 * this doesn't mean that all actions become available
14787 * to all zones.
14788 */
14789 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
14790 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
14791
14792 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
14793 DTRACE_CRA_PROC;
14794 /*
14795 * Holding proc_owner means that destructive actions
14796 * for *this* zone are allowed.
14797 */
14798 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14799 state->dts_cred.dcr_action |=
14800 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14801
14802 /*
14803 * Holding proc_zone means that destructive actions
			 * for this user/group ID in all zones are allowed.
14805 */
14806 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14807 state->dts_cred.dcr_action |=
14808 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14809
14810 #ifdef illumos
14811 /*
14812 * If we have all privs in whatever zone this is,
14813 * we can do destructive things to processes which
14814 * have altered credentials.
14815 */
14816 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
14817 cr->cr_zone->zone_privset)) {
14818 state->dts_cred.dcr_action |=
14819 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
14820 }
14821 #endif
14822 }
14823
14824 /*
14825 * Holding the dtrace_proc privilege gives control over fasttrap
14826 * and pid providers. We need to grant wider destructive
14827 * privileges in the event that the user has proc_owner and/or
14828 * proc_zone.
14829 */
14830 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
14831 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
14832 state->dts_cred.dcr_action |=
14833 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
14834
14835 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
14836 state->dts_cred.dcr_action |=
14837 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
14838 }
14839 }
14840
14841 return (state);
14842 }
14843
14844 static int
14845 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
14846 {
14847 dtrace_optval_t *opt = state->dts_options, size;
14848 processorid_t cpu = 0;
14849 int flags = 0, rval, factor, divisor = 1;
14850
14851 ASSERT(MUTEX_HELD(&dtrace_lock));
14852 ASSERT(MUTEX_HELD(&cpu_lock));
14853 ASSERT(which < DTRACEOPT_MAX);
14854 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
14855 (state == dtrace_anon.dta_state &&
14856 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
14857
14858 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
14859 return (0);
14860
14861 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
14862 cpu = opt[DTRACEOPT_CPU];
14863
14864 if (which == DTRACEOPT_SPECSIZE)
14865 flags |= DTRACEBUF_NOSWITCH;
14866
14867 if (which == DTRACEOPT_BUFSIZE) {
14868 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
14869 flags |= DTRACEBUF_RING;
14870
14871 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
14872 flags |= DTRACEBUF_FILL;
14873
14874 if (state != dtrace_anon.dta_state ||
14875 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14876 flags |= DTRACEBUF_INACTIVE;
14877 }
14878
14879 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
14880 /*
14881 * The size must be 8-byte aligned. If the size is not 8-byte
14882 * aligned, drop it down by the difference.
14883 */
14884 if (size & (sizeof (uint64_t) - 1))
14885 size -= size & (sizeof (uint64_t) - 1);
14886
14887 if (size < state->dts_reserve) {
14888 /*
			 * Buffers must always be large enough to accommodate
			 * their prereserved space.  We return E2BIG instead
			 * of ENOMEM in this case to allow user-level
			 * software to differentiate the cases.
14893 */
14894 return (E2BIG);
14895 }
14896
14897 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
14898
14899 if (rval != ENOMEM) {
14900 opt[which] = size;
14901 return (rval);
14902 }
14903
14904 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
14905 return (rval);
14906
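		/*
		 * The allocation failed; scale the next attempt down by the
		 * smallest power-of-two divisor that covers the shortfall
		 * factor reported by dtrace_buffer_alloc().
		 */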
14907 for (divisor = 2; divisor < factor; divisor <<= 1)
14908 continue;
14909 }
14910
14911 return (ENOMEM);
14912 }
14913
14914 static int
14915 dtrace_state_buffers(dtrace_state_t *state)
14916 {
14917 dtrace_speculation_t *spec = state->dts_speculations;
14918 int rval, i;
14919
14920 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
14921 DTRACEOPT_BUFSIZE)) != 0)
14922 return (rval);
14923
14924 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
14925 DTRACEOPT_AGGSIZE)) != 0)
14926 return (rval);
14927
14928 for (i = 0; i < state->dts_nspeculations; i++) {
14929 if ((rval = dtrace_state_buffer(state,
14930 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
14931 return (rval);
14932 }
14933
14934 return (0);
14935 }
14936
14937 static void
14938 dtrace_state_prereserve(dtrace_state_t *state)
14939 {
14940 dtrace_ecb_t *ecb;
14941 dtrace_probe_t *probe;
14942
14943 state->dts_reserve = 0;
14944
14945 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
14946 return;
14947
14948 /*
14949 * If our buffer policy is a "fill" buffer policy, we need to set the
14950 * prereserved space to be the space required by the END probes.
14951 */
14952 probe = dtrace_probes[dtrace_probeid_end - 1];
14953 ASSERT(probe != NULL);
14954
14955 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
14956 if (ecb->dte_state != state)
14957 continue;
14958
14959 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
14960 }
14961 }
14962
14963 static int
14964 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
14965 {
14966 dtrace_optval_t *opt = state->dts_options, sz, nspec;
14967 dtrace_speculation_t *spec;
14968 dtrace_buffer_t *buf;
14969 #ifdef illumos
14970 cyc_handler_t hdlr;
14971 cyc_time_t when;
14972 #endif
14973 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
14974 dtrace_icookie_t cookie;
14975
14976 mutex_enter(&cpu_lock);
14977 mutex_enter(&dtrace_lock);
14978
14979 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14980 rval = EBUSY;
14981 goto out;
14982 }
14983
14984 /*
14985 * Before we can perform any checks, we must prime all of the
14986 * retained enablings that correspond to this state.
14987 */
14988 dtrace_enabling_prime(state);
14989
14990 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
14991 rval = EACCES;
14992 goto out;
14993 }
14994
14995 dtrace_state_prereserve(state);
14996
14997 /*
	 * Now what we want to do is try to allocate our speculations.
14999 * We do not automatically resize the number of speculations; if
15000 * this fails, we will fail the operation.
15001 */
15002 nspec = opt[DTRACEOPT_NSPEC];
15003 ASSERT(nspec != DTRACEOPT_UNSET);
15004
15005 if (nspec > INT_MAX) {
15006 rval = ENOMEM;
15007 goto out;
15008 }
15009
15010 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
15011 KM_NOSLEEP | KM_NORMALPRI);
15012
15013 if (spec == NULL) {
15014 rval = ENOMEM;
15015 goto out;
15016 }
15017
15018 state->dts_speculations = spec;
15019 state->dts_nspeculations = (int)nspec;
15020
15021 for (i = 0; i < nspec; i++) {
15022 if ((buf = kmem_zalloc(bufsize,
15023 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
15024 rval = ENOMEM;
15025 goto err;
15026 }
15027
15028 spec[i].dtsp_buffer = buf;
15029 }
15030
15031 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
15032 if (dtrace_anon.dta_state == NULL) {
15033 rval = ENOENT;
15034 goto out;
15035 }
15036
15037 if (state->dts_necbs != 0) {
15038 rval = EALREADY;
15039 goto out;
15040 }
15041
15042 state->dts_anon = dtrace_anon_grab();
15043 ASSERT(state->dts_anon != NULL);
15044 state = state->dts_anon;
15045
15046 /*
15047 * We want "grabanon" to be set in the grabbed state, so we'll
15048 * copy that option value from the grabbing state into the
15049 * grabbed state.
15050 */
15051 state->dts_options[DTRACEOPT_GRABANON] =
15052 opt[DTRACEOPT_GRABANON];
15053
15054 *cpu = dtrace_anon.dta_beganon;
15055
15056 /*
15057 * If the anonymous state is active (as it almost certainly
15058 * is if the anonymous enabling ultimately matched anything),
15059 * we don't allow any further option processing -- but we
15060 * don't return failure.
15061 */
15062 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
15063 goto out;
15064 }
15065
15066 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
15067 opt[DTRACEOPT_AGGSIZE] != 0) {
15068 if (state->dts_aggregations == NULL) {
15069 /*
15070 * We're not going to create an aggregation buffer
15071 * because we don't have any ECBs that contain
15072 * aggregations -- set this option to 0.
15073 */
15074 opt[DTRACEOPT_AGGSIZE] = 0;
15075 } else {
15076 /*
15077 * If we have an aggregation buffer, we must also have
15078 * a buffer to use as scratch.
15079 */
15080 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
15081 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
15082 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
15083 }
15084 }
15085 }
15086
15087 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
15088 opt[DTRACEOPT_SPECSIZE] != 0) {
15089 if (!state->dts_speculates) {
15090 /*
15091 * We're not going to create speculation buffers
15092 * because we don't have any ECBs that actually
15093 * speculate -- set the speculation size to 0.
15094 */
15095 opt[DTRACEOPT_SPECSIZE] = 0;
15096 }
15097 }
15098
15099 /*
15100 * The bare minimum size for any buffer that we're actually going to
15101 * do anything to is sizeof (uint64_t).
15102 */
15103 sz = sizeof (uint64_t);
15104
15105 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
15106 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
15107 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
15108 /*
15109 * A buffer size has been explicitly set to 0 (or to a size
15110 * that will be adjusted to 0) and we need the space -- we
15111 * need to return failure. We return ENOSPC to differentiate
15112 * it from failing to allocate a buffer due to failure to meet
15113 * the reserve (for which we return E2BIG).
15114 */
15115 rval = ENOSPC;
15116 goto out;
15117 }
15118
15119 if ((rval = dtrace_state_buffers(state)) != 0)
15120 goto err;
15121
15122 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
15123 sz = dtrace_dstate_defsize;
15124
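	/*
	 * Try to initialize the dynamic variable state, halving the
	 * requested size on each failure unless the buffer resizing policy
	 * is "manual".
	 */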
15125 do {
15126 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
15127
15128 if (rval == 0)
15129 break;
15130
15131 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
15132 goto err;
15133 } while (sz >>= 1);
15134
15135 opt[DTRACEOPT_DYNVARSIZE] = sz;
15136
15137 if (rval != 0)
15138 goto err;
15139
15140 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
15141 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
15142
15143 if (opt[DTRACEOPT_CLEANRATE] == 0)
15144 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
15145
15146 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
15147 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
15148
15149 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
15150 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
15151
15152 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
15153 #ifdef illumos
15154 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
15155 hdlr.cyh_arg = state;
15156 hdlr.cyh_level = CY_LOW_LEVEL;
15157
15158 when.cyt_when = 0;
15159 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
15160
15161 state->dts_cleaner = cyclic_add(&hdlr, &when);
15162
15163 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
15164 hdlr.cyh_arg = state;
15165 hdlr.cyh_level = CY_LOW_LEVEL;
15166
15167 when.cyt_when = 0;
15168 when.cyt_interval = dtrace_deadman_interval;
15169
15170 state->dts_deadman = cyclic_add(&hdlr, &when);
15171 #else
15172 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
15173 dtrace_state_clean, state);
15174 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
15175 dtrace_state_deadman, state);
15176 #endif
15177
15178 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
15179
15180 #ifdef illumos
15181 if (state->dts_getf != 0 &&
15182 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
15183 /*
15184 * We don't have kernel privs but we have at least one call
15185 * to getf(); we need to bump our zone's count, and (if
15186 * this is the first enabling to have an unprivileged call
15187 * to getf()) we need to hook into closef().
15188 */
15189 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
15190
15191 if (dtrace_getf++ == 0) {
15192 ASSERT(dtrace_closef == NULL);
15193 dtrace_closef = dtrace_getf_barrier;
15194 }
15195 }
15196 #endif
15197
15198 /*
15199 * Now it's time to actually fire the BEGIN probe. We need to disable
15200 * interrupts here both to record the CPU on which we fired the BEGIN
15201 * probe (the data from this CPU will be processed first at user
15202 * level) and to manually activate the buffer for this CPU.
15203 */
15204 cookie = dtrace_interrupt_disable();
15205 *cpu = curcpu;
15206 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
15207 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
15208
15209 dtrace_probe(dtrace_probeid_begin,
15210 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
15211 dtrace_interrupt_enable(cookie);
15212 /*
15213 * We may have had an exit action from a BEGIN probe; only change our
15214 * state to ACTIVE if we're still in WARMUP.
15215 */
15216 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
15217 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
15218
15219 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
15220 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
15221
15222 #ifdef __FreeBSD__
15223 /*
15224 * We enable anonymous tracing before APs are started, so we must
15225 * activate buffers using the current CPU.
15226 */
15227 if (state == dtrace_anon.dta_state)
15228 for (int i = 0; i < NCPU; i++)
15229 dtrace_buffer_activate_cpu(state, i);
15230 else
15231 dtrace_xcall(DTRACE_CPUALL,
15232 (dtrace_xcall_t)dtrace_buffer_activate, state);
15233 #else
15234 /*
	 * Regardless of whether we're now in ACTIVE or DRAINING, we
15236 * want each CPU to transition its principal buffer out of the
15237 * INACTIVE state. Doing this assures that no CPU will suddenly begin
15238 * processing an ECB halfway down a probe's ECB chain; all CPUs will
15239 * atomically transition from processing none of a state's ECBs to
15240 * processing all of them.
15241 */
15242 dtrace_xcall(DTRACE_CPUALL,
15243 (dtrace_xcall_t)dtrace_buffer_activate, state);
15244 #endif
15245 goto out;
15246
15247 err:
15248 dtrace_buffer_free(state->dts_buffer);
15249 dtrace_buffer_free(state->dts_aggbuffer);
15250
15251 if ((nspec = state->dts_nspeculations) == 0) {
15252 ASSERT(state->dts_speculations == NULL);
15253 goto out;
15254 }
15255
15256 spec = state->dts_speculations;
15257 ASSERT(spec != NULL);
15258
15259 for (i = 0; i < state->dts_nspeculations; i++) {
15260 if ((buf = spec[i].dtsp_buffer) == NULL)
15261 break;
15262
15263 dtrace_buffer_free(buf);
15264 kmem_free(buf, bufsize);
15265 }
15266
15267 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15268 state->dts_nspeculations = 0;
15269 state->dts_speculations = NULL;
15270
15271 out:
15272 mutex_exit(&dtrace_lock);
15273 mutex_exit(&cpu_lock);
15274
15275 return (rval);
15276 }
15277
15278 static int
15279 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
15280 {
15281 dtrace_icookie_t cookie;
15282
15283 ASSERT(MUTEX_HELD(&dtrace_lock));
15284
15285 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
15286 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
15287 return (EINVAL);
15288
15289 /*
15290 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
15291 * to be sure that every CPU has seen it. See below for the details
15292 * on why this is done.
15293 */
15294 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
15295 dtrace_sync();
15296
15297 /*
15298 * By this point, it is impossible for any CPU to be still processing
15299 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
15300 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
15301 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
15302 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
15303 * iff we're in the END probe.
15304 */
15305 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
15306 dtrace_sync();
15307 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
15308
15309 /*
15310 * Finally, we can release the reserve and call the END probe. We
15311 * disable interrupts across calling the END probe to allow us to
15312 * return the CPU on which we actually called the END probe. This
15313 * allows user-land to be sure that this CPU's principal buffer is
15314 * processed last.
15315 */
15316 state->dts_reserve = 0;
15317
15318 cookie = dtrace_interrupt_disable();
15319 *cpu = curcpu;
15320 dtrace_probe(dtrace_probeid_end,
15321 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
15322 dtrace_interrupt_enable(cookie);
15323
15324 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
15325 dtrace_sync();
15326
15327 #ifdef illumos
15328 if (state->dts_getf != 0 &&
15329 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
15330 /*
15331 * We don't have kernel privs but we have at least one call
15332 * to getf(); we need to lower our zone's count, and (if
15333 * this is the last enabling to have an unprivileged call
15334 * to getf()) we need to clear the closef() hook.
15335 */
15336 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
15337 ASSERT(dtrace_closef == dtrace_getf_barrier);
15338 ASSERT(dtrace_getf > 0);
15339
15340 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
15341
15342 if (--dtrace_getf == 0)
15343 dtrace_closef = NULL;
15344 }
15345 #endif
15346
15347 return (0);
15348 }
15349
15350 static int
15351 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
15352 dtrace_optval_t val)
15353 {
15354 ASSERT(MUTEX_HELD(&dtrace_lock));
15355
15356 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
15357 return (EBUSY);
15358
15359 if (option >= DTRACEOPT_MAX)
15360 return (EINVAL);
15361
15362 if (option != DTRACEOPT_CPU && val < 0)
15363 return (EINVAL);
15364
15365 switch (option) {
15366 case DTRACEOPT_DESTRUCTIVE:
15367 if (dtrace_destructive_disallow)
15368 return (EACCES);
15369
15370 state->dts_cred.dcr_destructive = 1;
15371 break;
15372
15373 case DTRACEOPT_BUFSIZE:
15374 case DTRACEOPT_DYNVARSIZE:
15375 case DTRACEOPT_AGGSIZE:
15376 case DTRACEOPT_SPECSIZE:
15377 case DTRACEOPT_STRSIZE:
15378 if (val < 0)
15379 return (EINVAL);
15380
15381 if (val >= LONG_MAX) {
15382 /*
15383 * If this is an otherwise negative value, set it to
15384 * the highest multiple of 128m less than LONG_MAX.
15385 * Technically, we're adjusting the size without
15386 * regard to the buffer resizing policy, but in fact,
15387 * this has no effect -- if we set the buffer size to
15388 * ~LONG_MAX and the buffer policy is ultimately set to
15389 * be "manual", the buffer allocation is guaranteed to
15390 * fail, if only because the allocation requires two
			 * buffers.  (We set the size to the highest
15392 * multiple of 128m because it ensures that the size
15393 * will remain a multiple of a megabyte when
15394 * repeatedly halved -- all the way down to 15m.)
15395 */
15396 val = LONG_MAX - (1 << 27) + 1;
15397 }
15398 }
15399
15400 state->dts_options[option] = val;
15401
15402 return (0);
15403 }
15404
15405 static void
15406 dtrace_state_destroy(dtrace_state_t *state)
15407 {
15408 dtrace_ecb_t *ecb;
15409 dtrace_vstate_t *vstate = &state->dts_vstate;
15410 #ifdef illumos
15411 minor_t minor = getminor(state->dts_dev);
15412 #endif
15413 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
15414 dtrace_speculation_t *spec = state->dts_speculations;
15415 int nspec = state->dts_nspeculations;
15416 uint32_t match;
15417
15418 ASSERT(MUTEX_HELD(&dtrace_lock));
15419 ASSERT(MUTEX_HELD(&cpu_lock));
15420
15421 /*
15422 * First, retract any retained enablings for this state.
15423 */
15424 dtrace_enabling_retract(state);
15425 ASSERT(state->dts_nretained == 0);
15426
15427 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
15428 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
15429 /*
15430 * We have managed to come into dtrace_state_destroy() on a
15431 * hot enabling -- almost certainly because of a disorderly
15432 * shutdown of a consumer. (That is, a consumer that is
15433 * exiting without having called dtrace_stop().) In this case,
15434 * we're going to set our activity to be KILLED, and then
15435 * issue a sync to be sure that everyone is out of probe
15436 * context before we start blowing away ECBs.
15437 */
15438 state->dts_activity = DTRACE_ACTIVITY_KILLED;
15439 dtrace_sync();
15440 }
15441
15442 /*
15443 * Release the credential hold we took in dtrace_state_create().
15444 */
15445 if (state->dts_cred.dcr_cred != NULL)
15446 crfree(state->dts_cred.dcr_cred);
15447
15448 /*
15449 * Now we can safely disable and destroy any enabled probes. Because
15450 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
15451 * (especially if they're all enabled), we take two passes through the
15452 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
15453 * in the second we disable whatever is left over.
15454 */
15455 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
15456 for (i = 0; i < state->dts_necbs; i++) {
15457 if ((ecb = state->dts_ecbs[i]) == NULL)
15458 continue;
15459
15460 if (match && ecb->dte_probe != NULL) {
15461 dtrace_probe_t *probe = ecb->dte_probe;
15462 dtrace_provider_t *prov = probe->dtpr_provider;
15463
15464 if (!(prov->dtpv_priv.dtpp_flags & match))
15465 continue;
15466 }
15467
15468 dtrace_ecb_disable(ecb);
15469 dtrace_ecb_destroy(ecb);
15470 }
15471
15472 if (!match)
15473 break;
15474 }
15475
15476 /*
15477 * Before we free the buffers, perform one more sync to assure that
15478 * every CPU is out of probe context.
15479 */
15480 dtrace_sync();
15481
15482 dtrace_buffer_free(state->dts_buffer);
15483 dtrace_buffer_free(state->dts_aggbuffer);
15484
15485 for (i = 0; i < nspec; i++)
15486 dtrace_buffer_free(spec[i].dtsp_buffer);
15487
15488 #ifdef illumos
15489 if (state->dts_cleaner != CYCLIC_NONE)
15490 cyclic_remove(state->dts_cleaner);
15491
15492 if (state->dts_deadman != CYCLIC_NONE)
15493 cyclic_remove(state->dts_deadman);
15494 #else
15495 callout_stop(&state->dts_cleaner);
15496 callout_drain(&state->dts_cleaner);
15497 callout_stop(&state->dts_deadman);
15498 callout_drain(&state->dts_deadman);
15499 #endif
15500
15501 dtrace_dstate_fini(&vstate->dtvs_dynvars);
15502 dtrace_vstate_fini(vstate);
15503 if (state->dts_ecbs != NULL)
15504 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
15505
15506 if (state->dts_aggregations != NULL) {
15507 #ifdef DEBUG
15508 for (i = 0; i < state->dts_naggregations; i++)
15509 ASSERT(state->dts_aggregations[i] == NULL);
15510 #endif
15511 ASSERT(state->dts_naggregations > 0);
15512 kmem_free(state->dts_aggregations,
15513 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
15514 }
15515
15516 kmem_free(state->dts_buffer, bufsize);
15517 kmem_free(state->dts_aggbuffer, bufsize);
15518
15519 for (i = 0; i < nspec; i++)
15520 kmem_free(spec[i].dtsp_buffer, bufsize);
15521
15522 if (spec != NULL)
15523 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15524
15525 dtrace_format_destroy(state);
15526
15527 if (state->dts_aggid_arena != NULL) {
15528 #ifdef illumos
15529 vmem_destroy(state->dts_aggid_arena);
15530 #else
15531 delete_unrhdr(state->dts_aggid_arena);
15532 #endif
15533 state->dts_aggid_arena = NULL;
15534 }
15535 #ifdef illumos
15536 ddi_soft_state_free(dtrace_softstate, minor);
15537 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
15538 #endif
15539 }
15540
15541 /*
15542 * DTrace Anonymous Enabling Functions
15543 */
15544 static dtrace_state_t *
15545 dtrace_anon_grab(void)
15546 {
15547 dtrace_state_t *state;
15548
15549 ASSERT(MUTEX_HELD(&dtrace_lock));
15550
15551 if ((state = dtrace_anon.dta_state) == NULL) {
15552 ASSERT(dtrace_anon.dta_enabling == NULL);
15553 return (NULL);
15554 }
15555
15556 ASSERT(dtrace_anon.dta_enabling != NULL);
15557 ASSERT(dtrace_retained != NULL);
15558
15559 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
15560 dtrace_anon.dta_enabling = NULL;
15561 dtrace_anon.dta_state = NULL;
15562
15563 return (state);
15564 }
15565
15566 static void
15567 dtrace_anon_property(void)
15568 {
15569 int i, rv;
15570 dtrace_state_t *state;
15571 dof_hdr_t *dof;
15572 char c[32]; /* enough for "dof-data-" + digits */
15573
15574 ASSERT(MUTEX_HELD(&dtrace_lock));
15575 ASSERT(MUTEX_HELD(&cpu_lock));
15576
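	/*
	 * Walk the "dof-data-N" properties in order (dof-data-0, dof-data-1,
	 * ...) until we fail to find one; each DOF found is slurped into the
	 * anonymous state below.
	 */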
15577 for (i = 0; ; i++) {
15578 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
15579
15580 dtrace_err_verbose = 1;
15581
15582 if ((dof = dtrace_dof_property(c)) == NULL) {
15583 dtrace_err_verbose = 0;
15584 break;
15585 }
15586
15587 #ifdef illumos
15588 /*
15589 * We want to create anonymous state, so we need to transition
15590 * the kernel debugger to indicate that DTrace is active. If
15591 * this fails (e.g. because the debugger has modified text in
15592 * some way), we won't continue with the processing.
15593 */
15594 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15595 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
15596 "enabling ignored.");
15597 dtrace_dof_destroy(dof);
15598 break;
15599 }
15600 #endif
15601
15602 /*
15603 * If we haven't allocated an anonymous state, we'll do so now.
15604 */
15605 if ((state = dtrace_anon.dta_state) == NULL) {
15606 state = dtrace_state_create(NULL, NULL);
15607 dtrace_anon.dta_state = state;
15608
15609 if (state == NULL) {
15610 /*
15611 * This basically shouldn't happen: the only
15612 * failure mode from dtrace_state_create() is a
15613 * failure of ddi_soft_state_zalloc() that
15614 * itself should never happen. Still, the
15615 * interface allows for a failure mode, and
15616 * we want to fail as gracefully as possible:
15617 * we'll emit an error message and cease
15618 * processing anonymous state in this case.
15619 */
15620 cmn_err(CE_WARN, "failed to create "
15621 "anonymous state");
15622 dtrace_dof_destroy(dof);
15623 break;
15624 }
15625 }
15626
15627 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
15628 &dtrace_anon.dta_enabling, 0, 0, B_TRUE);
15629
15630 if (rv == 0)
15631 rv = dtrace_dof_options(dof, state);
15632
15633 dtrace_err_verbose = 0;
15634 dtrace_dof_destroy(dof);
15635
15636 if (rv != 0) {
15637 /*
15638 * This is malformed DOF; chuck any anonymous state
15639 * that we created.
15640 */
15641 ASSERT(dtrace_anon.dta_enabling == NULL);
15642 dtrace_state_destroy(state);
15643 dtrace_anon.dta_state = NULL;
15644 break;
15645 }
15646
15647 ASSERT(dtrace_anon.dta_enabling != NULL);
15648 }
15649
15650 if (dtrace_anon.dta_enabling != NULL) {
15651 int rval;
15652
15653 /*
15654 * dtrace_enabling_retain() can only fail because we are
15655 * trying to retain more enablings than are allowed -- but
15656 * we only have one anonymous enabling, and we are guaranteed
15657 * to be allowed at least one retained enabling; we assert
15658 * that dtrace_enabling_retain() returns success.
15659 */
15660 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
15661 ASSERT(rval == 0);
15662
15663 dtrace_enabling_dump(dtrace_anon.dta_enabling);
15664 }
15665 }
15666
15667 /*
15668 * DTrace Helper Functions
15669 */
15670 static void
15671 dtrace_helper_trace(dtrace_helper_action_t *helper,
15672 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
15673 {
15674 uint32_t size, next, nnext, i;
15675 dtrace_helptrace_t *ent, *buffer;
15676 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags;
15677
15678 if ((buffer = dtrace_helptrace_buffer) == NULL)
15679 return;
15680
15681 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
15682
15683 /*
15684 * What would a tracing framework be without its own tracing
15685 * framework? (Well, a hell of a lot simpler, for starters...)
15686 */
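	/*
	 * (The subtraction below accounts for the one dtht_locals[] element
	 * that is already included in sizeof (dtrace_helptrace_t).)
	 */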
15687 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
15688 sizeof (uint64_t) - sizeof (uint64_t);
15689
15690 /*
15691 * Iterate until we can allocate a slot in the trace buffer.
15692 */
15693 do {
15694 next = dtrace_helptrace_next;
15695
15696 if (next + size < dtrace_helptrace_bufsize) {
15697 nnext = next + size;
15698 } else {
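			/*
			 * The record won't fit before the end of the buffer;
			 * wrap around and claim the first 'size' bytes.  (The
			 * nnext == size check below detects the wrap.)
			 */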
15699 nnext = size;
15700 }
15701 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
15702
15703 /*
15704 * We have our slot; fill it in.
15705 */
15706 if (nnext == size) {
15707 dtrace_helptrace_wrapped++;
15708 next = 0;
15709 }
15710
15711 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
15712 ent->dtht_helper = helper;
15713 ent->dtht_where = where;
15714 ent->dtht_nlocals = vstate->dtvs_nlocals;
15715
15716 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
15717 mstate->dtms_fltoffs : -1;
15718 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
15719 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval;
15720
15721 for (i = 0; i < vstate->dtvs_nlocals; i++) {
15722 dtrace_statvar_t *svar;
15723
15724 if ((svar = vstate->dtvs_locals[i]) == NULL)
15725 continue;
15726
15727 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
15728 ent->dtht_locals[i] =
15729 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu];
15730 }
15731 }
15732
15733 static uint64_t
15734 dtrace_helper(int which, dtrace_mstate_t *mstate,
15735 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
15736 {
15737 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags;
15738 uint64_t sarg0 = mstate->dtms_arg[0];
15739 uint64_t sarg1 = mstate->dtms_arg[1];
15740 uint64_t rval = 0;
15741 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
15742 dtrace_helper_action_t *helper;
15743 dtrace_vstate_t *vstate;
15744 dtrace_difo_t *pred;
15745 int i, trace = dtrace_helptrace_buffer != NULL;
15746
15747 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
15748
15749 if (helpers == NULL)
15750 return (0);
15751
15752 if ((helper = helpers->dthps_actions[which]) == NULL)
15753 return (0);
15754
15755 vstate = &helpers->dthps_vstate;
15756 mstate->dtms_arg[0] = arg0;
15757 mstate->dtms_arg[1] = arg1;
15758
15759 /*
15760 * Now iterate over each helper. If its predicate evaluates to 'true',
15761 * we'll call the corresponding actions. Note that the below calls
15762 * to dtrace_dif_emulate() may set faults in machine state. This is
15763 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
15764 * the stored DIF offset with its own (which is the desired behavior).
15765 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
15766 * from machine state; this is okay, too.
15767 */
15768 for (; helper != NULL; helper = helper->dtha_next) {
15769 if ((pred = helper->dtha_predicate) != NULL) {
15770 if (trace)
15771 dtrace_helper_trace(helper, mstate, vstate, 0);
15772
15773 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
15774 goto next;
15775
15776 if (*flags & CPU_DTRACE_FAULT)
15777 goto err;
15778 }
15779
15780 for (i = 0; i < helper->dtha_nactions; i++) {
15781 if (trace)
15782 dtrace_helper_trace(helper,
15783 mstate, vstate, i + 1);
15784
15785 rval = dtrace_dif_emulate(helper->dtha_actions[i],
15786 mstate, vstate, state);
15787
15788 if (*flags & CPU_DTRACE_FAULT)
15789 goto err;
15790 }
15791
15792 next:
15793 if (trace)
15794 dtrace_helper_trace(helper, mstate, vstate,
15795 DTRACE_HELPTRACE_NEXT);
15796 }
15797
15798 if (trace)
15799 dtrace_helper_trace(helper, mstate, vstate,
15800 DTRACE_HELPTRACE_DONE);
15801
15802 /*
15803 * Restore the arg0 that we saved upon entry.
15804 */
15805 mstate->dtms_arg[0] = sarg0;
15806 mstate->dtms_arg[1] = sarg1;
15807
15808 return (rval);
15809
15810 err:
15811 if (trace)
15812 dtrace_helper_trace(helper, mstate, vstate,
15813 DTRACE_HELPTRACE_ERR);
15814
15815 /*
15816 * Restore the arg0 that we saved upon entry.
15817 */
15818 mstate->dtms_arg[0] = sarg0;
15819 mstate->dtms_arg[1] = sarg1;
15820
15821 return (0);
15822 }
15823
15824 static void
15825 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
15826 dtrace_vstate_t *vstate)
15827 {
15828 int i;
15829
15830 if (helper->dtha_predicate != NULL)
15831 dtrace_difo_release(helper->dtha_predicate, vstate);
15832
15833 for (i = 0; i < helper->dtha_nactions; i++) {
15834 ASSERT(helper->dtha_actions[i] != NULL);
15835 dtrace_difo_release(helper->dtha_actions[i], vstate);
15836 }
15837
15838 kmem_free(helper->dtha_actions,
15839 helper->dtha_nactions * sizeof (dtrace_difo_t *));
15840 kmem_free(helper, sizeof (dtrace_helper_action_t));
15841 }
15842
15843 static int
15844 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen)
15845 {
15846 proc_t *p = curproc;
15847 dtrace_vstate_t *vstate;
15848 int i;
15849
15850 if (help == NULL)
15851 help = p->p_dtrace_helpers;
15852
15853 ASSERT(MUTEX_HELD(&dtrace_lock));
15854
15855 if (help == NULL || gen > help->dthps_generation)
15856 return (EINVAL);
15857
15858 vstate = &help->dthps_vstate;
15859
15860 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
15861 dtrace_helper_action_t *last = NULL, *h, *next;
15862
15863 for (h = help->dthps_actions[i]; h != NULL; h = next) {
15864 next = h->dtha_next;
15865
15866 if (h->dtha_generation == gen) {
15867 if (last != NULL) {
15868 last->dtha_next = next;
15869 } else {
15870 help->dthps_actions[i] = next;
15871 }
15872
15873 dtrace_helper_action_destroy(h, vstate);
15874 } else {
15875 last = h;
15876 }
15877 }
15878 }
15879
15880 /*
15881	 * Iterate until we've cleared out all helper providers with the
15882 * given generation number.
15883 */
15884 for (;;) {
15885 dtrace_helper_provider_t *prov;
15886
15887 /*
15888 * Look for a helper provider with the right generation. We
15889 * have to start back at the beginning of the list each time
15890 * because we drop dtrace_lock. It's unlikely that we'll make
15891 * more than two passes.
15892 */
15893 for (i = 0; i < help->dthps_nprovs; i++) {
15894 prov = help->dthps_provs[i];
15895
15896 if (prov->dthp_generation == gen)
15897 break;
15898 }
15899
15900 /*
15901 * If there were no matches, we're done.
15902 */
15903 if (i == help->dthps_nprovs)
15904 break;
15905
15906 /*
15907 * Move the last helper provider into this slot.
15908 */
15909 help->dthps_nprovs--;
15910 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
15911 help->dthps_provs[help->dthps_nprovs] = NULL;
15912
15913 mutex_exit(&dtrace_lock);
15914
15915 /*
15916 * If we have a meta provider, remove this helper provider.
15917 */
15918 mutex_enter(&dtrace_meta_lock);
15919 if (dtrace_meta_pid != NULL) {
15920 ASSERT(dtrace_deferred_pid == NULL);
15921 dtrace_helper_provider_remove(&prov->dthp_prov,
15922 p->p_pid);
15923 }
15924 mutex_exit(&dtrace_meta_lock);
15925
15926 dtrace_helper_provider_destroy(prov);
15927
15928 mutex_enter(&dtrace_lock);
15929 }
15930
15931 return (0);
15932 }
15933
15934 static int
15935 dtrace_helper_validate(dtrace_helper_action_t *helper)
15936 {
15937 int err = 0, i;
15938 dtrace_difo_t *dp;
15939
15940 if ((dp = helper->dtha_predicate) != NULL)
15941 err += dtrace_difo_validate_helper(dp);
15942
15943 for (i = 0; i < helper->dtha_nactions; i++)
15944 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
15945
15946 return (err == 0);
15947 }
15948
15949 static int
15950 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep,
15951 dtrace_helpers_t *help)
15952 {
15953 dtrace_helper_action_t *helper, *last;
15954 dtrace_actdesc_t *act;
15955 dtrace_vstate_t *vstate;
15956 dtrace_predicate_t *pred;
15957 int count = 0, nactions = 0, i;
15958
15959 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
15960 return (EINVAL);
15961
15962 last = help->dthps_actions[which];
15963 vstate = &help->dthps_vstate;
15964
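	/*
	 * Walk to the end of the list of helper actions of this type,
	 * counting entries as we go; 'last' is left pointing at the tail (or
	 * NULL if the list is empty) so that the new helper can be appended
	 * below.
	 */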
15965 for (count = 0; last != NULL; last = last->dtha_next) {
15966 count++;
15967 if (last->dtha_next == NULL)
15968 break;
15969 }
15970
15971 /*
15972 * If we already have dtrace_helper_actions_max helper actions for this
15973 * helper action type, we'll refuse to add a new one.
15974 */
15975 if (count >= dtrace_helper_actions_max)
15976 return (ENOSPC);
15977
15978 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
15979 helper->dtha_generation = help->dthps_generation;
15980
15981 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
15982 ASSERT(pred->dtp_difo != NULL);
15983 dtrace_difo_hold(pred->dtp_difo);
15984 helper->dtha_predicate = pred->dtp_difo;
15985 }
15986
15987 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
15988 if (act->dtad_kind != DTRACEACT_DIFEXPR)
15989 goto err;
15990
15991 if (act->dtad_difo == NULL)
15992 goto err;
15993
15994 nactions++;
15995 }
15996
15997 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
15998 (helper->dtha_nactions = nactions), KM_SLEEP);
15999
16000 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
16001 dtrace_difo_hold(act->dtad_difo);
16002 helper->dtha_actions[i++] = act->dtad_difo;
16003 }
16004
16005 if (!dtrace_helper_validate(helper))
16006 goto err;
16007
16008 if (last == NULL) {
16009 help->dthps_actions[which] = helper;
16010 } else {
16011 last->dtha_next = helper;
16012 }
16013
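	/*
	 * If this helper's virtual state has more local variables than we've
	 * seen before, grow dtrace_helptrace_nlocals (which sizes helper
	 * trace records) and reset the trace buffer cursor.
	 */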
16014 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
16015 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
16016 dtrace_helptrace_next = 0;
16017 }
16018
16019 return (0);
16020 err:
16021 dtrace_helper_action_destroy(helper, vstate);
16022 return (EINVAL);
16023 }
16024
16025 static void
16026 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
16027 dof_helper_t *dofhp)
16028 {
16029 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
16030
16031 mutex_enter(&dtrace_meta_lock);
16032 mutex_enter(&dtrace_lock);
16033
16034 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
16035 /*
16036 * If the dtrace module is loaded but not attached, or if
16037	 * there isn't a meta provider registered to deal with
16038 * these provider descriptions, we need to postpone creating
16039 * the actual providers until later.
16040 */
16041
16042 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
16043 dtrace_deferred_pid != help) {
16044 help->dthps_deferred = 1;
16045 help->dthps_pid = p->p_pid;
16046 help->dthps_next = dtrace_deferred_pid;
16047 help->dthps_prev = NULL;
16048 if (dtrace_deferred_pid != NULL)
16049 dtrace_deferred_pid->dthps_prev = help;
16050 dtrace_deferred_pid = help;
16051 }
16052
16053 mutex_exit(&dtrace_lock);
16054
16055 } else if (dofhp != NULL) {
16056 /*
16057 * If the dtrace module is loaded and we have a particular
16058 * helper provider description, pass that off to the
16059 * meta provider.
16060 */
16061
16062 mutex_exit(&dtrace_lock);
16063
16064 dtrace_helper_provide(dofhp, p->p_pid);
16065
16066 } else {
16067 /*
16068 * Otherwise, just pass all the helper provider descriptions
16069 * off to the meta provider.
16070 */
16071
16072 int i;
16073 mutex_exit(&dtrace_lock);
16074
16075 for (i = 0; i < help->dthps_nprovs; i++) {
16076 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
16077 p->p_pid);
16078 }
16079 }
16080
16081 mutex_exit(&dtrace_meta_lock);
16082 }
16083
16084 static int
16085 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen)
16086 {
16087 dtrace_helper_provider_t *hprov, **tmp_provs;
16088 uint_t tmp_maxprovs, i;
16089
16090 ASSERT(MUTEX_HELD(&dtrace_lock));
16091 ASSERT(help != NULL);
16092
16093 /*
16094 * If we already have dtrace_helper_providers_max helper providers,
16095	 * we'll refuse to add a new one.
16096 */
16097 if (help->dthps_nprovs >= dtrace_helper_providers_max)
16098 return (ENOSPC);
16099
16100 /*
16101 * Check to make sure this isn't a duplicate.
16102 */
16103 for (i = 0; i < help->dthps_nprovs; i++) {
16104 if (dofhp->dofhp_addr ==
16105 help->dthps_provs[i]->dthp_prov.dofhp_addr)
16106 return (EALREADY);
16107 }
16108
16109 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
16110 hprov->dthp_prov = *dofhp;
16111 hprov->dthp_ref = 1;
16112 hprov->dthp_generation = gen;
16113
16114 /*
16115 * Allocate a bigger table for helper providers if it's already full.
16116 */
16117 if (help->dthps_maxprovs == help->dthps_nprovs) {
16118 tmp_maxprovs = help->dthps_maxprovs;
16119 tmp_provs = help->dthps_provs;
16120
16121 if (help->dthps_maxprovs == 0)
16122 help->dthps_maxprovs = 2;
16123 else
16124 help->dthps_maxprovs *= 2;
16125 if (help->dthps_maxprovs > dtrace_helper_providers_max)
16126 help->dthps_maxprovs = dtrace_helper_providers_max;
16127
16128 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
16129
16130 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
16131 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16132
16133 if (tmp_provs != NULL) {
16134 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
16135 sizeof (dtrace_helper_provider_t *));
16136 kmem_free(tmp_provs, tmp_maxprovs *
16137 sizeof (dtrace_helper_provider_t *));
16138 }
16139 }
16140
16141 help->dthps_provs[help->dthps_nprovs] = hprov;
16142 help->dthps_nprovs++;
16143
16144 return (0);
16145 }
16146
16147 static void
16148 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
16149 {
16150 mutex_enter(&dtrace_lock);
16151
16152 if (--hprov->dthp_ref == 0) {
16153 dof_hdr_t *dof;
16154 mutex_exit(&dtrace_lock);
16155 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
16156 dtrace_dof_destroy(dof);
16157 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
16158 } else {
16159 mutex_exit(&dtrace_lock);
16160 }
16161 }
16162
16163 static int
16164 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
16165 {
16166 uintptr_t daddr = (uintptr_t)dof;
16167 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
16168 dof_provider_t *provider;
16169 dof_probe_t *probe;
16170 uint8_t *arg;
16171 char *strtab, *typestr;
16172 dof_stridx_t typeidx;
16173 size_t typesz;
16174 uint_t nprobes, j, k;
16175
16176 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
16177
16178 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
16179 dtrace_dof_error(dof, "misaligned section offset");
16180 return (-1);
16181 }
16182
16183 /*
16184 * The section needs to be large enough to contain the DOF provider
16185 * structure appropriate for the given version.
16186 */
16187 if (sec->dofs_size <
16188 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
16189 offsetof(dof_provider_t, dofpv_prenoffs) :
16190 sizeof (dof_provider_t))) {
16191 dtrace_dof_error(dof, "provider section too small");
16192 return (-1);
16193 }
16194
16195 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
16196 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
16197 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
16198 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
16199 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
16200
16201 if (str_sec == NULL || prb_sec == NULL ||
16202 arg_sec == NULL || off_sec == NULL)
16203 return (-1);
16204
16205 enoff_sec = NULL;
16206
16207 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
16208 provider->dofpv_prenoffs != DOF_SECT_NONE &&
16209 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
16210 provider->dofpv_prenoffs)) == NULL)
16211 return (-1);
16212
16213 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
16214
16215 if (provider->dofpv_name >= str_sec->dofs_size ||
16216 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
16217 dtrace_dof_error(dof, "invalid provider name");
16218 return (-1);
16219 }
16220
16221 if (prb_sec->dofs_entsize == 0 ||
16222 prb_sec->dofs_entsize > prb_sec->dofs_size) {
16223 dtrace_dof_error(dof, "invalid entry size");
16224 return (-1);
16225 }
16226
16227 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
16228 dtrace_dof_error(dof, "misaligned entry size");
16229 return (-1);
16230 }
16231
16232 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
16233 dtrace_dof_error(dof, "invalid entry size");
16234 return (-1);
16235 }
16236
16237 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
16238 dtrace_dof_error(dof, "misaligned section offset");
16239 return (-1);
16240 }
16241
16242 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
16243 dtrace_dof_error(dof, "invalid entry size");
16244 return (-1);
16245 }
16246
16247 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
16248
16249 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
16250
16251 /*
16252 * Take a pass through the probes to check for errors.
16253 */
16254 for (j = 0; j < nprobes; j++) {
16255 probe = (dof_probe_t *)(uintptr_t)(daddr +
16256 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
16257
16258 if (probe->dofpr_func >= str_sec->dofs_size) {
16259 dtrace_dof_error(dof, "invalid function name");
16260 return (-1);
16261 }
16262
16263 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
16264 dtrace_dof_error(dof, "function name too long");
16265 /*
16266 * Keep going if the function name is too long.
16267 * Unlike provider and probe names, we cannot reasonably
16268 * impose restrictions on function names, since they're
16269 * a property of the code being instrumented. We will
16270 * skip this probe in dtrace_helper_provide_one().
16271 */
16272 }
16273
16274 if (probe->dofpr_name >= str_sec->dofs_size ||
16275 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
16276 dtrace_dof_error(dof, "invalid probe name");
16277 return (-1);
16278 }
16279
16280 /*
16281 * The offset count must not wrap the index, and the offsets
16282 * must also not overflow the section's data.
16283 */
16284 if (probe->dofpr_offidx + probe->dofpr_noffs <
16285 probe->dofpr_offidx ||
16286 (probe->dofpr_offidx + probe->dofpr_noffs) *
16287 off_sec->dofs_entsize > off_sec->dofs_size) {
16288 dtrace_dof_error(dof, "invalid probe offset");
16289 return (-1);
16290 }
16291
16292 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
16293 /*
16294 * If there's no is-enabled offset section, make sure
16295 * there aren't any is-enabled offsets. Otherwise
16296 * perform the same checks as for probe offsets
16297 * (immediately above).
16298 */
16299 if (enoff_sec == NULL) {
16300 if (probe->dofpr_enoffidx != 0 ||
16301 probe->dofpr_nenoffs != 0) {
16302 dtrace_dof_error(dof, "is-enabled "
16303 "offsets with null section");
16304 return (-1);
16305 }
16306 } else if (probe->dofpr_enoffidx +
16307 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
16308 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
16309 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
16310 dtrace_dof_error(dof, "invalid is-enabled "
16311 "offset");
16312 return (-1);
16313 }
16314
16315 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
16316 dtrace_dof_error(dof, "zero probe and "
16317 "is-enabled offsets");
16318 return (-1);
16319 }
16320 } else if (probe->dofpr_noffs == 0) {
16321 dtrace_dof_error(dof, "zero probe offsets");
16322 return (-1);
16323 }
16324
16325 if (probe->dofpr_argidx + probe->dofpr_xargc <
16326 probe->dofpr_argidx ||
16327 (probe->dofpr_argidx + probe->dofpr_xargc) *
16328 arg_sec->dofs_entsize > arg_sec->dofs_size) {
16329 dtrace_dof_error(dof, "invalid args");
16330 return (-1);
16331 }
16332
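		/*
		 * Walk the native argument type strings, verifying that each
		 * lies within the string table and is no longer than
		 * DTRACE_ARGTYPELEN.
		 */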
16333 typeidx = probe->dofpr_nargv;
16334 typestr = strtab + probe->dofpr_nargv;
16335 for (k = 0; k < probe->dofpr_nargc; k++) {
16336 if (typeidx >= str_sec->dofs_size) {
16337 dtrace_dof_error(dof, "bad "
16338 "native argument type");
16339 return (-1);
16340 }
16341
16342 typesz = strlen(typestr) + 1;
16343 if (typesz > DTRACE_ARGTYPELEN) {
16344 dtrace_dof_error(dof, "native "
16345 "argument type too long");
16346 return (-1);
16347 }
16348 typeidx += typesz;
16349 typestr += typesz;
16350 }
16351
16352 typeidx = probe->dofpr_xargv;
16353 typestr = strtab + probe->dofpr_xargv;
16354 for (k = 0; k < probe->dofpr_xargc; k++) {
16355 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
16356 dtrace_dof_error(dof, "bad "
16357 "native argument index");
16358 return (-1);
16359 }
16360
16361 if (typeidx >= str_sec->dofs_size) {
16362 dtrace_dof_error(dof, "bad "
16363 "translated argument type");
16364 return (-1);
16365 }
16366
16367 typesz = strlen(typestr) + 1;
16368 if (typesz > DTRACE_ARGTYPELEN) {
16369 dtrace_dof_error(dof, "translated argument "
16370 "type too long");
16371 return (-1);
16372 }
16373
16374 typeidx += typesz;
16375 typestr += typesz;
16376 }
16377 }
16378
16379 return (0);
16380 }
16381
16382 static int
16383 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p)
16384 {
16385 dtrace_helpers_t *help;
16386 dtrace_vstate_t *vstate;
16387 dtrace_enabling_t *enab = NULL;
16388 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
16389 uintptr_t daddr = (uintptr_t)dof;
16390
16391 ASSERT(MUTEX_HELD(&dtrace_lock));
16392
16393 if ((help = p->p_dtrace_helpers) == NULL)
16394 help = dtrace_helpers_create(p);
16395
16396 vstate = &help->dthps_vstate;
16397
16398 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, dhp->dofhp_addr,
16399 dhp->dofhp_dof, B_FALSE)) != 0) {
16400 dtrace_dof_destroy(dof);
16401 return (rv);
16402 }
16403
16404 /*
16405 * Look for helper providers and validate their descriptions.
16406 */
16407 for (i = 0; i < dof->dofh_secnum; i++) {
16408 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
16409 dof->dofh_secoff + i * dof->dofh_secsize);
16410
16411 if (sec->dofs_type != DOF_SECT_PROVIDER)
16412 continue;
16413
16414 if (dtrace_helper_provider_validate(dof, sec) != 0) {
16415 dtrace_enabling_destroy(enab);
16416 dtrace_dof_destroy(dof);
16417 return (-1);
16418 }
16419
16420 nprovs++;
16421 }
16422
16423 /*
16424 * Now we need to walk through the ECB descriptions in the enabling.
16425 */
16426 for (i = 0; i < enab->dten_ndesc; i++) {
16427 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
16428 dtrace_probedesc_t *desc = &ep->dted_probe;
16429
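		/*
		 * Only ECBs whose probe description matches
		 * dtrace:helper:ustack: become helper actions; anything else
		 * is skipped (and reported via the "unmatched helpers" error
		 * below).
		 */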
16430 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
16431 continue;
16432
16433 if (strcmp(desc->dtpd_mod, "helper") != 0)
16434 continue;
16435
16436 if (strcmp(desc->dtpd_func, "ustack") != 0)
16437 continue;
16438
16439 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
16440 ep, help)) != 0) {
16441 /*
16442 * Adding this helper action failed -- we are now going
16443 * to rip out the entire generation and return failure.
16444 */
16445 (void) dtrace_helper_destroygen(help,
16446 help->dthps_generation);
16447 dtrace_enabling_destroy(enab);
16448 dtrace_dof_destroy(dof);
16449 return (-1);
16450 }
16451
16452 nhelpers++;
16453 }
16454
16455 if (nhelpers < enab->dten_ndesc)
16456 dtrace_dof_error(dof, "unmatched helpers");
16457
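	/*
	 * Each successful slurp gets its own generation number, which we
	 * return to our caller; it can later be passed to
	 * dtrace_helper_destroygen() to remove exactly this set of helpers.
	 */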
16458 gen = help->dthps_generation++;
16459 dtrace_enabling_destroy(enab);
16460
16461 if (nprovs > 0) {
16462 /*
16463 * Now that this is in-kernel, we change the sense of the
16464 * members: dofhp_dof denotes the in-kernel copy of the DOF
16465 * and dofhp_addr denotes the address at user-level.
16466 */
16467 dhp->dofhp_addr = dhp->dofhp_dof;
16468 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
16469
16470 if (dtrace_helper_provider_add(dhp, help, gen) == 0) {
16471 mutex_exit(&dtrace_lock);
16472 dtrace_helper_provider_register(p, help, dhp);
16473 mutex_enter(&dtrace_lock);
16474
16475 destroy = 0;
16476 }
16477 }
16478
16479 if (destroy)
16480 dtrace_dof_destroy(dof);
16481
16482 return (gen);
16483 }
16484
16485 static dtrace_helpers_t *
16486 dtrace_helpers_create(proc_t *p)
16487 {
16488 dtrace_helpers_t *help;
16489
16490 ASSERT(MUTEX_HELD(&dtrace_lock));
16491 ASSERT(p->p_dtrace_helpers == NULL);
16492
16493 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16494 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16495 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16496
16497 p->p_dtrace_helpers = help;
16498 dtrace_helpers++;
16499
16500 return (help);
16501 }
16502
16503 #ifdef illumos
16504 static
16505 #endif
16506 void
16507	#ifdef illumos
dtrace_helpers_destroy(void)
#else
dtrace_helpers_destroy(proc_t *p)
#endif
16508 {
16509 dtrace_helpers_t *help;
16510 dtrace_vstate_t *vstate;
16511 #ifdef illumos
16512 proc_t *p = curproc;
16513 #endif
16514 int i;
16515
16516 mutex_enter(&dtrace_lock);
16517
16518 ASSERT(p->p_dtrace_helpers != NULL);
16519 ASSERT(dtrace_helpers > 0);
16520
16521 help = p->p_dtrace_helpers;
16522 vstate = &help->dthps_vstate;
16523
16524 /*
16525 * We're now going to lose the help from this process.
16526 */
16527 p->p_dtrace_helpers = NULL;
16528 dtrace_sync();
16529
16530 /*
16531	 * Destroy the helper actions.
16532 */
16533 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16534 dtrace_helper_action_t *h, *next;
16535
16536 for (h = help->dthps_actions[i]; h != NULL; h = next) {
16537 next = h->dtha_next;
16538 dtrace_helper_action_destroy(h, vstate);
16539 h = next;
16540 }
16541 }
16542
16543 mutex_exit(&dtrace_lock);
16544
16545 /*
16546 * Destroy the helper providers.
16547 */
16548 if (help->dthps_maxprovs > 0) {
16549 mutex_enter(&dtrace_meta_lock);
16550 if (dtrace_meta_pid != NULL) {
16551 ASSERT(dtrace_deferred_pid == NULL);
16552
16553 for (i = 0; i < help->dthps_nprovs; i++) {
16554 dtrace_helper_provider_remove(
16555 &help->dthps_provs[i]->dthp_prov, p->p_pid);
16556 }
16557 } else {
16558 mutex_enter(&dtrace_lock);
16559 ASSERT(help->dthps_deferred == 0 ||
16560 help->dthps_next != NULL ||
16561 help->dthps_prev != NULL ||
16562 help == dtrace_deferred_pid);
16563
16564 /*
16565 * Remove the helper from the deferred list.
16566 */
16567 if (help->dthps_next != NULL)
16568 help->dthps_next->dthps_prev = help->dthps_prev;
16569 if (help->dthps_prev != NULL)
16570 help->dthps_prev->dthps_next = help->dthps_next;
16571 if (dtrace_deferred_pid == help) {
16572 dtrace_deferred_pid = help->dthps_next;
16573 ASSERT(help->dthps_prev == NULL);
16574 }
16575
16576 mutex_exit(&dtrace_lock);
16577 }
16578
16579 mutex_exit(&dtrace_meta_lock);
16580
16581 for (i = 0; i < help->dthps_nprovs; i++) {
16582 dtrace_helper_provider_destroy(help->dthps_provs[i]);
16583 }
16584
16585 kmem_free(help->dthps_provs, help->dthps_maxprovs *
16586 sizeof (dtrace_helper_provider_t *));
16587 }
16588
16589 mutex_enter(&dtrace_lock);
16590
16591 dtrace_vstate_fini(&help->dthps_vstate);
16592 kmem_free(help->dthps_actions,
16593 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16594 kmem_free(help, sizeof (dtrace_helpers_t));
16595
16596 --dtrace_helpers;
16597 mutex_exit(&dtrace_lock);
16598 }
16599
16600 #ifdef illumos
16601 static
16602 #endif
16603 void
16604 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
16605 {
16606 dtrace_helpers_t *help, *newhelp;
16607 dtrace_helper_action_t *helper, *new, *last;
16608 dtrace_difo_t *dp;
16609 dtrace_vstate_t *vstate;
16610 int i, j, sz, hasprovs = 0;
16611
16612 mutex_enter(&dtrace_lock);
16613 ASSERT(from->p_dtrace_helpers != NULL);
16614 ASSERT(dtrace_helpers > 0);
16615
16616 help = from->p_dtrace_helpers;
16617 newhelp = dtrace_helpers_create(to);
16618 ASSERT(to->p_dtrace_helpers != NULL);
16619
16620 newhelp->dthps_generation = help->dthps_generation;
16621 vstate = &newhelp->dthps_vstate;
16622
16623 /*
16624 * Duplicate the helper actions.
16625 */
16626 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16627 if ((helper = help->dthps_actions[i]) == NULL)
16628 continue;
16629
16630 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16631 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16632 KM_SLEEP);
16633 new->dtha_generation = helper->dtha_generation;
16634
16635 if ((dp = helper->dtha_predicate) != NULL) {
16636 dp = dtrace_difo_duplicate(dp, vstate);
16637 new->dtha_predicate = dp;
16638 }
16639
16640 new->dtha_nactions = helper->dtha_nactions;
16641 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
16642 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
16643
16644 for (j = 0; j < new->dtha_nactions; j++) {
16645 dtrace_difo_t *dp = helper->dtha_actions[j];
16646
16647 ASSERT(dp != NULL);
16648 dp = dtrace_difo_duplicate(dp, vstate);
16649 new->dtha_actions[j] = dp;
16650 }
16651
16652 if (last != NULL) {
16653 last->dtha_next = new;
16654 } else {
16655 newhelp->dthps_actions[i] = new;
16656 }
16657
16658 last = new;
16659 }
16660 }
16661
16662 /*
16663 * Duplicate the helper providers and register them with the
16664 * DTrace framework.
16665 */
16666 if (help->dthps_nprovs > 0) {
16667 newhelp->dthps_nprovs = help->dthps_nprovs;
16668 newhelp->dthps_maxprovs = help->dthps_nprovs;
16669 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
16670 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16671 for (i = 0; i < newhelp->dthps_nprovs; i++) {
16672 newhelp->dthps_provs[i] = help->dthps_provs[i];
16673 newhelp->dthps_provs[i]->dthp_ref++;
16674 }
16675
16676 hasprovs = 1;
16677 }
16678
16679 mutex_exit(&dtrace_lock);
16680
16681 if (hasprovs)
16682 dtrace_helper_provider_register(to, newhelp, NULL);
16683 }
16684
16685 /*
16686 * DTrace Hook Functions
16687 */
16688 static void
16689 dtrace_module_loaded(modctl_t *ctl)
16690 {
16691 dtrace_provider_t *prv;
16692
16693 mutex_enter(&dtrace_provider_lock);
16694 #ifdef illumos
16695 mutex_enter(&mod_lock);
16696 #endif
16697
16698 #ifdef illumos
16699 ASSERT(ctl->mod_busy);
16700 #endif
16701
16702 /*
16703	 * We're going to call each provider's per-module provide operation
16704 * specifying only this module.
16705 */
16706 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
16707 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
16708
16709 #ifdef illumos
16710 mutex_exit(&mod_lock);
16711 #endif
16712 mutex_exit(&dtrace_provider_lock);
16713
16714 /*
16715 * If we have any retained enablings, we need to match against them.
16716 * Enabling probes requires that cpu_lock be held, and we cannot hold
16717 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
16718 * module. (In particular, this happens when loading scheduling
16719 * classes.) So if we have any retained enablings, we need to dispatch
16720 * our task queue to do the match for us.
16721 */
16722 mutex_enter(&dtrace_lock);
16723
16724 if (dtrace_retained == NULL) {
16725 mutex_exit(&dtrace_lock);
16726 return;
16727 }
16728
16729 (void) taskq_dispatch(dtrace_taskq,
16730 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
16731
16732 mutex_exit(&dtrace_lock);
16733
16734 /*
16735 * And now, for a little heuristic sleaze: in general, we want to
16736 * match modules as soon as they load. However, we cannot guarantee
16737 * this, because it would lead us to the lock ordering violation
16738 * outlined above. The common case, of course, is that cpu_lock is
16739 * _not_ held -- so we delay here for a clock tick, hoping that that's
16740 * long enough for the task queue to do its work. If it's not, it's
16741 * not a serious problem -- it just means that the module that we
16742 * just loaded may not be immediately instrumentable.
16743 */
16744 delay(1);
16745 }
16746
16747 static void
16748 #ifdef illumos
16749 dtrace_module_unloaded(modctl_t *ctl)
16750 #else
16751 dtrace_module_unloaded(modctl_t *ctl, int *error)
16752 #endif
16753 {
16754 dtrace_probe_t template, *probe, *first, *next;
16755 dtrace_provider_t *prov;
16756 #ifndef illumos
16757 char modname[DTRACE_MODNAMELEN];
16758 size_t len;
16759 #endif
16760
16761 #ifdef illumos
16762 template.dtpr_mod = ctl->mod_modname;
16763 #else
16764 /* Handle the fact that ctl->filename may end in ".ko". */
16765 strlcpy(modname, ctl->filename, sizeof(modname));
16766 len = strlen(ctl->filename);
16767 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
16768 modname[len - 3] = '\0';
16769 template.dtpr_mod = modname;
16770 #endif
16771
16772 mutex_enter(&dtrace_provider_lock);
16773 #ifdef illumos
16774 mutex_enter(&mod_lock);
16775 #endif
16776 mutex_enter(&dtrace_lock);
16777
16778 #ifndef illumos
16779 if (ctl->nenabled > 0) {
16780 /* Don't allow unloads if a probe is enabled. */
16781 mutex_exit(&dtrace_provider_lock);
16782 mutex_exit(&dtrace_lock);
16783 *error = -1;
16784 printf(
16785 "kldunload: attempt to unload module that has DTrace probes enabled\n");
16786 return;
16787 }
16788 #endif
16789
16790 if (dtrace_bymod == NULL) {
16791 /*
16792 * The DTrace module is loaded (obviously) but not attached;
16793 * we don't have any work to do.
16794 */
16795 mutex_exit(&dtrace_provider_lock);
16796 #ifdef illumos
16797 mutex_exit(&mod_lock);
16798 #endif
16799 mutex_exit(&dtrace_lock);
16800 return;
16801 }
16802
16803 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
16804 probe != NULL; probe = probe->dtpr_nextmod) {
16805 if (probe->dtpr_ecb != NULL) {
16806 mutex_exit(&dtrace_provider_lock);
16807 #ifdef illumos
16808 mutex_exit(&mod_lock);
16809 #endif
16810 mutex_exit(&dtrace_lock);
16811
16812 /*
16813 * This shouldn't _actually_ be possible -- we're
16814 * unloading a module that has an enabled probe in it.
16815 * (It's normally up to the provider to make sure that
16816 * this can't happen.) However, because dtps_enable()
16817 * doesn't have a failure mode, there can be an
16818 * enable/unload race. Upshot: we don't want to
16819 * assert, but we're not going to disable the
16820 * probe, either.
16821 */
16822 if (dtrace_err_verbose) {
16823 #ifdef illumos
16824 cmn_err(CE_WARN, "unloaded module '%s' had "
16825 "enabled probes", ctl->mod_modname);
16826 #else
16827 cmn_err(CE_WARN, "unloaded module '%s' had "
16828 "enabled probes", modname);
16829 #endif
16830 }
16831
16832 return;
16833 }
16834 }
16835
16836 probe = first;
16837
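	/*
	 * Unhook each of the module's probes from the hash chains and from
	 * dtrace_probes[], collecting them on a private list headed by
	 * 'first' so that they can be destroyed after the dtrace_sync()
	 * below.
	 */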
16838 for (first = NULL; probe != NULL; probe = next) {
16839 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
16840
16841 dtrace_probes[probe->dtpr_id - 1] = NULL;
16842
16843 next = probe->dtpr_nextmod;
16844 dtrace_hash_remove(dtrace_bymod, probe);
16845 dtrace_hash_remove(dtrace_byfunc, probe);
16846 dtrace_hash_remove(dtrace_byname, probe);
16847
16848 if (first == NULL) {
16849 first = probe;
16850 probe->dtpr_nextmod = NULL;
16851 } else {
16852 probe->dtpr_nextmod = first;
16853 first = probe;
16854 }
16855 }
16856
16857 /*
16858 * We've removed all of the module's probes from the hash chains and
16859 * from the probe array. Now issue a dtrace_sync() to be sure that
16860 * everyone has cleared out from any probe array processing.
16861 */
16862 dtrace_sync();
16863
16864 for (probe = first; probe != NULL; probe = first) {
16865 first = probe->dtpr_nextmod;
16866 prov = probe->dtpr_provider;
16867 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
16868 probe->dtpr_arg);
16869 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
16870 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
16871 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
16872 #ifdef illumos
16873 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
16874 #else
16875 free_unr(dtrace_arena, probe->dtpr_id);
16876 #endif
16877 kmem_free(probe, sizeof (dtrace_probe_t));
16878 }
16879
16880 mutex_exit(&dtrace_lock);
16881 #ifdef illumos
16882 mutex_exit(&mod_lock);
16883 #endif
16884 mutex_exit(&dtrace_provider_lock);
16885 }
16886
16887 #ifndef illumos
16888 static void
16889 dtrace_kld_load(void *arg __unused, linker_file_t lf)
16890 {
16891
16892 dtrace_module_loaded(lf);
16893 }
16894
16895 static void
16896 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error)
16897 {
16898
16899 if (*error != 0)
16900 /* We already have an error, so don't do anything. */
16901 return;
16902 dtrace_module_unloaded(lf, error);
16903 }
16904 #endif
16905
16906 #ifdef illumos
16907 static void
16908 dtrace_suspend(void)
16909 {
16910 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
16911 }
16912
16913 static void
16914 dtrace_resume(void)
16915 {
16916 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
16917 }
16918 #endif
16919
16920 static int
16921 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
16922 {
16923 ASSERT(MUTEX_HELD(&cpu_lock));
16924 mutex_enter(&dtrace_lock);
16925
16926 switch (what) {
16927 case CPU_CONFIG: {
16928 dtrace_state_t *state;
16929 dtrace_optval_t *opt, rs, c;
16930
16931 /*
16932 * For now, we only allocate a new buffer for anonymous state.
16933 */
16934 if ((state = dtrace_anon.dta_state) == NULL)
16935 break;
16936
16937 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
16938 break;
16939
16940 opt = state->dts_options;
16941 c = opt[DTRACEOPT_CPU];
16942
16943 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
16944 break;
16945
16946 /*
16947 * Regardless of what the actual policy is, we're going to
16948 * temporarily set our resize policy to be manual. We're
16949 * also going to temporarily set our CPU option to denote
16950 * the newly configured CPU.
16951 */
16952 rs = opt[DTRACEOPT_BUFRESIZE];
16953 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
16954 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
16955
16956 (void) dtrace_state_buffers(state);
16957
16958 opt[DTRACEOPT_BUFRESIZE] = rs;
16959 opt[DTRACEOPT_CPU] = c;
16960
16961 break;
16962 }
16963
16964 case CPU_UNCONFIG:
16965 /*
16966 * We don't free the buffer in the CPU_UNCONFIG case. (The
16967 * buffer will be freed when the consumer exits.)
16968 */
16969 break;
16970
16971 default:
16972 break;
16973 }
16974
16975 mutex_exit(&dtrace_lock);
16976 return (0);
16977 }
16978
16979 #ifdef illumos
16980 static void
16981 dtrace_cpu_setup_initial(processorid_t cpu)
16982 {
16983 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
16984 }
16985 #endif
16986
16987 static void
16988 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
16989 {
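	/*
	 * If the table of toxic ranges is full (or has never been allocated),
	 * double dtrace_toxranges_max, allocate a new table and copy over any
	 * existing entries.
	 */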
16990 if (dtrace_toxranges >= dtrace_toxranges_max) {
16991 int osize, nsize;
16992 dtrace_toxrange_t *range;
16993
16994 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
16995
16996 if (osize == 0) {
16997 ASSERT(dtrace_toxrange == NULL);
16998 ASSERT(dtrace_toxranges_max == 0);
16999 dtrace_toxranges_max = 1;
17000 } else {
17001 dtrace_toxranges_max <<= 1;
17002 }
17003
17004 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17005 range = kmem_zalloc(nsize, KM_SLEEP);
17006
17007 if (dtrace_toxrange != NULL) {
17008 ASSERT(osize != 0);
17009 bcopy(dtrace_toxrange, range, osize);
17010 kmem_free(dtrace_toxrange, osize);
17011 }
17012
17013 dtrace_toxrange = range;
17014 }
17015
17016 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
17017 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
17018
17019 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
17020 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
17021 dtrace_toxranges++;
17022 }
17023
17024 static void
17025 dtrace_getf_barrier(void)
17026 {
17027 #ifdef illumos
17028 /*
17029 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
17030 * that contain calls to getf(), this routine will be called on every
17031 * closef() before either the underlying vnode is released or the
17032 * file_t itself is freed. By the time we are here, it is essential
17033 * that the file_t can no longer be accessed from a call to getf()
17034 * in probe context -- that assures that a dtrace_sync() can be used
17035 * to clear out any enablings referring to the old structures.
17036 */
17037 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
17038 kcred->cr_zone->zone_dtrace_getf != 0)
17039 dtrace_sync();
17040 #endif
17041 }
17042
17043 /*
17044 * DTrace Driver Cookbook Functions
17045 */
17046 #ifdef illumos
17047 /*ARGSUSED*/
17048 static int
17049 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
17050 {
17051 dtrace_provider_id_t id;
17052 dtrace_state_t *state = NULL;
17053 dtrace_enabling_t *enab;
17054
17055 mutex_enter(&cpu_lock);
17056 mutex_enter(&dtrace_provider_lock);
17057 mutex_enter(&dtrace_lock);
17058
17059 if (ddi_soft_state_init(&dtrace_softstate,
17060 sizeof (dtrace_state_t), 0) != 0) {
17061 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
17062 mutex_exit(&cpu_lock);
17063 mutex_exit(&dtrace_provider_lock);
17064 mutex_exit(&dtrace_lock);
17065 return (DDI_FAILURE);
17066 }
17067
17068 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
17069 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
17070 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
17071 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
17072 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
17073 ddi_remove_minor_node(devi, NULL);
17074 ddi_soft_state_fini(&dtrace_softstate);
17075 mutex_exit(&cpu_lock);
17076 mutex_exit(&dtrace_provider_lock);
17077 mutex_exit(&dtrace_lock);
17078 return (DDI_FAILURE);
17079 }
17080
17081 ddi_report_dev(devi);
17082 dtrace_devi = devi;
17083
17084 dtrace_modload = dtrace_module_loaded;
17085 dtrace_modunload = dtrace_module_unloaded;
17086 dtrace_cpu_init = dtrace_cpu_setup_initial;
17087 dtrace_helpers_cleanup = dtrace_helpers_destroy;
17088 dtrace_helpers_fork = dtrace_helpers_duplicate;
17089 dtrace_cpustart_init = dtrace_suspend;
17090 dtrace_cpustart_fini = dtrace_resume;
17091 dtrace_debugger_init = dtrace_suspend;
17092 dtrace_debugger_fini = dtrace_resume;
17093
17094 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17095
17096 ASSERT(MUTEX_HELD(&cpu_lock));
17097
17098 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
17099 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
17100 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
17101 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
17102 VM_SLEEP | VMC_IDENTIFIER);
17103 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
17104 1, INT_MAX, 0);
17105
17106 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
17107 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
17108 NULL, NULL, NULL, NULL, NULL, 0);
17109
17110 ASSERT(MUTEX_HELD(&cpu_lock));
17111 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
17112 offsetof(dtrace_probe_t, dtpr_nextmod),
17113 offsetof(dtrace_probe_t, dtpr_prevmod));
17114
17115 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
17116 offsetof(dtrace_probe_t, dtpr_nextfunc),
17117 offsetof(dtrace_probe_t, dtpr_prevfunc));
17118
17119 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
17120 offsetof(dtrace_probe_t, dtpr_nextname),
17121 offsetof(dtrace_probe_t, dtpr_prevname));
17122
17123 if (dtrace_retain_max < 1) {
17124 cmn_err(CE_WARN, "illegal value (%zu) for dtrace_retain_max; "
17125 "setting to 1", dtrace_retain_max);
17126 dtrace_retain_max = 1;
17127 }
17128
17129 /*
17130 * Now discover our toxic ranges.
17131 */
17132 dtrace_toxic_ranges(dtrace_toxrange_add);
17133
17134 /*
17135 * Before we register ourselves as a provider to our own framework,
17136 * we would like to assert that dtrace_provider is NULL -- but that's
17137 * not true if we were loaded as a dependency of a DTrace provider.
17138 * Once we've registered, we can assert that dtrace_provider is our
17139 * pseudo provider.
17140 */
17141 (void) dtrace_register("dtrace", &dtrace_provider_attr,
17142 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
17143
17144 ASSERT(dtrace_provider != NULL);
17145 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
17146
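	/*
	 * Create the probes that the dtrace provider itself offers:
	 * dtrace:::BEGIN, dtrace:::END and dtrace:::ERROR.
	 */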
17147 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17148 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
17149 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17150 dtrace_provider, NULL, NULL, "END", 0, NULL);
17151 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17152 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
17153
17154 dtrace_anon_property();
17155 mutex_exit(&cpu_lock);
17156
17157 /*
17158 * If there are already providers, we must ask them to provide their
17159 * probes, and then match any anonymous enabling against them. Note
17160	 * that there should be no other retained enablings at this time;
17161	 * the only retained enabling should be the anonymous
17162	 * enabling.
17163 */
17164 if (dtrace_anon.dta_enabling != NULL) {
17165 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
17166
17167 dtrace_enabling_provide(NULL);
17168 state = dtrace_anon.dta_state;
17169
17170 /*
17171 * We couldn't hold cpu_lock across the above call to
17172 * dtrace_enabling_provide(), but we must hold it to actually
17173 * enable the probes. We have to drop all of our locks, pick
17174 * up cpu_lock, and regain our locks before matching the
17175 * retained anonymous enabling.
17176 */
17177 mutex_exit(&dtrace_lock);
17178 mutex_exit(&dtrace_provider_lock);
17179
17180 mutex_enter(&cpu_lock);
17181 mutex_enter(&dtrace_provider_lock);
17182 mutex_enter(&dtrace_lock);
17183
17184 if ((enab = dtrace_anon.dta_enabling) != NULL)
17185 (void) dtrace_enabling_match(enab, NULL);
17186
17187 mutex_exit(&cpu_lock);
17188 }
17189
17190 mutex_exit(&dtrace_lock);
17191 mutex_exit(&dtrace_provider_lock);
17192
17193 if (state != NULL) {
17194 /*
17195 * If we created any anonymous state, set it going now.
17196 */
17197 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
17198 }
17199
17200 return (DDI_SUCCESS);
17201 }
17202 #endif /* illumos */
17203
17204 #ifndef illumos
17205 static void dtrace_dtr(void *);
17206 #endif
17207
17208 /*ARGSUSED*/
17209 static int
17210 #ifdef illumos
17211 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
17212 #else
17213 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
17214 #endif
17215 {
17216 dtrace_state_t *state;
17217 uint32_t priv;
17218 uid_t uid;
17219 zoneid_t zoneid;
17220
17221 #ifdef illumos
17222 if (getminor(*devp) == DTRACEMNRN_HELPER)
17223 return (0);
17224
17225 /*
17226 * If this wasn't an open with the "helper" minor, then it must be
17227 * the "dtrace" minor.
17228 */
17229	if (getminor(*devp) != DTRACEMNRN_DTRACE)
17230 return (ENXIO);
17231 #else
17232 cred_t *cred_p = NULL;
17233 cred_p = dev->si_cred;
17234
17235 /*
17236 * If no DTRACE_PRIV_* bits are set in the credential, then the
17237 * caller lacks sufficient permission to do anything with DTrace.
17238 */
17239 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
17240 if (priv == DTRACE_PRIV_NONE) {
17241 #endif
17242
17243 return (EACCES);
17244 }
17245
17246 /*
17247 * Ask all providers to provide all their probes.
17248 */
17249 mutex_enter(&dtrace_provider_lock);
17250 dtrace_probe_provide(NULL, NULL);
17251 mutex_exit(&dtrace_provider_lock);
17252
17253 mutex_enter(&cpu_lock);
17254 mutex_enter(&dtrace_lock);
17255 dtrace_opens++;
17256 dtrace_membar_producer();
17257
17258 #ifdef illumos
17259 /*
17260 * If the kernel debugger is active (that is, if the kernel debugger
17261 * modified text in some way), we won't allow the open.
17262 */
17263 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
17264 dtrace_opens--;
17265 mutex_exit(&cpu_lock);
17266 mutex_exit(&dtrace_lock);
17267 return (EBUSY);
17268 }
17269
17270 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
17271 /*
17272 * If DTrace helper tracing is enabled, we need to allocate the
17273 * trace buffer and initialize the values.
17274 */
17275 dtrace_helptrace_buffer =
17276 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
17277 dtrace_helptrace_next = 0;
17278 dtrace_helptrace_wrapped = 0;
17279 dtrace_helptrace_enable = 0;
17280 }
17281
17282 state = dtrace_state_create(devp, cred_p);
17283 #else
17284 state = dtrace_state_create(dev, NULL);
17285 devfs_set_cdevpriv(state, dtrace_dtr);
17286 #endif
17287
17288 mutex_exit(&cpu_lock);
17289
17290 if (state == NULL) {
17291 #ifdef illumos
17292 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
17293 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17294 #else
17295 --dtrace_opens;
17296 #endif
17297 mutex_exit(&dtrace_lock);
17298 return (EAGAIN);
17299 }
17300
17301 mutex_exit(&dtrace_lock);
17302
17303 return (0);
17304 }
17305
17306 /*ARGSUSED*/
17307 #ifdef illumos
17308 static int
17309 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
17310 #else
17311 static void
17312 dtrace_dtr(void *data)
17313 #endif
17314 {
17315 #ifdef illumos
17316 minor_t minor = getminor(dev);
17317 dtrace_state_t *state;
17318 #endif
17319 dtrace_helptrace_t *buf = NULL;
17320
17321 #ifdef illumos
17322 if (minor == DTRACEMNRN_HELPER)
17323 return (0);
17324
17325 state = ddi_get_soft_state(dtrace_softstate, minor);
17326 #else
17327 dtrace_state_t *state = data;
17328 #endif
17329
17330 mutex_enter(&cpu_lock);
17331 mutex_enter(&dtrace_lock);
17332
17333 #ifdef illumos
17334 if (state->dts_anon)
17335 #else
17336 if (state != NULL && state->dts_anon)
17337 #endif
17338 {
17339 /*
17340 * There is anonymous state. Destroy that first.
17341 */
17342 ASSERT(dtrace_anon.dta_state == NULL);
17343 dtrace_state_destroy(state->dts_anon);
17344 }
17345
17346 if (dtrace_helptrace_disable) {
17347 /*
17348 * If we have been told to disable helper tracing, set the
17349 * buffer to NULL before calling into dtrace_state_destroy();
17350 * we take advantage of its dtrace_sync() to know that no
17351 * CPU is in probe context with enabled helper tracing
17352 * after it returns.
17353 */
17354 buf = dtrace_helptrace_buffer;
17355 dtrace_helptrace_buffer = NULL;
17356 }
17357
17358 #ifdef illumos
17359 dtrace_state_destroy(state);
17360 #else
17361 if (state != NULL) {
17362 dtrace_state_destroy(state);
17363 kmem_free(state, 0);
17364 }
17365 #endif
17366 ASSERT(dtrace_opens > 0);
17367
17368 #ifdef illumos
17369 /*
17370 * Only relinquish control of the kernel debugger interface when there
17371 * are no consumers and no anonymous enablings.
17372 */
17373 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
17374 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17375 #else
17376 --dtrace_opens;
17377 #endif
17378
17379 if (buf != NULL) {
17380 kmem_free(buf, dtrace_helptrace_bufsize);
17381 dtrace_helptrace_disable = 0;
17382 }
17383
17384 mutex_exit(&dtrace_lock);
17385 mutex_exit(&cpu_lock);
17386
17387 #ifdef illumos
17388 return (0);
17389 #endif
17390 }
17391
17392 #ifdef illumos
17393 /*ARGSUSED*/
17394 static int
17395 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
17396 {
17397 int rval;
17398 dof_helper_t help, *dhp = NULL;
17399
17400 switch (cmd) {
17401 case DTRACEHIOC_ADDDOF:
17402 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
17403 dtrace_dof_error(NULL, "failed to copyin DOF helper");
17404 return (EFAULT);
17405 }
17406
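		/*
		 * Point 'arg' at the user address of the DOF contained in the
		 * helper description and fall through to the ADD case, which
		 * copies that DOF in.
		 */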
17407 dhp = &help;
17408 arg = (intptr_t)help.dofhp_dof;
17409 /*FALLTHROUGH*/
17410
17411 case DTRACEHIOC_ADD: {
17412 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
17413
17414 if (dof == NULL)
17415 return (rval);
17416
17417 mutex_enter(&dtrace_lock);
17418
17419 /*
17420 * dtrace_helper_slurp() takes responsibility for the dof --
17421 * it may free it now or it may save it and free it later.
17422 */
17423		if ((rval = dtrace_helper_slurp(dof, dhp, curproc)) != -1) {
17424 *rv = rval;
17425 rval = 0;
17426 } else {
17427 rval = EINVAL;
17428 }
17429
17430 mutex_exit(&dtrace_lock);
17431 return (rval);
17432 }
17433
17434 case DTRACEHIOC_REMOVE: {
17435 mutex_enter(&dtrace_lock);
17436 rval = dtrace_helper_destroygen(NULL, arg);
17437 mutex_exit(&dtrace_lock);
17438
17439 return (rval);
17440 }
17441
17442 default:
17443 break;
17444 }
17445
17446 return (ENOTTY);
17447 }
17448
17449 /*ARGSUSED*/
17450 static int
17451 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
17452 {
17453 minor_t minor = getminor(dev);
17454 dtrace_state_t *state;
17455 int rval;
17456
17457 if (minor == DTRACEMNRN_HELPER)
17458 return (dtrace_ioctl_helper(cmd, arg, rv));
17459
17460 state = ddi_get_soft_state(dtrace_softstate, minor);
17461
17462 if (state->dts_anon) {
17463 ASSERT(dtrace_anon.dta_state == NULL);
17464 state = state->dts_anon;
17465 }
17466
17467 switch (cmd) {
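	/*
	 * DTRACEIOC_PROVIDER: look up the named provider and copy out its
	 * privilege and stability attributes.
	 */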
17468 case DTRACEIOC_PROVIDER: {
17469 dtrace_providerdesc_t pvd;
17470 dtrace_provider_t *pvp;
17471
17472 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
17473 return (EFAULT);
17474
17475 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
17476 mutex_enter(&dtrace_provider_lock);
17477
17478 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
17479 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
17480 break;
17481 }
17482
17483 mutex_exit(&dtrace_provider_lock);
17484
17485 if (pvp == NULL)
17486 return (ESRCH);
17487
17488 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
17489 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
17490
17491 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
17492 return (EFAULT);
17493
17494 return (0);
17495 }
17496
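	/*
	 * DTRACEIOC_EPROBE: given an enabled probe ID, copy out the
	 * corresponding ECB's description along with the record
	 * descriptions of its non-aggregating, non-tuple actions.
	 */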
17497 case DTRACEIOC_EPROBE: {
17498 dtrace_eprobedesc_t epdesc;
17499 dtrace_ecb_t *ecb;
17500 dtrace_action_t *act;
17501 void *buf;
17502 size_t size;
17503 uintptr_t dest;
17504 int nrecs;
17505
17506 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
17507 return (EFAULT);
17508
17509 mutex_enter(&dtrace_lock);
17510
17511 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
17512 mutex_exit(&dtrace_lock);
17513 return (EINVAL);
17514 }
17515
17516 if (ecb->dte_probe == NULL) {
17517 mutex_exit(&dtrace_lock);
17518 return (EINVAL);
17519 }
17520
17521 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
17522 epdesc.dtepd_uarg = ecb->dte_uarg;
17523 epdesc.dtepd_size = ecb->dte_size;
17524
17525 nrecs = epdesc.dtepd_nrecs;
17526 epdesc.dtepd_nrecs = 0;
17527 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17528 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17529 continue;
17530
17531 epdesc.dtepd_nrecs++;
17532 }
17533
17534 /*
17535 * Now that we have the size, we need to allocate a temporary
17536 * buffer in which to store the complete description. We need
17537 * the temporary buffer to be able to drop dtrace_lock()
17538 * across the copyout(), below.
17539 */
17540 size = sizeof (dtrace_eprobedesc_t) +
17541 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
17542
17543 buf = kmem_alloc(size, KM_SLEEP);
17544 dest = (uintptr_t)buf;
17545
17546 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
17547 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
17548
17549 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17550 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17551 continue;
17552
17553 if (nrecs-- == 0)
17554 break;
17555
17556 bcopy(&act->dta_rec, (void *)dest,
17557 sizeof (dtrace_recdesc_t));
17558 dest += sizeof (dtrace_recdesc_t);
17559 }
17560
17561 mutex_exit(&dtrace_lock);
17562
17563 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
17564 kmem_free(buf, size);
17565 return (EFAULT);
17566 }
17567
17568 kmem_free(buf, size);
17569 return (0);
17570 }
17571
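	/*
	 * DTRACEIOC_AGGDESC: given an aggregation ID, copy out the
	 * aggregation's description and record descriptions, with each
	 * record offset rebased to the start of the aggregation.
	 */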
17572 case DTRACEIOC_AGGDESC: {
17573 dtrace_aggdesc_t aggdesc;
17574 dtrace_action_t *act;
17575 dtrace_aggregation_t *agg;
17576 int nrecs;
17577 uint32_t offs;
17578 dtrace_recdesc_t *lrec;
17579 void *buf;
17580 size_t size;
17581 uintptr_t dest;
17582
17583 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
17584 return (EFAULT);
17585
17586 mutex_enter(&dtrace_lock);
17587
17588 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
17589 mutex_exit(&dtrace_lock);
17590 return (EINVAL);
17591 }
17592
17593 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
17594
17595 nrecs = aggdesc.dtagd_nrecs;
17596 aggdesc.dtagd_nrecs = 0;
17597
17598 offs = agg->dtag_base;
17599 lrec = &agg->dtag_action.dta_rec;
17600 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
17601
17602 for (act = agg->dtag_first; ; act = act->dta_next) {
17603 ASSERT(act->dta_intuple ||
17604 DTRACEACT_ISAGG(act->dta_kind));
17605
17606 /*
17607 * If this action has a record size of zero, it
17608 * denotes an argument to the aggregating action.
17609 * Because the presence of this record doesn't (or
17610 * shouldn't) affect the way the data is interpreted,
17611 			 * we don't copy it out, sparing user level the
17612 * confusion of dealing with a zero-length record.
17613 */
17614 if (act->dta_rec.dtrd_size == 0) {
17615 ASSERT(agg->dtag_hasarg);
17616 continue;
17617 }
17618
17619 aggdesc.dtagd_nrecs++;
17620
17621 if (act == &agg->dtag_action)
17622 break;
17623 }
17624
17625 /*
17626 * Now that we have the size, we need to allocate a temporary
17627 * buffer in which to store the complete description. We need
17628 * the temporary buffer to be able to drop dtrace_lock()
17629 * across the copyout(), below.
17630 */
17631 size = sizeof (dtrace_aggdesc_t) +
17632 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
17633
17634 buf = kmem_alloc(size, KM_SLEEP);
17635 dest = (uintptr_t)buf;
17636
17637 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
17638 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
17639
17640 for (act = agg->dtag_first; ; act = act->dta_next) {
17641 dtrace_recdesc_t rec = act->dta_rec;
17642
17643 /*
17644 * See the comment in the above loop for why we pass
17645 * over zero-length records.
17646 */
17647 if (rec.dtrd_size == 0) {
17648 ASSERT(agg->dtag_hasarg);
17649 continue;
17650 }
17651
17652 if (nrecs-- == 0)
17653 break;
17654
17655 rec.dtrd_offset -= offs;
17656 bcopy(&rec, (void *)dest, sizeof (rec));
17657 dest += sizeof (dtrace_recdesc_t);
17658
17659 if (act == &agg->dtag_action)
17660 break;
17661 }
17662
17663 mutex_exit(&dtrace_lock);
17664
17665 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
17666 kmem_free(buf, size);
17667 return (EFAULT);
17668 }
17669
17670 kmem_free(buf, size);
17671 return (0);
17672 }
17673
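	/*
	 * DTRACEIOC_ENABLE: copy in a DOF, turn it into an enabling,
	 * match it against the probes and retain it.  A NULL argument
	 * merely requests that all retained enablings be rematched.
	 */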
17674 case DTRACEIOC_ENABLE: {
17675 dof_hdr_t *dof;
17676 dtrace_enabling_t *enab = NULL;
17677 dtrace_vstate_t *vstate;
17678 int err = 0;
17679
17680 *rv = 0;
17681
17682 /*
17683 * If a NULL argument has been passed, we take this as our
17684 * cue to reevaluate our enablings.
17685 */
17686 if (arg == NULL) {
17687 dtrace_enabling_matchall();
17688
17689 return (0);
17690 }
17691
17692 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
17693 return (rval);
17694
17695 mutex_enter(&cpu_lock);
17696 mutex_enter(&dtrace_lock);
17697 vstate = &state->dts_vstate;
17698
17699 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
17700 mutex_exit(&dtrace_lock);
17701 mutex_exit(&cpu_lock);
17702 dtrace_dof_destroy(dof);
17703 return (EBUSY);
17704 }
17705
17706 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
17707 mutex_exit(&dtrace_lock);
17708 mutex_exit(&cpu_lock);
17709 dtrace_dof_destroy(dof);
17710 return (EINVAL);
17711 }
17712
17713 if ((rval = dtrace_dof_options(dof, state)) != 0) {
17714 dtrace_enabling_destroy(enab);
17715 mutex_exit(&dtrace_lock);
17716 mutex_exit(&cpu_lock);
17717 dtrace_dof_destroy(dof);
17718 return (rval);
17719 }
17720
17721 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
17722 err = dtrace_enabling_retain(enab);
17723 } else {
17724 dtrace_enabling_destroy(enab);
17725 }
17726
17727 mutex_exit(&cpu_lock);
17728 mutex_exit(&dtrace_lock);
17729 dtrace_dof_destroy(dof);
17730
17731 return (err);
17732 }
17733
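	/*
	 * DTRACEIOC_REPLICATE: replicate each enabling that matches the
	 * "match" description into a new enabling with the "create"
	 * description.
	 */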
17734 case DTRACEIOC_REPLICATE: {
17735 dtrace_repldesc_t desc;
17736 dtrace_probedesc_t *match = &desc.dtrpd_match;
17737 dtrace_probedesc_t *create = &desc.dtrpd_create;
17738 int err;
17739
17740 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17741 return (EFAULT);
17742
17743 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17744 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17745 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17746 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17747
17748 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17749 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17750 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17751 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17752
17753 mutex_enter(&dtrace_lock);
17754 err = dtrace_enabling_replicate(state, match, create);
17755 mutex_exit(&dtrace_lock);
17756
17757 return (err);
17758 }
17759
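	/*
	 * DTRACEIOC_PROBES returns the description of the next probe (at
	 * or above the given ID) visible to the caller;
	 * DTRACEIOC_PROBEMATCH returns the next such probe that also
	 * matches the given description.
	 */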
17760 case DTRACEIOC_PROBEMATCH:
17761 case DTRACEIOC_PROBES: {
17762 dtrace_probe_t *probe = NULL;
17763 dtrace_probedesc_t desc;
17764 dtrace_probekey_t pkey;
17765 dtrace_id_t i;
17766 int m = 0;
17767 uint32_t priv;
17768 uid_t uid;
17769 zoneid_t zoneid;
17770
17771 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17772 return (EFAULT);
17773
17774 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
17775 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
17776 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
17777 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
17778
17779 /*
17780 * Before we attempt to match this probe, we want to give
17781 * all providers the opportunity to provide it.
17782 */
17783 if (desc.dtpd_id == DTRACE_IDNONE) {
17784 mutex_enter(&dtrace_provider_lock);
17785 dtrace_probe_provide(&desc, NULL);
17786 mutex_exit(&dtrace_provider_lock);
17787 desc.dtpd_id++;
17788 }
17789
17790 if (cmd == DTRACEIOC_PROBEMATCH) {
17791 dtrace_probekey(&desc, &pkey);
17792 pkey.dtpk_id = DTRACE_IDNONE;
17793 }
17794
17795 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
17796
17797 mutex_enter(&dtrace_lock);
17798
17799 if (cmd == DTRACEIOC_PROBEMATCH) {
17800 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
17801 if ((probe = dtrace_probes[i - 1]) != NULL &&
17802 (m = dtrace_match_probe(probe, &pkey,
17803 priv, uid, zoneid)) != 0)
17804 break;
17805 }
17806
17807 if (m < 0) {
17808 mutex_exit(&dtrace_lock);
17809 return (EINVAL);
17810 }
17811
17812 } else {
17813 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
17814 if ((probe = dtrace_probes[i - 1]) != NULL &&
17815 dtrace_match_priv(probe, priv, uid, zoneid))
17816 break;
17817 }
17818 }
17819
17820 if (probe == NULL) {
17821 mutex_exit(&dtrace_lock);
17822 return (ESRCH);
17823 }
17824
17825 dtrace_probe_description(probe, &desc);
17826 mutex_exit(&dtrace_lock);
17827
17828 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17829 return (EFAULT);
17830
17831 return (0);
17832 }
17833
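	/*
	 * DTRACEIOC_PROBEARG: ask the probe's provider for the native and
	 * translated types of the specified argument.
	 */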
17834 case DTRACEIOC_PROBEARG: {
17835 dtrace_argdesc_t desc;
17836 dtrace_probe_t *probe;
17837 dtrace_provider_t *prov;
17838
17839 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17840 return (EFAULT);
17841
17842 if (desc.dtargd_id == DTRACE_IDNONE)
17843 return (EINVAL);
17844
17845 if (desc.dtargd_ndx == DTRACE_ARGNONE)
17846 return (EINVAL);
17847
17848 mutex_enter(&dtrace_provider_lock);
17849 mutex_enter(&mod_lock);
17850 mutex_enter(&dtrace_lock);
17851
17852 if (desc.dtargd_id > dtrace_nprobes) {
17853 mutex_exit(&dtrace_lock);
17854 mutex_exit(&mod_lock);
17855 mutex_exit(&dtrace_provider_lock);
17856 return (EINVAL);
17857 }
17858
17859 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
17860 mutex_exit(&dtrace_lock);
17861 mutex_exit(&mod_lock);
17862 mutex_exit(&dtrace_provider_lock);
17863 return (EINVAL);
17864 }
17865
17866 mutex_exit(&dtrace_lock);
17867
17868 prov = probe->dtpr_provider;
17869
17870 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
17871 /*
17872 * There isn't any typed information for this probe.
17873 * Set the argument number to DTRACE_ARGNONE.
17874 */
17875 desc.dtargd_ndx = DTRACE_ARGNONE;
17876 } else {
17877 desc.dtargd_native[0] = '\0';
17878 desc.dtargd_xlate[0] = '\0';
17879 desc.dtargd_mapping = desc.dtargd_ndx;
17880
17881 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
17882 probe->dtpr_id, probe->dtpr_arg, &desc);
17883 }
17884
17885 mutex_exit(&mod_lock);
17886 mutex_exit(&dtrace_provider_lock);
17887
17888 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
17889 return (EFAULT);
17890
17891 return (0);
17892 }
17893
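	/*
	 * DTRACEIOC_GO: start tracing on this state, copying out the CPU
	 * on which the BEGIN probe was processed.
	 */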
17894 case DTRACEIOC_GO: {
17895 processorid_t cpuid;
17896 rval = dtrace_state_go(state, &cpuid);
17897
17898 if (rval != 0)
17899 return (rval);
17900
17901 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
17902 return (EFAULT);
17903
17904 return (0);
17905 }
17906
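	/*
	 * DTRACEIOC_STOP: stop tracing on this state, copying out the CPU
	 * on which the END probe was processed.
	 */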
17907 case DTRACEIOC_STOP: {
17908 processorid_t cpuid;
17909
17910 mutex_enter(&dtrace_lock);
17911 rval = dtrace_state_stop(state, &cpuid);
17912 mutex_exit(&dtrace_lock);
17913
17914 if (rval != 0)
17915 return (rval);
17916
17917 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
17918 return (EFAULT);
17919
17920 return (0);
17921 }
17922
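	/*
	 * DTRACEIOC_DOFGET: generate a DOF representation of this state's
	 * enablings and copy out no more than the caller requested.
	 */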
17923 case DTRACEIOC_DOFGET: {
17924 dof_hdr_t hdr, *dof;
17925 uint64_t len;
17926
17927 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
17928 return (EFAULT);
17929
17930 mutex_enter(&dtrace_lock);
17931 dof = dtrace_dof_create(state);
17932 mutex_exit(&dtrace_lock);
17933
17934 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
17935 rval = copyout(dof, (void *)arg, len);
17936 dtrace_dof_destroy(dof);
17937
17938 return (rval == 0 ? 0 : EFAULT);
17939 }
17940
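	/*
	 * DTRACEIOC_BUFSNAP and DTRACEIOC_AGGSNAP snapshot the principal
	 * or aggregation buffer for the specified CPU.  Ring and fill
	 * buffers are copied out in place (only once tracing has stopped);
	 * switching buffers are switched via a cross call and the inactive
	 * half is copied out.
	 */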
17941 case DTRACEIOC_AGGSNAP:
17942 case DTRACEIOC_BUFSNAP: {
17943 dtrace_bufdesc_t desc;
17944 caddr_t cached;
17945 dtrace_buffer_t *buf;
17946
17947 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
17948 return (EFAULT);
17949
17950 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
17951 return (EINVAL);
17952
17953 mutex_enter(&dtrace_lock);
17954
17955 if (cmd == DTRACEIOC_BUFSNAP) {
17956 buf = &state->dts_buffer[desc.dtbd_cpu];
17957 } else {
17958 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
17959 }
17960
17961 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
17962 size_t sz = buf->dtb_offset;
17963
17964 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
17965 mutex_exit(&dtrace_lock);
17966 return (EBUSY);
17967 }
17968
17969 /*
17970 * If this buffer has already been consumed, we're
17971 * going to indicate that there's nothing left here
17972 * to consume.
17973 */
17974 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
17975 mutex_exit(&dtrace_lock);
17976
17977 desc.dtbd_size = 0;
17978 desc.dtbd_drops = 0;
17979 desc.dtbd_errors = 0;
17980 desc.dtbd_oldest = 0;
17981 sz = sizeof (desc);
17982
17983 if (copyout(&desc, (void *)arg, sz) != 0)
17984 return (EFAULT);
17985
17986 return (0);
17987 }
17988
17989 /*
17990 * If this is a ring buffer that has wrapped, we want
17991 * to copy the whole thing out.
17992 */
17993 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
17994 dtrace_buffer_polish(buf);
17995 sz = buf->dtb_size;
17996 }
17997
17998 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
17999 mutex_exit(&dtrace_lock);
18000 return (EFAULT);
18001 }
18002
18003 desc.dtbd_size = sz;
18004 desc.dtbd_drops = buf->dtb_drops;
18005 desc.dtbd_errors = buf->dtb_errors;
18006 desc.dtbd_oldest = buf->dtb_xamot_offset;
18007 desc.dtbd_timestamp = dtrace_gethrtime();
18008
18009 mutex_exit(&dtrace_lock);
18010
18011 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18012 return (EFAULT);
18013
18014 buf->dtb_flags |= DTRACEBUF_CONSUMED;
18015
18016 return (0);
18017 }
18018
18019 if (buf->dtb_tomax == NULL) {
18020 ASSERT(buf->dtb_xamot == NULL);
18021 mutex_exit(&dtrace_lock);
18022 return (ENOENT);
18023 }
18024
18025 cached = buf->dtb_tomax;
18026 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
18027
18028 dtrace_xcall(desc.dtbd_cpu,
18029 (dtrace_xcall_t)dtrace_buffer_switch, buf);
18030
18031 state->dts_errors += buf->dtb_xamot_errors;
18032
18033 /*
18034 * If the buffers did not actually switch, then the cross call
18035 * did not take place -- presumably because the given CPU is
18036 * not in the ready set. If this is the case, we'll return
18037 * ENOENT.
18038 */
18039 if (buf->dtb_tomax == cached) {
18040 ASSERT(buf->dtb_xamot != cached);
18041 mutex_exit(&dtrace_lock);
18042 return (ENOENT);
18043 }
18044
18045 ASSERT(cached == buf->dtb_xamot);
18046
18047 /*
18048 * We have our snapshot; now copy it out.
18049 */
18050 if (copyout(buf->dtb_xamot, desc.dtbd_data,
18051 buf->dtb_xamot_offset) != 0) {
18052 mutex_exit(&dtrace_lock);
18053 return (EFAULT);
18054 }
18055
18056 desc.dtbd_size = buf->dtb_xamot_offset;
18057 desc.dtbd_drops = buf->dtb_xamot_drops;
18058 desc.dtbd_errors = buf->dtb_xamot_errors;
18059 desc.dtbd_oldest = 0;
18060 desc.dtbd_timestamp = buf->dtb_switched;
18061
18062 mutex_exit(&dtrace_lock);
18063
18064 /*
18065 * Finally, copy out the buffer description.
18066 */
18067 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18068 return (EFAULT);
18069
18070 return (0);
18071 }
18072
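	/*
	 * DTRACEIOC_CONF: copy out static configuration -- the DIF
	 * version, register counts and native CTF data model.
	 */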
18073 case DTRACEIOC_CONF: {
18074 dtrace_conf_t conf;
18075
18076 bzero(&conf, sizeof (conf));
18077 conf.dtc_difversion = DIF_VERSION;
18078 conf.dtc_difintregs = DIF_DIR_NREGS;
18079 conf.dtc_diftupregs = DIF_DTR_NREGS;
18080 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
18081
18082 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
18083 return (EFAULT);
18084
18085 return (0);
18086 }
18087
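	/*
	 * DTRACEIOC_STATUS: update the deadman timestamp and copy out
	 * consumer status:  drop and error counts, filled buffers, and
	 * whether this state is draining or has been killed.
	 */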
18088 case DTRACEIOC_STATUS: {
18089 dtrace_status_t stat;
18090 dtrace_dstate_t *dstate;
18091 int i, j;
18092 uint64_t nerrs;
18093
18094 /*
18095 * See the comment in dtrace_state_deadman() for the reason
18096 * for setting dts_laststatus to INT64_MAX before setting
18097 * it to the correct value.
18098 */
18099 state->dts_laststatus = INT64_MAX;
18100 dtrace_membar_producer();
18101 state->dts_laststatus = dtrace_gethrtime();
18102
18103 bzero(&stat, sizeof (stat));
18104
18105 mutex_enter(&dtrace_lock);
18106
18107 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
18108 mutex_exit(&dtrace_lock);
18109 return (ENOENT);
18110 }
18111
18112 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
18113 stat.dtst_exiting = 1;
18114
18115 nerrs = state->dts_errors;
18116 dstate = &state->dts_vstate.dtvs_dynvars;
18117
18118 for (i = 0; i < NCPU; i++) {
18119 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
18120
18121 stat.dtst_dyndrops += dcpu->dtdsc_drops;
18122 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
18123 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
18124
18125 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
18126 stat.dtst_filled++;
18127
18128 nerrs += state->dts_buffer[i].dtb_errors;
18129
18130 for (j = 0; j < state->dts_nspeculations; j++) {
18131 dtrace_speculation_t *spec;
18132 dtrace_buffer_t *buf;
18133
18134 spec = &state->dts_speculations[j];
18135 buf = &spec->dtsp_buffer[i];
18136 stat.dtst_specdrops += buf->dtb_xamot_drops;
18137 }
18138 }
18139
18140 stat.dtst_specdrops_busy = state->dts_speculations_busy;
18141 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
18142 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
18143 stat.dtst_dblerrors = state->dts_dblerrors;
18144 stat.dtst_killed =
18145 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
18146 stat.dtst_errors = nerrs;
18147
18148 mutex_exit(&dtrace_lock);
18149
18150 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
18151 return (EFAULT);
18152
18153 return (0);
18154 }
18155
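	/*
	 * DTRACEIOC_FORMAT: copy out the format string for the given
	 * format index, or just the required length if the caller's
	 * buffer is too small.
	 */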
18156 case DTRACEIOC_FORMAT: {
18157 dtrace_fmtdesc_t fmt;
18158 char *str;
18159 int len;
18160
18161 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
18162 return (EFAULT);
18163
18164 mutex_enter(&dtrace_lock);
18165
18166 if (fmt.dtfd_format == 0 ||
18167 fmt.dtfd_format > state->dts_nformats) {
18168 mutex_exit(&dtrace_lock);
18169 return (EINVAL);
18170 }
18171
18172 /*
18173 * Format strings are allocated contiguously and they are
18174 * never freed; if a format index is less than the number
18175 * of formats, we can assert that the format map is non-NULL
18176 * and that the format for the specified index is non-NULL.
18177 */
18178 ASSERT(state->dts_formats != NULL);
18179 str = state->dts_formats[fmt.dtfd_format - 1];
18180 ASSERT(str != NULL);
18181
18182 len = strlen(str) + 1;
18183
18184 if (len > fmt.dtfd_length) {
18185 fmt.dtfd_length = len;
18186
18187 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
18188 mutex_exit(&dtrace_lock);
18189 return (EINVAL);
18190 }
18191 } else {
18192 if (copyout(str, fmt.dtfd_string, len) != 0) {
18193 mutex_exit(&dtrace_lock);
18194 return (EINVAL);
18195 }
18196 }
18197
18198 mutex_exit(&dtrace_lock);
18199 return (0);
18200 }
18201
18202 default:
18203 break;
18204 }
18205
18206 return (ENOTTY);
18207 }
18208
18209 /*ARGSUSED*/
18210 static int
18211 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
18212 {
18213 dtrace_state_t *state;
18214
18215 switch (cmd) {
18216 case DDI_DETACH:
18217 break;
18218
18219 case DDI_SUSPEND:
18220 return (DDI_SUCCESS);
18221
18222 default:
18223 return (DDI_FAILURE);
18224 }
18225
18226 mutex_enter(&cpu_lock);
18227 mutex_enter(&dtrace_provider_lock);
18228 mutex_enter(&dtrace_lock);
18229
18230 ASSERT(dtrace_opens == 0);
18231
18232 if (dtrace_helpers > 0) {
18233 mutex_exit(&dtrace_provider_lock);
18234 mutex_exit(&dtrace_lock);
18235 mutex_exit(&cpu_lock);
18236 return (DDI_FAILURE);
18237 }
18238
18239 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
18240 mutex_exit(&dtrace_provider_lock);
18241 mutex_exit(&dtrace_lock);
18242 mutex_exit(&cpu_lock);
18243 return (DDI_FAILURE);
18244 }
18245
18246 dtrace_provider = NULL;
18247
18248 if ((state = dtrace_anon_grab()) != NULL) {
18249 /*
18250 		 * If there were ECBs on this state, the provider should not
18251 		 * have been allowed to detach; assert that there are
18252 		 * none.
18253 */
18254 ASSERT(state->dts_necbs == 0);
18255 dtrace_state_destroy(state);
18256
18257 /*
18258 * If we're being detached with anonymous state, we need to
18259 * indicate to the kernel debugger that DTrace is now inactive.
18260 */
18261 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
18262 }
18263
18264 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
18265 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
18266 dtrace_cpu_init = NULL;
18267 dtrace_helpers_cleanup = NULL;
18268 dtrace_helpers_fork = NULL;
18269 dtrace_cpustart_init = NULL;
18270 dtrace_cpustart_fini = NULL;
18271 dtrace_debugger_init = NULL;
18272 dtrace_debugger_fini = NULL;
18273 dtrace_modload = NULL;
18274 dtrace_modunload = NULL;
18275
18276 ASSERT(dtrace_getf == 0);
18277 ASSERT(dtrace_closef == NULL);
18278
18279 mutex_exit(&cpu_lock);
18280
18281 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
18282 dtrace_probes = NULL;
18283 dtrace_nprobes = 0;
18284
18285 dtrace_hash_destroy(dtrace_bymod);
18286 dtrace_hash_destroy(dtrace_byfunc);
18287 dtrace_hash_destroy(dtrace_byname);
18288 dtrace_bymod = NULL;
18289 dtrace_byfunc = NULL;
18290 dtrace_byname = NULL;
18291
18292 kmem_cache_destroy(dtrace_state_cache);
18293 vmem_destroy(dtrace_minor);
18294 vmem_destroy(dtrace_arena);
18295
18296 if (dtrace_toxrange != NULL) {
18297 kmem_free(dtrace_toxrange,
18298 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
18299 dtrace_toxrange = NULL;
18300 dtrace_toxranges = 0;
18301 dtrace_toxranges_max = 0;
18302 }
18303
18304 ddi_remove_minor_node(dtrace_devi, NULL);
18305 dtrace_devi = NULL;
18306
18307 ddi_soft_state_fini(&dtrace_softstate);
18308
18309 ASSERT(dtrace_vtime_references == 0);
18310 ASSERT(dtrace_opens == 0);
18311 ASSERT(dtrace_retained == NULL);
18312
18313 mutex_exit(&dtrace_lock);
18314 mutex_exit(&dtrace_provider_lock);
18315
18316 /*
18317 * We don't destroy the task queue until after we have dropped our
18318 * locks (taskq_destroy() may block on running tasks). To prevent
18319 * attempting to do work after we have effectively detached but before
18320 * the task queue has been destroyed, all tasks dispatched via the
18321 * task queue must check that DTrace is still attached before
18322 * performing any operation.
18323 */
18324 taskq_destroy(dtrace_taskq);
18325 dtrace_taskq = NULL;
18326
18327 return (DDI_SUCCESS);
18328 }
18329 #endif
18330
18331 #ifdef illumos
18332 /*ARGSUSED*/
18333 static int
18334 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
18335 {
18336 int error;
18337
18338 switch (infocmd) {
18339 case DDI_INFO_DEVT2DEVINFO:
18340 *result = (void *)dtrace_devi;
18341 error = DDI_SUCCESS;
18342 break;
18343 case DDI_INFO_DEVT2INSTANCE:
18344 *result = (void *)0;
18345 error = DDI_SUCCESS;
18346 break;
18347 default:
18348 error = DDI_FAILURE;
18349 }
18350 return (error);
18351 }
18352 #endif
18353
18354 #ifdef illumos
18355 static struct cb_ops dtrace_cb_ops = {
18356 dtrace_open, /* open */
18357 dtrace_close, /* close */
18358 nulldev, /* strategy */
18359 nulldev, /* print */
18360 nodev, /* dump */
18361 nodev, /* read */
18362 nodev, /* write */
18363 dtrace_ioctl, /* ioctl */
18364 nodev, /* devmap */
18365 nodev, /* mmap */
18366 nodev, /* segmap */
18367 nochpoll, /* poll */
18368 ddi_prop_op, /* cb_prop_op */
18369 0, /* streamtab */
18370 D_NEW | D_MP /* Driver compatibility flag */
18371 };
18372
18373 static struct dev_ops dtrace_ops = {
18374 DEVO_REV, /* devo_rev */
18375 0, /* refcnt */
18376 dtrace_info, /* get_dev_info */
18377 nulldev, /* identify */
18378 nulldev, /* probe */
18379 dtrace_attach, /* attach */
18380 dtrace_detach, /* detach */
18381 nodev, /* reset */
18382 &dtrace_cb_ops, /* driver operations */
18383 NULL, /* bus operations */
18384 nodev /* dev power */
18385 };
18386
18387 static struct modldrv modldrv = {
18388 &mod_driverops, /* module type (this is a pseudo driver) */
18389 "Dynamic Tracing", /* name of module */
18390 &dtrace_ops, /* driver ops */
18391 };
18392
18393 static struct modlinkage modlinkage = {
18394 MODREV_1,
18395 (void *)&modldrv,
18396 NULL
18397 };
18398
18399 int
18400 _init(void)
18401 {
18402 return (mod_install(&modlinkage));
18403 }
18404
18405 int
18406 _info(struct modinfo *modinfop)
18407 {
18408 return (mod_info(&modlinkage, modinfop));
18409 }
18410
18411 int
18412 _fini(void)
18413 {
18414 return (mod_remove(&modlinkage));
18415 }
18416 #else
18417
18418 static d_ioctl_t dtrace_ioctl;
18419 static d_ioctl_t dtrace_ioctl_helper;
18420 static void dtrace_load(void *);
18421 static int dtrace_unload(void);
18422 static struct cdev *dtrace_dev;
18423 static struct cdev *helper_dev;
18424
18425 void dtrace_invop_init(void);
18426 void dtrace_invop_uninit(void);
18427
18428 static struct cdevsw dtrace_cdevsw = {
18429 .d_version = D_VERSION,
18430 .d_ioctl = dtrace_ioctl,
18431 .d_open = dtrace_open,
18432 .d_name = "dtrace",
18433 };
18434
18435 static struct cdevsw helper_cdevsw = {
18436 .d_version = D_VERSION,
18437 .d_ioctl = dtrace_ioctl_helper,
18438 .d_name = "helper",
18439 };
18440
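/*
 * The remaining FreeBSD-specific pieces -- anonymous state, the ioctl
 * handlers, load/unload, the module event handler and the sysctl glue --
 * are textually included into this translation unit below.
 */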
18441 #include <dtrace_anon.c>
18442 #include <dtrace_ioctl.c>
18443 #include <dtrace_load.c>
18444 #include <dtrace_modevent.c>
18445 #include <dtrace_sysctl.c>
18446 #include <dtrace_unload.c>
18447 #include <dtrace_vtime.c>
18448 #include <dtrace_hacks.c>
18449
18450 SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
18451 SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
18452 SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);
18453
18454 DEV_MODULE(dtrace, dtrace_modevent, NULL);
18455 MODULE_VERSION(dtrace, 1);
18456 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
18457 #endif
18458