1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 *
21 * $FreeBSD: head/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c 313266 2017-02-05 02:47:34Z markj $
22 */
23
24 /*
25 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
26 * Copyright (c) 2016, Joyent, Inc. All rights reserved.
27 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
28 */
29
30 /*
31 * DTrace - Dynamic Tracing for Solaris
32 *
33 * This is the implementation of the Solaris Dynamic Tracing framework
34 * (DTrace). The user-visible interface to DTrace is described at length in
35 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
36 * library, the in-kernel DTrace framework, and the DTrace providers are
37 * described in the block comments in the <sys/dtrace.h> header file. The
38 * internal architecture of DTrace is described in the block comments in the
39 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
40 * implementation very much assume mastery of all of these sources; if one has
41 * an unanswered question about the implementation, one should consult them
42 * first.
43 *
44 * The functions here are ordered roughly as follows:
45 *
46 * - Probe context functions
47 * - Probe hashing functions
48 * - Non-probe context utility functions
49 * - Matching functions
50 * - Provider-to-Framework API functions
51 * - Probe management functions
52 * - DIF object functions
53 * - Format functions
54 * - Predicate functions
55 * - ECB functions
56 * - Buffer functions
57 * - Enabling functions
58 * - DOF functions
59 * - Anonymous enabling functions
60 * - Consumer state functions
61 * - Helper functions
62 * - Hook functions
63 * - Driver cookbook functions
64 *
65 * Each group of functions begins with a block comment labelled the "DTrace
66 * [Group] Functions", allowing one to find each block by searching forward
67 * on capital-f functions.
68 */
69 #ifdef __NetBSD__
70 #define __MUTEX_PRIVATE
71 #define __RWLOCK_PRIVATE
72 #include <sys/proc.h>
73 #endif
74
75 #include <sys/errno.h>
76 #ifndef illumos
77 #include <sys/time.h>
78 #endif
79 #include <sys/stat.h>
80 #include <sys/modctl.h>
81 #include <sys/conf.h>
82 #include <sys/systm.h>
83 #ifdef illumos
84 #include <sys/ddi.h>
85 #include <sys/sunddi.h>
86 #endif
87 #include <sys/cpuvar.h>
88 #include <sys/kmem.h>
89 #ifdef illumos
90 #include <sys/strsubr.h>
91 #endif
92 #include <sys/sysmacros.h>
93 #include <sys/dtrace_impl.h>
94 #include <sys/atomic.h>
95 #include <sys/cmn_err.h>
96 #ifdef illumos
97 #include <sys/mutex_impl.h>
98 #include <sys/rwlock_impl.h>
99 #endif
100 #include <sys/ctf_api.h>
101 #ifdef illumos
102 #include <sys/panic.h>
103 #include <sys/priv_impl.h>
104 #endif
105 #include <sys/policy.h>
106 #ifdef illumos
107 #include <sys/cred_impl.h>
108 #include <sys/procfs_isa.h>
109 #endif
110 #include <sys/taskq.h>
111 #ifdef illumos
112 #include <sys/mkdev.h>
113 #include <sys/kdi.h>
114 #endif
115 #include <sys/zone.h>
116 #include <sys/socket.h>
117 #include <netinet/in.h>
118 #include "strtolctype.h"
119
120 /* FreeBSD includes: */
121 #ifdef __FreeBSD__
122 #include <sys/callout.h>
123 #include <sys/ctype.h>
124 #include <sys/eventhandler.h>
125 #include <sys/limits.h>
126 #include <sys/linker.h>
127 #include <sys/kdb.h>
128 #include <sys/kernel.h>
129 #include <sys/malloc.h>
130 #include <sys/lock.h>
131 #include <sys/mutex.h>
132 #include <sys/ptrace.h>
133 #include <sys/random.h>
134 #include <sys/rwlock.h>
135 #include <sys/sx.h>
136 #include <sys/sysctl.h>
137 #endif
138
139 #ifdef __NetBSD__
140 #include <sys/cred.h>
141 #include <sys/callout.h>
142 #include <sys/ctype.h>
143 #include <sys/kernel.h>
144 #include <sys/malloc.h>
145 #include <sys/lock.h>
146 #include <sys/mutex.h>
147 #include <sys/random.h>
148 #include <sys/rwlock.h>
149 #include <sys/sysctl.h>
150 #include <sys/mutex_impl.h>
151 #include <sys/rwlock_impl.h>
152 #include <sys/mkdev.h>
153 #include <sys/file.h>
154 #include <sys/filedesc.h>
155 #include <sys/vmem.h>
156 #include <sys/module.h>
157 #include <sys/cpu.h>
158 #endif
159
160 #ifndef illumos
161
162 #include <sys/dtrace_bsd.h>
163
164 #include "dtrace_xoroshiro128_plus.h"
165
166 #include <netinet/in.h>
167
168 #include "dtrace_cddl.h"
169 #include "dtrace_debug.c"
170
171 #ifdef __NetBSD__
172 struct dtrace_state_worker *dtrace_state_worker_add(void (*fn)(dtrace_state_t *),
173 dtrace_state_t *state, hrtime_t interval);
174 void dtrace_state_worker_remove(struct dtrace_state_worker *w);
175
176 modctl_t *mod_nbsd;
177
178 #endif /* __NetBSD__ */
179
180 #endif /* !illumos */
181
182
183 /*
184 * DTrace Tunable Variables
185 *
186 * The following variables may be tuned by adding a line to /etc/system that
187 * includes both the name of the DTrace module ("dtrace") and the name of the
188 * variable. For example:
189 *
190 * set dtrace:dtrace_destructive_disallow = 1
191 *
192 * In general, the only variables that one should be tuning this way are those
193 * that affect system-wide DTrace behavior, and for which the default behavior
194 * is undesirable. Most of these variables are tunable on a per-consumer
195 * basis using DTrace options, and need not be tuned on a system-wide basis.
196 * When tuning these variables, avoid pathological values; while some attempt
197 * is made to verify the integrity of these variables, they are not considered
198 * part of the supported interface to DTrace, and they are therefore not
199 * checked comprehensively. Further, these variables should not be tuned
200 * dynamically via "mdb -kw" or other means; they should only be tuned via
201 * /etc/system.
202 */
203 int dtrace_destructive_disallow = 0;
204 #ifndef illumos
205 /* Positive logic version of dtrace_destructive_disallow for loader tunable */
206 int dtrace_allow_destructive = 1;
207 #endif
208 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
209 size_t dtrace_difo_maxsize = (256 * 1024);
210 dtrace_optval_t dtrace_dof_maxsize = (8 * 1024 * 1024);
211 size_t dtrace_statvar_maxsize = (16 * 1024);
212 size_t dtrace_actions_max = (16 * 1024);
213 size_t dtrace_retain_max = 1024;
214 dtrace_optval_t dtrace_helper_actions_max = 128;
215 dtrace_optval_t dtrace_helper_providers_max = 32;
216 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
217 size_t dtrace_strsize_default = 256;
218 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
219 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
220 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
221 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
222 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
223 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
224 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
225 dtrace_optval_t dtrace_nspec_default = 1;
226 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
227 dtrace_optval_t dtrace_stackframes_default = 20;
228 dtrace_optval_t dtrace_ustackframes_default = 20;
229 dtrace_optval_t dtrace_jstackframes_default = 50;
230 dtrace_optval_t dtrace_jstackstrsize_default = 512;
231 int dtrace_msgdsize_max = 128;
232 hrtime_t dtrace_chill_max = MSEC2NSEC(500); /* 500 ms */
233 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
234 int dtrace_devdepth_max = 32;
235 int dtrace_err_verbose;
236 hrtime_t dtrace_deadman_interval = NANOSEC;
237 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
238 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
239 hrtime_t dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;
240 #ifndef illumos
241 int dtrace_memstr_max = 4096;
242 #endif
243
244 /*
245 * DTrace External Variables
246 *
247 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
248 * available to DTrace consumers via the backtick (`) syntax. One of these,
249 * dtrace_zero, is made deliberately so: it is provided as a source of
250 * well-known, zero-filled memory. While this variable is not documented,
251 * it is used by some translators as an implementation detail.
252 */
253 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
254
255 /*
256 * DTrace Internal Variables
257 */
258 #ifdef illumos
259 static dev_info_t *dtrace_devi; /* device info */
260 #endif
261 #ifdef illumos
262 static vmem_t *dtrace_arena; /* probe ID arena */
263 static vmem_t *dtrace_minor; /* minor number arena */
264 #else
265 static taskq_t *dtrace_taskq; /* task queue */
266 #ifdef __NetBSD__
267 static vmem_t *dtrace_arena; /* probe ID arena */
268 #else
269 static struct unrhdr *dtrace_arena; /* Probe ID number. */
270 #endif
271 #endif
272 static dtrace_probe_t **dtrace_probes; /* array of all probes */
273 static int dtrace_nprobes; /* number of probes */
274 static dtrace_provider_t *dtrace_provider; /* provider list */
275 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
276 static int dtrace_opens; /* number of opens */
277 static int dtrace_helpers; /* number of helpers */
278 static int dtrace_getf; /* number of unpriv getf()s */
279 #ifdef illumos
280 static void *dtrace_softstate; /* softstate pointer */
281 #endif
282 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
283 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
284 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
285 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
286 static int dtrace_toxranges; /* number of toxic ranges */
287 static int dtrace_toxranges_max; /* size of toxic range array */
288 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
289 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
290 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
291 static kthread_t *dtrace_panicked; /* panicking thread */
292 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
293 static dtrace_genid_t dtrace_probegen; /* current probe generation */
294 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
295 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
296 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
297 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
298 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
299 #ifdef __FreeBSD__
300 static struct mtx dtrace_unr_mtx;
301 MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
302 static eventhandler_tag dtrace_kld_load_tag;
303 static eventhandler_tag dtrace_kld_unload_try_tag;
304 #endif
305
306 /*
307 * DTrace Locking
308 * DTrace is protected by three (relatively coarse-grained) locks:
309 *
310 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
311 * including enabling state, probes, ECBs, consumer state, helper state,
312 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
313 * probe context is lock-free -- synchronization is handled via the
314 * dtrace_sync() cross call mechanism.
315 *
316 * (2) dtrace_provider_lock is required when manipulating provider state, or
317 * when provider state must be held constant.
318 *
319 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
320 * when meta provider state must be held constant.
321 *
322 * The lock ordering between these three locks is dtrace_meta_lock before
323 * dtrace_provider_lock before dtrace_lock. (In particular, there are
324 * several places where dtrace_provider_lock is held by the framework as it
325 * calls into the providers -- which then call back into the framework,
326 * grabbing dtrace_lock.)
327 *
328 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
329 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
330 * role as a coarse-grained lock; it is acquired before both of these locks.
331 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
332 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
333 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
334 * acquired _between_ dtrace_provider_lock and dtrace_lock.
335 */
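/*
 * Purely illustrative sketch (not a path taken verbatim anywhere in this
 * file): a framework routine that needed every lock named above would, per
 * the ordering rules in the preceding comment, acquire and release them as:
 *
 *	mutex_enter(&dtrace_meta_lock);
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&mod_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&mod_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&cpu_lock);
 *	mutex_exit(&dtrace_meta_lock);
 *
 * Most paths take only a subset of these locks; releases are in the reverse
 * order of acquisition.
 */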
336 static kmutex_t dtrace_lock; /* probe state lock */
337 static kmutex_t dtrace_provider_lock; /* provider state lock */
338 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
339
340 #ifndef illumos
341 /* XXX FreeBSD hacks. */
342 #ifdef __FreeBSD__
343 static kmutex_t mod_lock;
344 #endif
345
346 #define cr_suid cr_svuid
347 #define cr_sgid cr_svgid
348 #define ipaddr_t in_addr_t
349 #define mod_modname pathname
350 #define vuprintf vprintf
351 #ifdef __NetBSD__
352 #define ttoproc(_a) ((_a)->l_proc)
353 #else
354 #define ttoproc(_a) ((_a)->td_proc)
355 #endif
356 #define crgetzoneid(_a) 0
357 #define SNOCD 0
358 #define CPU_ON_INTR(_a) 0
359
360 #define PRIV_EFFECTIVE (1 << 0)
361 #define PRIV_DTRACE_KERNEL (1 << 1)
362 #define PRIV_DTRACE_PROC (1 << 2)
363 #define PRIV_DTRACE_USER (1 << 3)
364 #define PRIV_PROC_OWNER (1 << 4)
365 #define PRIV_PROC_ZONE (1 << 5)
366 #define PRIV_ALL ~0
367
368 SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
369 SYSCTL_DECL(_debug_dtrace);
370 SYSCTL_DECL(_kern_dtrace);
371 #endif
372
373 #ifdef illumos
374 #define curcpu_id CPU->cpu_id
375 #endif
376 #ifdef __FreeBSD__
377 #define curcpu_id curcpu
378 #endif
379 #ifdef __NetBSD__
380 #define curcpu_id cpu_number()
381 #endif
382
383
384 /*
385 * DTrace Provider Variables
386 *
387 * These are the variables relating to DTrace as a provider (that is, the
388 * provider of the BEGIN, END, and ERROR probes).
389 */
390 static dtrace_pattr_t dtrace_provider_attr = {
391 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
392 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
393 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
394 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
395 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
396 };
397
398 static int
399 dtrace_nullop(void)
400 {
401
402 return 0;
403 }
404
405 static dtrace_pops_t dtrace_provider_ops = {
406 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop,
407 (void (*)(void *, modctl_t *))dtrace_nullop,
408 (int (*)(void *, dtrace_id_t, void *))dtrace_nullop,
409 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
410 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
411 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
412 NULL,
413 NULL,
414 NULL,
415 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
416 };
417
418 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
419 static dtrace_id_t dtrace_probeid_end; /* special END probe */
420 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
421
422 /*
423 * DTrace Helper Tracing Variables
424 *
425 * These variables should be set dynamically to enable helper tracing. The
426 * only variables that should be set are dtrace_helptrace_enable (which should
427 * be set to a non-zero value to allocate helper tracing buffers on the next
428 * open of /dev/dtrace) and dtrace_helptrace_disable (which should be set to a
429 * non-zero value to deallocate helper tracing buffers on the next close of
430 * /dev/dtrace). When (and only when) helper tracing is disabled, the
431 * buffer size may also be set via dtrace_helptrace_bufsize.
432 */
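/*
 * For example (illustrative only; illumos DEBUG kernel, using the variables
 * declared just below), helper tracing could be switched on with mdb before
 * the next open of /dev/dtrace:
 *
 *	# mdb -kw
 *	> dtrace_helptrace_enable/W 1
 */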
433 int dtrace_helptrace_enable = 0;
434 int dtrace_helptrace_disable = 0;
435 int dtrace_helptrace_bufsize = 16 * 1024 * 1024;
436 uint32_t dtrace_helptrace_nlocals;
437 static dtrace_helptrace_t *dtrace_helptrace_buffer;
438 static uint32_t dtrace_helptrace_next = 0;
439 static int dtrace_helptrace_wrapped = 0;
440
441 /*
442 * DTrace Error Hashing
443 *
444  * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
445 * table. This is very useful for checking coverage of tests that are
446 * expected to induce DIF or DOF processing errors, and may be useful for
447  * debugging problems in the DIF code generator or in DOF generation. The
448 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
449 */
450 #ifdef DEBUG
451 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
452 static const char *dtrace_errlast;
453 static kthread_t *dtrace_errthread;
454 static kmutex_t dtrace_errlock;
455 #endif
456
457 /*
458 * DTrace Macros and Constants
459 *
460 * These are various macros that are useful in various spots in the
461 * implementation, along with a few random constants that have no meaning
462 * outside of the implementation. There is no real structure to this cpp
463 * mishmash -- but is there ever?
464 */
465 #define DTRACE_HASHSTR(hash, probe) \
466 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
467
468 #define DTRACE_HASHNEXT(hash, probe) \
469 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
470
471 #define DTRACE_HASHPREV(hash, probe) \
472 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
473
474 #define DTRACE_HASHEQ(hash, lhs, rhs) \
475 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
476 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
477
478 #define DTRACE_AGGHASHSIZE_SLEW 17
479
480 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
481
482 /*
483 * The key for a thread-local variable consists of the lower 61 bits of the
484 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
485 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
486 * equal to a variable identifier. This is necessary (but not sufficient) to
487 * assure that global associative arrays never collide with thread-local
488 * variables. To guarantee that they cannot collide, we must also define the
489 * order for keying dynamic variables. That order is:
490 *
491 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
492 *
493 * Because the variable-key and the tls-key are in orthogonal spaces, there is
494 * no way for a global variable key signature to match a thread-local key
495 * signature.
496 */
497 #ifdef illumos
498 #define DTRACE_TLS_THRKEY(where) { \
499 uint_t intr = 0; \
500 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
501 for (; actv; actv >>= 1) \
502 intr++; \
503 ASSERT(intr < (1 << 3)); \
504 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
505 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
506 }
507 #endif
508 #ifdef __FreeBSD__
509 #define DTRACE_TLS_THRKEY(where) { \
510 solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
511 uint_t intr = 0; \
512 uint_t actv = _c->cpu_intr_actv; \
513 for (; actv; actv >>= 1) \
514 intr++; \
515 ASSERT(intr < (1 << 3)); \
516 (where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
517 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
518 }
519 #endif
520 #ifdef __NetBSD__
521 #define DTRACE_TLS_THRKEY(where) { \
522 uint_t intr = 0; \
523 (where) = ((curthread->l_lid + (curthread->l_proc->p_pid << 16) + \
524 DIF_VARIABLE_MAX) & \
525 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
526 }
527 #endif
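/*
 * Illustrative sketch of assumed usage (the actual consumers live in the DIF
 * emulation code later in this file): the computed value becomes the final,
 * by-value element of a dynamic-variable key tuple, e.g.:
 *
 *	DTRACE_TLS_THRKEY(key[1].dttk_value);
 *	key[1].dttk_size = 0;
 */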
528
529 #define DT_BSWAP_8(x) ((x) & 0xff)
530 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
531 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
532 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
533
534 #define DT_MASK_LO 0x00000000FFFFFFFFULL
535
536 #define DTRACE_STORE(type, tomax, offset, what) \
537 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
538
539 #ifndef __x86
540 #define DTRACE_ALIGNCHECK(addr, size, flags) \
541 if (addr & (size - 1)) { \
542 *flags |= CPU_DTRACE_BADALIGN; \
543 cpu_core[curcpu_id].cpuc_dtrace_illval = addr; \
544 return (0); \
545 }
546 #else
547 #define DTRACE_ALIGNCHECK(addr, size, flags)
548 #endif
549
550 /*
551 * Test whether a range of memory starting at testaddr of size testsz falls
552 * within the range of memory described by addr, sz. We take care to avoid
553 * problems with overflow and underflow of the unsigned quantities, and
554 * disallow all negative sizes. Ranges of size 0 are allowed.
555 */
556 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
557 ((testaddr) - (uintptr_t)(baseaddr) < (basesz) && \
558 (testaddr) + (testsz) - (uintptr_t)(baseaddr) <= (basesz) && \
559 (testaddr) + (testsz) >= (testaddr))
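/*
 * Worked example (illustrative, not from the original sources): take
 * baseaddr = 0xffffffffffffff00 and basesz = 0x100 on a 64-bit kernel. A
 * naive check of the form "testaddr >= baseaddr && testaddr + testsz <=
 * baseaddr + basesz" misfires because baseaddr + basesz wraps to 0. The
 * macro above instead compares the unsigned offsets (testaddr - baseaddr)
 * and (testaddr + testsz - baseaddr) against basesz -- an underflowed offset
 * simply compares as a huge value and fails -- and the final term rejects
 * the case where testaddr + testsz itself wraps.
 */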
560
561 #define DTRACE_RANGE_REMAIN(remp, addr, baseaddr, basesz) \
562 do { \
563 if ((remp) != NULL) { \
564 *(remp) = (uintptr_t)(baseaddr) + (basesz) - (addr); \
565 } \
566 _NOTE(CONSTCOND) } while (0)
567
568
569 /*
570 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
571 * alloc_sz on the righthand side of the comparison in order to avoid overflow
572 * or underflow in the comparison with it. This is simpler than the INRANGE
573 * check above, because we know that the dtms_scratch_ptr is valid in the
574 * range. Allocations of size zero are allowed.
575 */
576 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
577 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
578 (mstate)->dtms_scratch_ptr >= (alloc_sz))
579
580 #define DTRACE_LOADFUNC(bits) \
581 /*CSTYLED*/ \
582 uint##bits##_t \
583 dtrace_load##bits(uintptr_t addr) \
584 { \
585 size_t size = bits / NBBY; \
586 /*CSTYLED*/ \
587 uint##bits##_t rval; \
588 int i; \
589 volatile uint16_t *flags = (volatile uint16_t *) \
590 &cpu_core[curcpu_id].cpuc_dtrace_flags; \
591 \
592 DTRACE_ALIGNCHECK(addr, size, flags); \
593 \
594 for (i = 0; i < dtrace_toxranges; i++) { \
595 if (addr >= dtrace_toxrange[i].dtt_limit) \
596 continue; \
597 \
598 if (addr + size <= dtrace_toxrange[i].dtt_base) \
599 continue; \
600 \
601 /* \
602 * This address falls within a toxic region; return 0. \
603 */ \
604 *flags |= CPU_DTRACE_BADADDR; \
605 cpu_core[curcpu_id].cpuc_dtrace_illval = addr; \
606 return (0); \
607 } \
608 \
609 *flags |= CPU_DTRACE_NOFAULT; \
610 /*CSTYLED*/ \
611 rval = *((volatile uint##bits##_t *)addr); \
612 *flags &= ~CPU_DTRACE_NOFAULT; \
613 \
614 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
615 }
616
617 #ifdef _LP64
618 #define dtrace_loadptr dtrace_load64
619 #else
620 #define dtrace_loadptr dtrace_load32
621 #endif
622
623 #define DTRACE_DYNHASH_FREE 0
624 #define DTRACE_DYNHASH_SINK 1
625 #define DTRACE_DYNHASH_VALID 2
626
627 #define DTRACE_MATCH_FAIL -1
628 #define DTRACE_MATCH_NEXT 0
629 #define DTRACE_MATCH_DONE 1
630 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
631 #define DTRACE_STATE_ALIGN 64
632
633 #define DTRACE_FLAGS2FLT(flags) \
634 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
635 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
636 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
637 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
638 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
639 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
640 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
641 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
642 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
643 DTRACEFLT_UNKNOWN)
644
645 #define DTRACEACT_ISSTRING(act) \
646 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
647 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
648
649 /* Function prototype definitions: */
650 static size_t dtrace_strlen(const char *, size_t);
651 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
652 static void dtrace_enabling_provide(dtrace_provider_t *);
653 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
654 static void dtrace_enabling_matchall(void);
655 static void dtrace_enabling_reap(void);
656 static dtrace_state_t *dtrace_anon_grab(void);
657 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
658 dtrace_state_t *, uint64_t, uint64_t);
659 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
660 static void dtrace_buffer_drop(dtrace_buffer_t *);
661 static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
662 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
663 dtrace_state_t *, dtrace_mstate_t *);
664 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
665 dtrace_optval_t);
666 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
667 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
668 uint16_t dtrace_load16(uintptr_t);
669 uint32_t dtrace_load32(uintptr_t);
670 uint64_t dtrace_load64(uintptr_t);
671 uint8_t dtrace_load8(uintptr_t);
672 void dtrace_dynvar_clean(dtrace_dstate_t *);
673 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
674 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
675 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);
676 static int dtrace_priv_proc(dtrace_state_t *);
677 static void dtrace_getf_barrier(void);
678 static int dtrace_canload_remains(uint64_t, size_t, size_t *,
679 dtrace_mstate_t *, dtrace_vstate_t *);
680 static int dtrace_canstore_remains(uint64_t, size_t, size_t *,
681 dtrace_mstate_t *, dtrace_vstate_t *);
682
683 /*
684 * DTrace Probe Context Functions
685 *
686 * These functions are called from probe context. Because probe context is
687 * any context in which C may be called, arbitrarily locks may be held,
688  * any context in which C may be called, arbitrary locks may be held,
689 * As a result, functions called from probe context may only call other DTrace
690 * support functions -- they may not interact at all with the system at large.
691 * (Note that the ASSERT macro is made probe-context safe by redefining it in
692 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
693 * loads are to be performed from probe context, they _must_ be in terms of
694 * the safe dtrace_load*() variants.
695 *
696 * Some functions in this block are not actually called from probe context;
697 * for these functions, there will be a comment above the function reading
698 * "Note: not called from probe context."
699 */
700 void
701 dtrace_panic(const char *format, ...)
702 {
703 va_list alist;
704
705 va_start(alist, format);
706 #ifndef illumos
707 vpanic(format, alist);
708 #else
709 dtrace_vpanic(format, alist);
710 #endif
711 va_end(alist);
712 }
713
714 int
715 dtrace_assfail(const char *a, const char *f, int l)
716 {
717 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
718
719 /*
720 * We just need something here that even the most clever compiler
721 * cannot optimize away.
722 */
723 return (a[(uintptr_t)f]);
724 }
725
726 /*
727 * Atomically increment a specified error counter from probe context.
728 */
729 static void
730 dtrace_error(uint32_t *counter)
731 {
732 /*
733 * Most counters stored to in probe context are per-CPU counters.
734 * However, there are some error conditions that are sufficiently
735 * arcane that they don't merit per-CPU storage. If these counters
736 * are incremented concurrently on different CPUs, scalability will be
737 * adversely affected -- but we don't expect them to be white-hot in a
738 * correctly constructed enabling...
739 */
740 uint32_t oval, nval;
741
742 do {
743 oval = *counter;
744
745 if ((nval = oval + 1) == 0) {
746 /*
747 * If the counter would wrap, set it to 1 -- assuring
748 * that the counter is never zero when we have seen
749 * errors. (The counter must be 32-bits because we
750 * aren't guaranteed a 64-bit compare&swap operation.)
751 * To save this code both the infamy of being fingered
752 * by a priggish news story and the indignity of being
753 * the target of a neo-puritan witch trial, we're
754 * carefully avoiding any colorful description of the
755 * likelihood of this condition -- but suffice it to
756 * say that it is only slightly more likely than the
757 * overflow of predicate cache IDs, as discussed in
758 * dtrace_predicate_create().
759 */
760 nval = 1;
761 }
762 } while (dtrace_cas32(counter, oval, nval) != oval);
763 }
764
765 /*
766 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
767 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
768 */
769 /* BEGIN CSTYLED */
770 DTRACE_LOADFUNC(8)
771 DTRACE_LOADFUNC(16)
772 DTRACE_LOADFUNC(32)
773 DTRACE_LOADFUNC(64)
774 /* END CSTYLED */
775
776 static int
777 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
778 {
779 if (dest < mstate->dtms_scratch_base)
780 return (0);
781
782 if (dest + size < dest)
783 return (0);
784
785 if (dest + size > mstate->dtms_scratch_ptr)
786 return (0);
787
788 return (1);
789 }
790
791 static int
792 dtrace_canstore_statvar(uint64_t addr, size_t sz, size_t *remain,
793 dtrace_statvar_t **svars, int nsvars)
794 {
795 int i;
796 size_t maxglobalsize, maxlocalsize;
797
798 if (nsvars == 0)
799 return (0);
800
801 maxglobalsize = dtrace_statvar_maxsize + sizeof (uint64_t);
802 maxlocalsize = maxglobalsize * NCPU;
803
804 for (i = 0; i < nsvars; i++) {
805 dtrace_statvar_t *svar = svars[i];
806 uint8_t scope;
807 size_t size;
808
809 if (svar == NULL || (size = svar->dtsv_size) == 0)
810 continue;
811
812 scope = svar->dtsv_var.dtdv_scope;
813
814 /*
815 * We verify that our size is valid in the spirit of providing
816 * defense in depth: we want to prevent attackers from using
817 * DTrace to escalate an orthogonal kernel heap corruption bug
818 * into the ability to store to arbitrary locations in memory.
819 */
820 VERIFY((scope == DIFV_SCOPE_GLOBAL && size <= maxglobalsize) ||
821 (scope == DIFV_SCOPE_LOCAL && size <= maxlocalsize));
822
823 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data,
824 svar->dtsv_size)) {
825 DTRACE_RANGE_REMAIN(remain, addr, svar->dtsv_data,
826 svar->dtsv_size);
827 return (1);
828 }
829 }
830
831 return (0);
832 }
833
834 /*
835 * Check to see if the address is within a memory region to which a store may
836 * be issued. This includes the DTrace scratch areas, and any DTrace variable
837 * region. The caller of dtrace_canstore() is responsible for performing any
838 * alignment checks that are needed before stores are actually executed.
839 */
840 static int
841 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
842 dtrace_vstate_t *vstate)
843 {
844 return (dtrace_canstore_remains(addr, sz, NULL, mstate, vstate));
845 }
846
847 /*
848 * Implementation of dtrace_canstore which communicates the upper bound of the
849 * allowed memory region.
850 */
851 static int
852 dtrace_canstore_remains(uint64_t addr, size_t sz, size_t *remain,
853 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
854 {
855 /*
856 * First, check to see if the address is in scratch space...
857 */
858 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
859 mstate->dtms_scratch_size)) {
860 DTRACE_RANGE_REMAIN(remain, addr, mstate->dtms_scratch_base,
861 mstate->dtms_scratch_size);
862 return (1);
863 }
864
865 /*
866 * Now check to see if it's a dynamic variable. This check will pick
867 * up both thread-local variables and any global dynamically-allocated
868 * variables.
869 */
870 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
871 vstate->dtvs_dynvars.dtds_size)) {
872 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
873 uintptr_t base = (uintptr_t)dstate->dtds_base +
874 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
875 uintptr_t chunkoffs;
876 dtrace_dynvar_t *dvar;
877
878 /*
879 * Before we assume that we can store here, we need to make
880 * sure that it isn't in our metadata -- storing to our
881 * dynamic variable metadata would corrupt our state. For
882 * the range to not include any dynamic variable metadata,
883 * it must:
884 *
885 * (1) Start above the hash table that is at the base of
886 * the dynamic variable space
887 *
888 * (2) Have a starting chunk offset that is beyond the
889 * dtrace_dynvar_t that is at the base of every chunk
890 *
891 * (3) Not span a chunk boundary
892 *
893 * (4) Not be in the tuple space of a dynamic variable
894 *
895 */
896 if (addr < base)
897 return (0);
898
899 chunkoffs = (addr - base) % dstate->dtds_chunksize;
900
901 if (chunkoffs < sizeof (dtrace_dynvar_t))
902 return (0);
903
904 if (chunkoffs + sz > dstate->dtds_chunksize)
905 return (0);
906
907 dvar = (dtrace_dynvar_t *)((uintptr_t)addr - chunkoffs);
908
909 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE)
910 return (0);
911
912 if (chunkoffs < sizeof (dtrace_dynvar_t) +
913 ((dvar->dtdv_tuple.dtt_nkeys - 1) * sizeof (dtrace_key_t)))
914 return (0);
915
916 DTRACE_RANGE_REMAIN(remain, addr, dvar, dstate->dtds_chunksize);
917 return (1);
918 }
919
920 /*
921 * Finally, check the static local and global variables. These checks
922 * take the longest, so we perform them last.
923 */
924 if (dtrace_canstore_statvar(addr, sz, remain,
925 vstate->dtvs_locals, vstate->dtvs_nlocals))
926 return (1);
927
928 if (dtrace_canstore_statvar(addr, sz, remain,
929 vstate->dtvs_globals, vstate->dtvs_nglobals))
930 return (1);
931
932 return (0);
933 }
934
935
936 /*
937 * Convenience routine to check to see if the address is within a memory
938 * region in which a load may be issued given the user's privilege level;
939 * if not, it sets the appropriate error flags and loads 'addr' into the
940 * illegal value slot.
941 *
942 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
943 * appropriate memory access protection.
944 */
945 static int
946 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
947 dtrace_vstate_t *vstate)
948 {
949 return (dtrace_canload_remains(addr, sz, NULL, mstate, vstate));
950 }
951
952 /*
953  * Implementation of dtrace_canload which communicates the upper bound of the
954 * allowed memory region.
955 */
956 static int
957 dtrace_canload_remains(uint64_t addr, size_t sz, size_t *remain,
958 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
959 {
960 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;
961 file_t *fp;
962
963 /*
964 * If we hold the privilege to read from kernel memory, then
965 * everything is readable.
966 */
967 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
968 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
969 return (1);
970 }
971
972 /*
973 * You can obviously read that which you can store.
974 */
975 if (dtrace_canstore_remains(addr, sz, remain, mstate, vstate))
976 return (1);
977
978 /*
979 * We're allowed to read from our own string table.
980 */
981 if (DTRACE_INRANGE(addr, sz, mstate->dtms_difo->dtdo_strtab,
982 mstate->dtms_difo->dtdo_strlen)) {
983 DTRACE_RANGE_REMAIN(remain, addr,
984 mstate->dtms_difo->dtdo_strtab,
985 mstate->dtms_difo->dtdo_strlen);
986 return (1);
987 }
988
989 if (vstate->dtvs_state != NULL &&
990 dtrace_priv_proc(vstate->dtvs_state)) {
991 proc_t *p;
992
993 /*
994 * When we have privileges to the current process, there are
995 * several context-related kernel structures that are safe to
996 * read, even absent the privilege to read from kernel memory.
997 * These reads are safe because these structures contain only
998 * state that (1) we're permitted to read, (2) is harmless or
999 * (3) contains pointers to additional kernel state that we're
1000 * not permitted to read (and as such, do not present an
1001 * opportunity for privilege escalation). Finally (and
1002 * critically), because of the nature of their relation with
1003 * the current thread context, the memory associated with these
1004 * structures cannot change over the duration of probe context,
1005 * and it is therefore impossible for this memory to be
1006 * deallocated and reallocated as something else while it's
1007 * being operated upon.
1008 */
1009 if (DTRACE_INRANGE(addr, sz, curthread, sizeof (kthread_t))) {
1010 DTRACE_RANGE_REMAIN(remain, addr, curthread,
1011 sizeof (kthread_t));
1012 return (1);
1013 }
1014
1015 if ((p = curthread->t_procp) != NULL && DTRACE_INRANGE(addr,
1016 sz, curthread->t_procp, sizeof (proc_t))) {
1017 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_procp,
1018 sizeof (proc_t));
1019 return (1);
1020 }
1021
1022 #ifndef __NetBSD__
1023 if (curthread->t_cred != NULL && DTRACE_INRANGE(addr, sz,
1024 curthread->t_cred, sizeof (cred_t))) {
1025 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cred,
1026 sizeof (cred_t));
1027 return (1);
1028 }
1029 #endif
1030
1031 #ifdef illumos
1032 if (p != NULL && p->p_pidp != NULL && DTRACE_INRANGE(addr, sz,
1033 &(p->p_pidp->pid_id), sizeof (pid_t))) {
1034 DTRACE_RANGE_REMAIN(remain, addr, &(p->p_pidp->pid_id),
1035 sizeof (pid_t));
1036 return (1);
1037 }
1038
1039 if (curthread->t_cpu != NULL && DTRACE_INRANGE(addr, sz,
1040 curthread->t_cpu, offsetof(cpu_t, cpu_pause_thread))) {
1041 DTRACE_RANGE_REMAIN(remain, addr, curthread->t_cpu,
1042 offsetof(cpu_t, cpu_pause_thread));
1043 return (1);
1044 }
1045 #endif
1046 }
1047
1048 if ((fp = mstate->dtms_getf) != NULL) {
1049 uintptr_t psz = sizeof (void *);
1050 vnode_t *vp;
1051 vnodeops_t *op;
1052
1053 /*
1054 * When getf() returns a file_t, the enabling is implicitly
1055 * granted the (transient) right to read the returned file_t
1056 * as well as the v_path and v_op->vnop_name of the underlying
1057 * vnode. These accesses are allowed after a successful
1058 * getf() because the members that they refer to cannot change
1059 * once set -- and the barrier logic in the kernel's closef()
1060 	 * path assures that the file_t and its referenced vnode_t
1061 	 * cannot themselves be stale (that is, it is impossible for
1062 * either dtms_getf itself or its f_vnode member to reference
1063 * freed memory).
1064 */
1065 if (DTRACE_INRANGE(addr, sz, fp, sizeof (file_t))) {
1066 DTRACE_RANGE_REMAIN(remain, addr, fp, sizeof (file_t));
1067 return (1);
1068 }
1069
1070 if ((vp = fp->f_vnode) != NULL) {
1071 size_t slen;
1072 #ifdef illumos
1073 if (DTRACE_INRANGE(addr, sz, &vp->v_path, psz)) {
1074 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_path,
1075 psz);
1076 return (1);
1077 }
1078 slen = strlen(vp->v_path) + 1;
1079 if (DTRACE_INRANGE(addr, sz, vp->v_path, slen)) {
1080 DTRACE_RANGE_REMAIN(remain, addr, vp->v_path,
1081 slen);
1082 return (1);
1083 }
1084 #endif
1085
1086 if (DTRACE_INRANGE(addr, sz, &vp->v_op, psz)) {
1087 DTRACE_RANGE_REMAIN(remain, addr, &vp->v_op,
1088 psz);
1089 return (1);
1090 }
1091
1092 #ifdef illumos
1093 if ((op = vp->v_op) != NULL &&
1094 DTRACE_INRANGE(addr, sz, &op->vnop_name, psz)) {
1095 DTRACE_RANGE_REMAIN(remain, addr,
1096 &op->vnop_name, psz);
1097 return (1);
1098 }
1099
1100 if (op != NULL && op->vnop_name != NULL &&
1101 DTRACE_INRANGE(addr, sz, op->vnop_name,
1102 (slen = strlen(op->vnop_name) + 1))) {
1103 DTRACE_RANGE_REMAIN(remain, addr,
1104 op->vnop_name, slen);
1105 return (1);
1106 }
1107 #endif
1108 }
1109 }
1110
1111 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
1112 *illval = addr;
1113 return (0);
1114 }
1115
1116 /*
1117 * Convenience routine to check to see if a given string is within a memory
1118 * region in which a load may be issued given the user's privilege level;
1119 * this exists so that we don't need to issue unnecessary dtrace_strlen()
1120 * calls in the event that the user has all privileges.
1121 */
1122 static int
1123 dtrace_strcanload(uint64_t addr, size_t sz, size_t *remain,
1124 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1125 {
1126 size_t rsize;
1127
1128 /*
1129 * If we hold the privilege to read from kernel memory, then
1130 * everything is readable.
1131 */
1132
1133 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1134 DTRACE_RANGE_REMAIN(remain, addr, addr, sz);
1135 return (1);
1136 }
1137
1138 /*
1139 * Even if the caller is uninterested in querying the remaining valid
1140 * range, it is required to ensure that the access is allowed.
1141 */
1142 if (remain == NULL) {
1143 remain = &rsize;
1144 }
1145 if (dtrace_canload_remains(addr, 0, remain, mstate, vstate)) {
1146 size_t strsz;
1147 /*
1148 * Perform the strlen after determining the length of the
1149 * memory region which is accessible. This prevents timing
1150 * information from being used to find NULs in memory which is
1151 * not accessible to the caller.
1152 */
1153 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr,
1154 MIN(sz, *remain));
1155 if (strsz <= *remain) {
1156 return (1);
1157 }
1158 }
1159
1160 return (0);
1161 }
1162
1163 /*
1164 * Convenience routine to check to see if a given variable is within a memory
1165 * region in which a load may be issued given the user's privilege level.
1166 */
1167 static int
1168 dtrace_vcanload(void *src, dtrace_diftype_t *type, size_t *remain,
1169 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1170 {
1171 size_t sz;
1172 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1173
1174 /*
1175 * Calculate the max size before performing any checks since even
1176 * DTRACE_ACCESS_KERNEL-credentialed callers expect that this function
1177 * return the max length via 'remain'.
1178 */
1179 if (type->dtdt_kind == DIF_TYPE_STRING) {
1180 dtrace_state_t *state = vstate->dtvs_state;
1181
1182 if (state != NULL) {
1183 sz = state->dts_options[DTRACEOPT_STRSIZE];
1184 } else {
1185 /*
1186 * In helper context, we have a NULL state; fall back
1187 * to using the system-wide default for the string size
1188 * in this case.
1189 */
1190 sz = dtrace_strsize_default;
1191 }
1192 } else {
1193 sz = type->dtdt_size;
1194 }
1195
1196 /*
1197 * If we hold the privilege to read from kernel memory, then
1198 * everything is readable.
1199 */
1200 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) {
1201 DTRACE_RANGE_REMAIN(remain, (uintptr_t)src, src, sz);
1202 return (1);
1203 }
1204
1205 if (type->dtdt_kind == DIF_TYPE_STRING) {
1206 return (dtrace_strcanload((uintptr_t)src, sz, remain, mstate,
1207 vstate));
1208 }
1209 return (dtrace_canload_remains((uintptr_t)src, sz, remain, mstate,
1210 vstate));
1211 }
1212
1213 /*
1214 * Convert a string to a signed integer using safe loads.
1215 *
1216 * NOTE: This function uses various macros from strtolctype.h to manipulate
1217 * digit values, etc -- these have all been checked to ensure they make
1218 * no additional function calls.
1219 */
1220 static int64_t
1221 dtrace_strtoll(char *input, int base, size_t limit)
1222 {
1223 uintptr_t pos = (uintptr_t)input;
1224 int64_t val = 0;
1225 int x;
1226 boolean_t neg = B_FALSE;
1227 char c, cc, ccc;
1228 uintptr_t end = pos + limit;
1229
1230 /*
1231 * Consume any whitespace preceding digits.
1232 */
1233 while ((c = dtrace_load8(pos)) == ' ' || c == '\t')
1234 pos++;
1235
1236 /*
1237 * Handle an explicit sign if one is present.
1238 */
1239 if (c == '-' || c == '+') {
1240 if (c == '-')
1241 neg = B_TRUE;
1242 c = dtrace_load8(++pos);
1243 }
1244
1245 /*
1246 * Check for an explicit hexadecimal prefix ("0x" or "0X") and skip it
1247 * if present.
1248 */
1249 if (base == 16 && c == '0' && ((cc = dtrace_load8(pos + 1)) == 'x' ||
1250 cc == 'X') && isxdigit(ccc = dtrace_load8(pos + 2))) {
1251 pos += 2;
1252 c = ccc;
1253 }
1254
1255 /*
1256 * Read in contiguous digits until the first non-digit character.
1257 */
1258 for (; pos < end && c != '\0' && lisalnum(c) && (x = DIGIT(c)) < base;
1259 c = dtrace_load8(++pos))
1260 val = val * base + x;
1261
1262 return (neg ? -val : val);
1263 }
1264
1265 /*
1266 * Compare two strings using safe loads.
1267 */
1268 static int
1269 dtrace_strncmp(char *s1, char *s2, size_t limit)
1270 {
1271 uint8_t c1, c2;
1272 volatile uint16_t *flags;
1273
1274 if (s1 == s2 || limit == 0)
1275 return (0);
1276
1277 flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags;
1278
1279 do {
1280 if (s1 == NULL) {
1281 c1 = '\0';
1282 } else {
1283 c1 = dtrace_load8((uintptr_t)s1++);
1284 }
1285
1286 if (s2 == NULL) {
1287 c2 = '\0';
1288 } else {
1289 c2 = dtrace_load8((uintptr_t)s2++);
1290 }
1291
1292 if (c1 != c2)
1293 return (c1 - c2);
1294 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
1295
1296 return (0);
1297 }
1298
1299 /*
1300 * Compute strlen(s) for a string using safe memory accesses. The additional
1301  * lim parameter is used to specify a maximum length to ensure completion.
1302 */
1303 static size_t
1304 dtrace_strlen(const char *s, size_t lim)
1305 {
1306 uint_t len;
1307
1308 for (len = 0; len != lim; len++) {
1309 if (dtrace_load8((uintptr_t)s++) == '\0')
1310 break;
1311 }
1312
1313 return (len);
1314 }
1315
1316 /*
1317 * Check if an address falls within a toxic region.
1318 */
1319 static int
1320 dtrace_istoxic(uintptr_t kaddr, size_t size)
1321 {
1322 uintptr_t taddr, tsize;
1323 int i;
1324
1325 for (i = 0; i < dtrace_toxranges; i++) {
1326 taddr = dtrace_toxrange[i].dtt_base;
1327 tsize = dtrace_toxrange[i].dtt_limit - taddr;
1328
1329 if (kaddr - taddr < tsize) {
1330 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1331 cpu_core[curcpu_id].cpuc_dtrace_illval = kaddr;
1332 return (1);
1333 }
1334
1335 if (taddr - kaddr < size) {
1336 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
1337 cpu_core[curcpu_id].cpuc_dtrace_illval = taddr;
1338 return (1);
1339 }
1340 }
1341
1342 return (0);
1343 }
1344
1345 /*
1346 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
1347 * memory specified by the DIF program. The dst is assumed to be safe memory
1348 * that we can store to directly because it is managed by DTrace. As with
1349 * standard bcopy, overlapping copies are handled properly.
1350 */
1351 static void
1352 dtrace_bcopy(const void *src, void *dst, size_t len)
1353 {
1354 if (len != 0) {
1355 uint8_t *s1 = dst;
1356 const uint8_t *s2 = src;
1357
1358 if (s1 <= s2) {
1359 do {
1360 *s1++ = dtrace_load8((uintptr_t)s2++);
1361 } while (--len != 0);
1362 } else {
1363 s2 += len;
1364 s1 += len;
1365
1366 do {
1367 *--s1 = dtrace_load8((uintptr_t)--s2);
1368 } while (--len != 0);
1369 }
1370 }
1371 }
1372
1373 /*
1374 * Copy src to dst using safe memory accesses, up to either the specified
1375 * length, or the point that a nul byte is encountered. The src is assumed to
1376 * be unsafe memory specified by the DIF program. The dst is assumed to be
1377 * safe memory that we can store to directly because it is managed by DTrace.
1378 * Unlike dtrace_bcopy(), overlapping regions are not handled.
1379 */
1380 static void
1381 dtrace_strcpy(const void *src, void *dst, size_t len)
1382 {
1383 if (len != 0) {
1384 uint8_t *s1 = dst, c;
1385 const uint8_t *s2 = src;
1386
1387 do {
1388 *s1++ = c = dtrace_load8((uintptr_t)s2++);
1389 } while (--len != 0 && c != '\0');
1390 }
1391 }
1392
1393 /*
1394 * Copy src to dst, deriving the size and type from the specified (BYREF)
1395 * variable type. The src is assumed to be unsafe memory specified by the DIF
1396 * program. The dst is assumed to be DTrace variable memory that is of the
1397  * specified type; we assume that we can store to it directly.
1398 */
1399 static void
1400 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type, size_t limit)
1401 {
1402 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
1403
1404 if (type->dtdt_kind == DIF_TYPE_STRING) {
1405 dtrace_strcpy(src, dst, MIN(type->dtdt_size, limit));
1406 } else {
1407 dtrace_bcopy(src, dst, MIN(type->dtdt_size, limit));
1408 }
1409 }
1410
1411 /*
1412 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
1413 * unsafe memory specified by the DIF program. The s2 data is assumed to be
1414 * safe memory that we can access directly because it is managed by DTrace.
1415 */
1416 static int
1417 dtrace_bcmp(const void *s1, const void *s2, size_t len)
1418 {
1419 volatile uint16_t *flags;
1420
1421 flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags;
1422
1423 if (s1 == s2)
1424 return (0);
1425
1426 if (s1 == NULL || s2 == NULL)
1427 return (1);
1428
1429 if (s1 != s2 && len != 0) {
1430 const uint8_t *ps1 = s1;
1431 const uint8_t *ps2 = s2;
1432
1433 do {
1434 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
1435 return (1);
1436 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
1437 }
1438 return (0);
1439 }
1440
1441 /*
1442 * Zero the specified region using a simple byte-by-byte loop. Note that this
1443 * is for safe DTrace-managed memory only.
1444 */
1445 static void
1446 dtrace_bzero(void *dst, size_t len)
1447 {
1448 uchar_t *cp;
1449
1450 for (cp = dst; len != 0; len--)
1451 *cp++ = 0;
1452 }
1453
1454 static void
1455 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
1456 {
1457 uint64_t result[2];
1458
1459 result[0] = addend1[0] + addend2[0];
1460 result[1] = addend1[1] + addend2[1] +
1461 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
1462
1463 sum[0] = result[0];
1464 sum[1] = result[1];
1465 }
1466
1467 /*
1468 * Shift the 128-bit value in a by b. If b is positive, shift left.
1469 * If b is negative, shift right.
1470 */
1471 static void
1472 dtrace_shift_128(uint64_t *a, int b)
1473 {
1474 uint64_t mask;
1475
1476 if (b == 0)
1477 return;
1478
1479 if (b < 0) {
1480 b = -b;
1481 if (b >= 64) {
1482 a[0] = a[1] >> (b - 64);
1483 a[1] = 0;
1484 } else {
1485 a[0] >>= b;
1486 mask = 1LL << (64 - b);
1487 mask -= 1;
1488 a[0] |= ((a[1] & mask) << (64 - b));
1489 a[1] >>= b;
1490 }
1491 } else {
1492 if (b >= 64) {
1493 a[1] = a[0] << (b - 64);
1494 a[0] = 0;
1495 } else {
1496 a[1] <<= b;
1497 mask = a[0] >> (64 - b);
1498 a[1] |= mask;
1499 a[0] <<= b;
1500 }
1501 }
1502 }
1503
1504 /*
1505 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1506 * use native multiplication on those, and then re-combine into the
1507 * resulting 128-bit value.
1508 *
1509 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1510 * hi1 * hi2 << 64 +
1511 * hi1 * lo2 << 32 +
1512 * hi2 * lo1 << 32 +
1513 * lo1 * lo2
1514 */
1515 static void
1516 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1517 {
1518 uint64_t hi1, hi2, lo1, lo2;
1519 uint64_t tmp[2];
1520
1521 hi1 = factor1 >> 32;
1522 hi2 = factor2 >> 32;
1523
1524 lo1 = factor1 & DT_MASK_LO;
1525 lo2 = factor2 & DT_MASK_LO;
1526
1527 product[0] = lo1 * lo2;
1528 product[1] = hi1 * hi2;
1529
1530 tmp[0] = hi1 * lo2;
1531 tmp[1] = 0;
1532 dtrace_shift_128(tmp, 32);
1533 dtrace_add_128(product, tmp, product);
1534
1535 tmp[0] = hi2 * lo1;
1536 tmp[1] = 0;
1537 dtrace_shift_128(tmp, 32);
1538 dtrace_add_128(product, tmp, product);
1539 }
1540
1541 /*
1542 * This privilege check should be used by actions and subroutines to
1543 * verify that the user credentials of the process that enabled the
1544 * invoking ECB match the target credentials
1545 */
1546 static int
1547 dtrace_priv_proc_common_user(dtrace_state_t *state)
1548 {
1549 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1550
1551 /*
1552 * We should always have a non-NULL state cred here, since if cred
1553 * is null (anonymous tracing), we fast-path bypass this routine.
1554 */
1555 ASSERT(s_cr != NULL);
1556
1557 #ifdef __NetBSD__
1558 if ((cr = CRED()) != NULL) {
1559 uid_t uid;
1560 gid_t gid;
1561
1562 uid = kauth_cred_getuid(s_cr);
1563 gid = kauth_cred_getgid(s_cr);
1564
1565 if (uid == kauth_cred_getuid(cr) &&
1566 uid == kauth_cred_geteuid(cr) &&
1567 uid == kauth_cred_getsvuid(cr) &&
1568 gid == kauth_cred_getgid(cr) &&
1569 gid == kauth_cred_getegid(cr) &&
1570 gid == kauth_cred_getsvgid(cr))
1571 return 1;
1572 }
1573 #else
1574 if ((cr = CRED()) != NULL &&
1575 s_cr->cr_uid == cr->cr_uid &&
1576 s_cr->cr_uid == cr->cr_ruid &&
1577 s_cr->cr_uid == cr->cr_suid &&
1578 s_cr->cr_gid == cr->cr_gid &&
1579 s_cr->cr_gid == cr->cr_rgid &&
1580 s_cr->cr_gid == cr->cr_sgid)
1581 return (1);
1582 #endif
1583
1584 return (0);
1585 }
1586
1587 /*
1588 * This privilege check should be used by actions and subroutines to
1589 * verify that the zone of the process that enabled the invoking ECB
1590 * matches the target credentials
1591 */
1592 static int
1593 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1594 {
1595 #ifdef illumos
1596 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1597
1598 /*
1599 * We should always have a non-NULL state cred here, since if cred
1600 * is null (anonymous tracing), we fast-path bypass this routine.
1601 */
1602 ASSERT(s_cr != NULL);
1603
1604 	if ((cr = CRED()) != NULL &&
1605 	    s_cr->cr_zone == cr->cr_zone)
1606 return (1);
1607
1608 return (0);
1609 #else
1610 return (1);
1611 #endif
1612 }
1613
1614 /*
1615 * This privilege check should be used by actions and subroutines to
1616 * verify that the process has not setuid or changed credentials.
1617 */
1618 static int
1619 dtrace_priv_proc_common_nocd(void)
1620 {
1621 proc_t *proc;
1622
1623 if ((proc = ttoproc(curthread)) != NULL &&
1624 !(proc->p_flag & SNOCD))
1625 return (1);
1626
1627 return (0);
1628 }
1629
1630 static int
1631 dtrace_priv_proc_destructive(dtrace_state_t *state)
1632 {
1633 int action = state->dts_cred.dcr_action;
1634
1635 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1636 dtrace_priv_proc_common_zone(state) == 0)
1637 goto bad;
1638
1639 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1640 dtrace_priv_proc_common_user(state) == 0)
1641 goto bad;
1642
1643 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1644 dtrace_priv_proc_common_nocd() == 0)
1645 goto bad;
1646
1647 return (1);
1648
1649 bad:
1650 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1651
1652 return (0);
1653 }
1654
1655 static int
1656 dtrace_priv_proc_control(dtrace_state_t *state)
1657 {
1658 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1659 return (1);
1660
1661 if (dtrace_priv_proc_common_zone(state) &&
1662 dtrace_priv_proc_common_user(state) &&
1663 dtrace_priv_proc_common_nocd())
1664 return (1);
1665
1666 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1667
1668 return (0);
1669 }
1670
1671 static int
1672 dtrace_priv_proc(dtrace_state_t *state)
1673 {
1674 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1675 return (1);
1676
1677 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1678
1679 return (0);
1680 }
1681
1682 static int
1683 dtrace_priv_kernel(dtrace_state_t *state)
1684 {
1685 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1686 return (1);
1687
1688 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1689
1690 return (0);
1691 }
1692
1693 static int
1694 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1695 {
1696 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1697 return (1);
1698
1699 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1700
1701 return (0);
1702 }
1703
1704 /*
1705 * Determine if the dte_cond of the specified ECB allows for processing of
1706 * the current probe to continue. Note that this routine may allow continued
1707 * processing, but with access(es) stripped from the mstate's dtms_access
1708 * field.
1709 */
1710 static int
1711 dtrace_priv_probe(dtrace_state_t *state, dtrace_mstate_t *mstate,
1712 dtrace_ecb_t *ecb)
1713 {
1714 dtrace_probe_t *probe = ecb->dte_probe;
1715 dtrace_provider_t *prov = probe->dtpr_provider;
1716 dtrace_pops_t *pops = &prov->dtpv_pops;
1717 int mode = DTRACE_MODE_NOPRIV_DROP;
1718
1719 ASSERT(ecb->dte_cond);
1720
1721 #ifdef illumos
1722 if (pops->dtps_mode != NULL) {
1723 mode = pops->dtps_mode(prov->dtpv_arg,
1724 probe->dtpr_id, probe->dtpr_arg);
1725
1726 ASSERT((mode & DTRACE_MODE_USER) ||
1727 (mode & DTRACE_MODE_KERNEL));
1728 ASSERT((mode & DTRACE_MODE_NOPRIV_RESTRICT) ||
1729 (mode & DTRACE_MODE_NOPRIV_DROP));
1730 }
1731
1732 /*
1733 * If the dte_cond bits indicate that this consumer is only allowed to
1734 * see user-mode firings of this probe, call the provider's dtps_mode()
1735 * entry point to check that the probe was fired while in a user
1736 * context. If that's not the case, use the policy specified by the
1737 * provider to determine if we drop the probe or merely restrict
1738 * operation.
1739 */
1740 if (ecb->dte_cond & DTRACE_COND_USERMODE) {
1741 ASSERT(mode != DTRACE_MODE_NOPRIV_DROP);
1742
1743 if (!(mode & DTRACE_MODE_USER)) {
1744 if (mode & DTRACE_MODE_NOPRIV_DROP)
1745 return (0);
1746
1747 mstate->dtms_access &= ~DTRACE_ACCESS_ARGS;
1748 }
1749 }
1750 #endif
1751
1752 /*
1753 * This is more subtle than it looks. We have to be absolutely certain
1754 * that CRED() isn't going to change out from under us so it's only
1755 * legit to examine that structure if we're in constrained situations.
1756 	 * Currently, the only time we'll do this check is if a non-super-user
1757 * has enabled the profile or syscall providers -- providers that
1758 * allow visibility of all processes. For the profile case, the check
1759 * above will ensure that we're examining a user context.
1760 */
1761 if (ecb->dte_cond & DTRACE_COND_OWNER) {
1762 cred_t *cr;
1763 cred_t *s_cr = state->dts_cred.dcr_cred;
1764 proc_t *proc;
1765
1766 ASSERT(s_cr != NULL);
1767
1768 #ifdef __NetBSD__
1769 uid_t uid = kauth_cred_getuid(s_cr);
1770 gid_t gid = kauth_cred_getgid(s_cr);
1771
1772 if ((cr = CRED()) == NULL ||
1773 uid != kauth_cred_geteuid(cr) ||
1774 uid != kauth_cred_getuid(cr) ||
1775 uid != kauth_cred_getsvuid(cr) ||
1776 gid != kauth_cred_getegid(cr) ||
1777 gid != kauth_cred_getgid(cr) ||
1778 gid != kauth_cred_getsvgid(cr) ||
1779 (proc = ttoproc(curthread)) == NULL ||
1780 (proc->p_flag & SNOCD)) {
1781 if (mode & DTRACE_MODE_NOPRIV_DROP)
1782 return (0);
1783 }
1784 #else /* __NetBSD__ */
1785 if ((cr = CRED()) == NULL ||
1786 s_cr->cr_uid != cr->cr_uid ||
1787 s_cr->cr_uid != cr->cr_ruid ||
1788 s_cr->cr_uid != cr->cr_suid ||
1789 s_cr->cr_gid != cr->cr_gid ||
1790 s_cr->cr_gid != cr->cr_rgid ||
1791 s_cr->cr_gid != cr->cr_sgid ||
1792 (proc = ttoproc(curthread)) == NULL ||
1793 (proc->p_flag & SNOCD)) {
1794 if (mode & DTRACE_MODE_NOPRIV_DROP)
1795 return (0);
1796
1797 #ifdef illumos
1798 mstate->dtms_access &= ~DTRACE_ACCESS_PROC;
1799 #endif
1800 }
1801 #endif /* __NetBSD__ */
1802 }
1803
1804 #ifdef illumos
1805 /*
1806 * If our dte_cond is set to DTRACE_COND_ZONEOWNER and we are not
1807 * in our zone, check to see if our mode policy is to restrict rather
1808 * than to drop; if to restrict, strip away both DTRACE_ACCESS_PROC
1809 * and DTRACE_ACCESS_ARGS
1810 */
1811 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
1812 cred_t *cr;
1813 cred_t *s_cr = state->dts_cred.dcr_cred;
1814
1815 ASSERT(s_cr != NULL);
1816
1817 if ((cr = CRED()) == NULL ||
1818 s_cr->cr_zone->zone_id != cr->cr_zone->zone_id) {
1819 if (mode & DTRACE_MODE_NOPRIV_DROP)
1820 return (0);
1821
1822 mstate->dtms_access &=
1823 ~(DTRACE_ACCESS_PROC | DTRACE_ACCESS_ARGS);
1824 }
1825 }
1826 #endif
1827
1828 return (1);
1829 }
1830
1831 /*
1832 * Note: not called from probe context. This function is called
1833 * asynchronously (and at a regular interval) from outside of probe context to
1834 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1835 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1836 */
1837 void
1838 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1839 {
1840 dtrace_dynvar_t *dirty;
1841 dtrace_dstate_percpu_t *dcpu;
1842 dtrace_dynvar_t **rinsep;
1843 int i, j, work = 0;
1844
1845 for (i = 0; i < NCPU; i++) {
1846 dcpu = &dstate->dtds_percpu[i];
1847
1848 rinsep = &dcpu->dtdsc_rinsing;
1849
1850 /*
1851 * If the dirty list is NULL, there is no dirty work to do.
1852 */
1853 if (dcpu->dtdsc_dirty == NULL)
1854 continue;
1855
1856 if (dcpu->dtdsc_rinsing != NULL) {
1857 /*
1858 * If the rinsing list is non-NULL, then it is because
1859 * this CPU was selected to accept another CPU's
1860 * dirty list -- and since that time, dirty buffers
1861 * have accumulated. This is a highly unlikely
1862 * condition, but we choose to ignore the dirty
1863 			 * buffers -- they'll be picked up in a future cleanse.
1864 */
1865 continue;
1866 }
1867
1868 if (dcpu->dtdsc_clean != NULL) {
1869 /*
1870 * If the clean list is non-NULL, then we're in a
1871 * situation where a CPU has done deallocations (we
1872 * have a non-NULL dirty list) but no allocations (we
1873 * also have a non-NULL clean list). We can't simply
1874 * move the dirty list into the clean list on this
1875 * CPU, yet we also don't want to allow this condition
1876 * to persist, lest a short clean list prevent a
1877 * massive dirty list from being cleaned (which in
1878 * turn could lead to otherwise avoidable dynamic
1879 * drops). To deal with this, we look for some CPU
1880 * with a NULL clean list, NULL dirty list, and NULL
1881 * rinsing list -- and then we borrow this CPU to
1882 * rinse our dirty list.
1883 */
1884 for (j = 0; j < NCPU; j++) {
1885 dtrace_dstate_percpu_t *rinser;
1886
1887 rinser = &dstate->dtds_percpu[j];
1888
1889 if (rinser->dtdsc_rinsing != NULL)
1890 continue;
1891
1892 if (rinser->dtdsc_dirty != NULL)
1893 continue;
1894
1895 if (rinser->dtdsc_clean != NULL)
1896 continue;
1897
1898 rinsep = &rinser->dtdsc_rinsing;
1899 break;
1900 }
1901
1902 if (j == NCPU) {
1903 /*
1904 * We were unable to find another CPU that
1905 * could accept this dirty list -- we are
1906 * therefore unable to clean it now.
1907 */
1908 dtrace_dynvar_failclean++;
1909 continue;
1910 }
1911 }
1912
1913 work = 1;
1914
1915 /*
1916 * Atomically move the dirty list aside.
1917 */
1918 do {
1919 dirty = dcpu->dtdsc_dirty;
1920
1921 /*
1922 * Before we zap the dirty list, set the rinsing list.
1923 * (This allows for a potential assertion in
1924 * dtrace_dynvar(): if a free dynamic variable appears
1925 * on a hash chain, either the dirty list or the
1926 * rinsing list for some CPU must be non-NULL.)
1927 */
1928 *rinsep = dirty;
1929 dtrace_membar_producer();
1930 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1931 dirty, NULL) != dirty);
1932 }
1933
1934 if (!work) {
1935 /*
1936 * We have no work to do; we can simply return.
1937 */
1938 return;
1939 }
1940
1941 dtrace_sync();
1942
1943 for (i = 0; i < NCPU; i++) {
1944 dcpu = &dstate->dtds_percpu[i];
1945
1946 if (dcpu->dtdsc_rinsing == NULL)
1947 continue;
1948
1949 /*
1950 * We are now guaranteed that no hash chain contains a pointer
1951 * into this dirty list; we can make it clean.
1952 */
1953 ASSERT(dcpu->dtdsc_clean == NULL);
1954 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1955 dcpu->dtdsc_rinsing = NULL;
1956 }
1957
1958 /*
1959 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1960 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1961 * This prevents a race whereby a CPU incorrectly decides that
1962 * the state should be something other than DTRACE_DSTATE_CLEAN
1963 * after dtrace_dynvar_clean() has completed.
1964 */
1965 dtrace_sync();
1966
1967 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1968 }
1969
1970 /*
1971  * Depending on the value of the op parameter, this function looks up,
1972 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1973 * allocation is requested, this function will return a pointer to a
1974 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1975 * variable can be allocated. If NULL is returned, the appropriate counter
1976 * will be incremented.
1977 */
1978 dtrace_dynvar_t *
1979 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1980 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1981 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1982 {
1983 uint64_t hashval = DTRACE_DYNHASH_VALID;
1984 dtrace_dynhash_t *hash = dstate->dtds_hash;
1985 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1986 processorid_t me = curcpu_id, cpu = me;
1987 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1988 size_t bucket, ksize;
1989 size_t chunksize = dstate->dtds_chunksize;
1990 uintptr_t kdata, lock, nstate;
1991 uint_t i;
1992
1993 ASSERT(nkeys != 0);
1994
1995 /*
1996 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1997 * algorithm. For the by-value portions, we perform the algorithm in
1998 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1999 * bit, and seems to have only a minute effect on distribution. For
2000 * the by-reference data, we perform "One-at-a-time" iterating (safely)
2001 * over each referenced byte. It's painful to do this, but it's much
2002 * better than pathological hash distribution. The efficacy of the
2003 * hashing algorithm (and a comparison with other algorithms) may be
2004 * found by running the ::dtrace_dynstat MDB dcmd.
2005 */
2006 for (i = 0; i < nkeys; i++) {
2007 if (key[i].dttk_size == 0) {
2008 uint64_t val = key[i].dttk_value;
2009
2010 hashval += (val >> 48) & 0xffff;
2011 hashval += (hashval << 10);
2012 hashval ^= (hashval >> 6);
2013
2014 hashval += (val >> 32) & 0xffff;
2015 hashval += (hashval << 10);
2016 hashval ^= (hashval >> 6);
2017
2018 hashval += (val >> 16) & 0xffff;
2019 hashval += (hashval << 10);
2020 hashval ^= (hashval >> 6);
2021
2022 hashval += val & 0xffff;
2023 hashval += (hashval << 10);
2024 hashval ^= (hashval >> 6);
2025 } else {
2026 /*
2027 * This is incredibly painful, but it beats the hell
2028 * out of the alternative.
2029 */
2030 uint64_t j, size = key[i].dttk_size;
2031 uintptr_t base = (uintptr_t)key[i].dttk_value;
2032
2033 if (!dtrace_canload(base, size, mstate, vstate))
2034 break;
2035
2036 for (j = 0; j < size; j++) {
2037 hashval += dtrace_load8(base + j);
2038 hashval += (hashval << 10);
2039 hashval ^= (hashval >> 6);
2040 }
2041 }
2042 }
2043
2044 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
2045 return (NULL);
2046
2047 hashval += (hashval << 3);
2048 hashval ^= (hashval >> 11);
2049 hashval += (hashval << 15);
2050
2051 /*
2052 * There is a remote chance (ideally, 1 in 2^31) that our hashval
2053 * comes out to be one of our two sentinel hash values. If this
2054 * actually happens, we set the hashval to be a value known to be a
2055 * non-sentinel value.
2056 */
2057 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
2058 hashval = DTRACE_DYNHASH_VALID;
2059
2060 /*
2061 * Yes, it's painful to do a divide here. If the cycle count becomes
2062 * important here, tricks can be pulled to reduce it. (However, it's
2063 * critical that hash collisions be kept to an absolute minimum;
2064 * they're much more painful than a divide.) It's better to have a
2065 * solution that generates few collisions and still keeps things
2066 * relatively simple.
2067 */
2068 bucket = hashval % dstate->dtds_hashsize;
2069
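	/*
	 * Deallocation is serialized per hash bucket:  the low bit of
	 * dtdh_lock acts as a spin lock, and the word doubles as a
	 * generation count that the lock-free lookups below use to detect
	 * a bucket changing underneath them.
	 */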
2070 if (op == DTRACE_DYNVAR_DEALLOC) {
2071 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
2072
2073 for (;;) {
2074 while ((lock = *lockp) & 1)
2075 continue;
2076
2077 if (dtrace_casptr((volatile void *)lockp,
2078 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock)
2079 break;
2080 }
2081
2082 dtrace_membar_producer();
2083 }
2084
2085 top:
2086 prev = NULL;
2087 lock = hash[bucket].dtdh_lock;
2088
2089 dtrace_membar_consumer();
2090
2091 start = hash[bucket].dtdh_chain;
2092 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
2093 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
2094 op != DTRACE_DYNVAR_DEALLOC));
2095
2096 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
2097 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
2098 dtrace_key_t *dkey = &dtuple->dtt_key[0];
2099
2100 if (dvar->dtdv_hashval != hashval) {
2101 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
2102 /*
2103 * We've reached the sink, and therefore the
2104 * end of the hash chain; we can kick out of
2105 * the loop knowing that we have seen a valid
2106 * snapshot of state.
2107 */
2108 ASSERT(dvar->dtdv_next == NULL);
2109 ASSERT(dvar == &dtrace_dynhash_sink);
2110 break;
2111 }
2112
2113 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
2114 /*
2115 * We've gone off the rails: somewhere along
2116 * the line, one of the members of this hash
2117 * chain was deleted. Note that we could also
2118 * detect this by simply letting this loop run
2119 * to completion, as we would eventually hit
2120 * the end of the dirty list. However, we
2121 * want to avoid running the length of the
2122 * dirty list unnecessarily (it might be quite
2123 * long), so we catch this as early as
2124 * possible by detecting the hash marker. In
2125 * this case, we simply set dvar to NULL and
2126 * break; the conditional after the loop will
2127 * send us back to top.
2128 */
2129 dvar = NULL;
2130 break;
2131 }
2132
2133 goto next;
2134 }
2135
2136 if (dtuple->dtt_nkeys != nkeys)
2137 goto next;
2138
2139 for (i = 0; i < nkeys; i++, dkey++) {
2140 if (dkey->dttk_size != key[i].dttk_size)
2141 goto next; /* size or type mismatch */
2142
2143 if (dkey->dttk_size != 0) {
2144 if (dtrace_bcmp(
2145 (void *)(uintptr_t)key[i].dttk_value,
2146 (void *)(uintptr_t)dkey->dttk_value,
2147 dkey->dttk_size))
2148 goto next;
2149 } else {
2150 if (dkey->dttk_value != key[i].dttk_value)
2151 goto next;
2152 }
2153 }
2154
2155 if (op != DTRACE_DYNVAR_DEALLOC)
2156 return (dvar);
2157
2158 ASSERT(dvar->dtdv_next == NULL ||
2159 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
2160
2161 if (prev != NULL) {
2162 ASSERT(hash[bucket].dtdh_chain != dvar);
2163 ASSERT(start != dvar);
2164 ASSERT(prev->dtdv_next == dvar);
2165 prev->dtdv_next = dvar->dtdv_next;
2166 } else {
2167 if (dtrace_casptr(&hash[bucket].dtdh_chain,
2168 start, dvar->dtdv_next) != start) {
2169 /*
2170 * We have failed to atomically swing the
2171 * hash table head pointer, presumably because
2172 * of a conflicting allocation on another CPU.
2173 * We need to reread the hash chain and try
2174 * again.
2175 */
2176 goto top;
2177 }
2178 }
2179
2180 dtrace_membar_producer();
2181
2182 /*
2183 * Now set the hash value to indicate that it's free.
2184 */
2185 ASSERT(hash[bucket].dtdh_chain != dvar);
2186 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2187
2188 dtrace_membar_producer();
2189
2190 /*
2191 * Set the next pointer to point at the dirty list, and
2192 * atomically swing the dirty pointer to the newly freed dvar.
2193 */
2194 do {
2195 next = dcpu->dtdsc_dirty;
2196 dvar->dtdv_next = next;
2197 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
2198
2199 /*
2200 * Finally, unlock this hash bucket.
2201 */
2202 ASSERT(hash[bucket].dtdh_lock == lock);
2203 ASSERT(lock & 1);
2204 hash[bucket].dtdh_lock++;
2205
2206 return (NULL);
2207 next:
2208 prev = dvar;
2209 continue;
2210 }
2211
2212 if (dvar == NULL) {
2213 /*
2214 * If dvar is NULL, it is because we went off the rails:
2215 * one of the elements that we traversed in the hash chain
2216 * was deleted while we were traversing it. In this case,
2217 * we assert that we aren't doing a dealloc (deallocs lock
2218 * the hash bucket to prevent themselves from racing with
2219 * one another), and retry the hash chain traversal.
2220 */
2221 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
2222 goto top;
2223 }
2224
2225 if (op != DTRACE_DYNVAR_ALLOC) {
2226 /*
2227 * If we are not to allocate a new variable, we want to
2228 * return NULL now. Before we return, check that the value
2229 * of the lock word hasn't changed. If it has, we may have
2230 * seen an inconsistent snapshot.
2231 */
2232 if (op == DTRACE_DYNVAR_NOALLOC) {
2233 if (hash[bucket].dtdh_lock != lock)
2234 goto top;
2235 } else {
2236 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
2237 ASSERT(hash[bucket].dtdh_lock == lock);
2238 ASSERT(lock & 1);
2239 hash[bucket].dtdh_lock++;
2240 }
2241
2242 return (NULL);
2243 }
2244
2245 /*
2246 * We need to allocate a new dynamic variable. The size we need is the
2247 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
2248 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
2249 * the size of any referred-to data (dsize). We then round the final
2250 * size up to the chunksize for allocation.
2251 */
2252 for (ksize = 0, i = 0; i < nkeys; i++)
2253 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
2254
2255 /*
2256 * This should be pretty much impossible, but could happen if, say,
2257 * strange DIF specified the tuple. Ideally, this should be an
2258 * assertion and not an error condition -- but that requires that the
2259 * chunksize calculation in dtrace_difo_chunksize() be absolutely
2260 * bullet-proof. (That is, it must not be able to be fooled by
2261 * malicious DIF.) Given the lack of backwards branches in DIF,
2262 * solving this would presumably not amount to solving the Halting
2263 * Problem -- but it still seems awfully hard.
2264 */
2265 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
2266 ksize + dsize > chunksize) {
2267 dcpu->dtdsc_drops++;
2268 return (NULL);
2269 }
2270
2271 nstate = DTRACE_DSTATE_EMPTY;
2272
2273 do {
2274 retry:
2275 free = dcpu->dtdsc_free;
2276
2277 if (free == NULL) {
2278 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
2279 void *rval;
2280
2281 if (clean == NULL) {
2282 /*
2283 * We're out of dynamic variable space on
2284 * this CPU. Unless we have tried all CPUs,
2285 * we'll try to allocate from a different
2286 * CPU.
2287 */
2288 switch (dstate->dtds_state) {
2289 case DTRACE_DSTATE_CLEAN: {
2290 void *sp = &dstate->dtds_state;
2291
2292 if (++cpu >= NCPU)
2293 cpu = 0;
2294
2295 if (dcpu->dtdsc_dirty != NULL &&
2296 nstate == DTRACE_DSTATE_EMPTY)
2297 nstate = DTRACE_DSTATE_DIRTY;
2298
2299 if (dcpu->dtdsc_rinsing != NULL)
2300 nstate = DTRACE_DSTATE_RINSING;
2301
2302 dcpu = &dstate->dtds_percpu[cpu];
2303
2304 if (cpu != me)
2305 goto retry;
2306
2307 (void) dtrace_cas32(sp,
2308 DTRACE_DSTATE_CLEAN, nstate);
2309
2310 /*
2311 * To increment the correct bean
2312 * counter, take another lap.
2313 */
2314 goto retry;
2315 }
2316
2317 case DTRACE_DSTATE_DIRTY:
2318 dcpu->dtdsc_dirty_drops++;
2319 break;
2320
2321 case DTRACE_DSTATE_RINSING:
2322 dcpu->dtdsc_rinsing_drops++;
2323 break;
2324
2325 case DTRACE_DSTATE_EMPTY:
2326 dcpu->dtdsc_drops++;
2327 break;
2328 }
2329
2330 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
2331 return (NULL);
2332 }
2333
2334 /*
2335 * The clean list appears to be non-empty. We want to
2336 * move the clean list to our free list; we start by
2337 * moving the clean pointer aside.
2338 */
2339 if (dtrace_casptr(&dcpu->dtdsc_clean,
2340 clean, NULL) != clean) {
2341 /*
2342 * We are in one of two situations:
2343 *
2344 * (a) The clean list was switched to the
2345 * free list by another CPU.
2346 *
2347 * (b) The clean list was added to by the
2348 * cleansing cyclic.
2349 *
2350 * In either of these situations, we can
2351 * just reattempt the free list allocation.
2352 */
2353 goto retry;
2354 }
2355
2356 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
2357
2358 /*
2359 * Now we'll move the clean list to the free list.
2360 * It's impossible for this to fail: the only way
2361 * the free list can be updated is through this
2362 * code path, and only one CPU can own the clean list.
2363 * Thus, it would only be possible for this to fail if
2364 * this code were racing with dtrace_dynvar_clean().
2365 * (That is, if dtrace_dynvar_clean() updated the clean
2366 * list, and we ended up racing to update the free
2367 * list.) This race is prevented by the dtrace_sync()
2368 * in dtrace_dynvar_clean() -- which flushes the
2369 * owners of the clean lists out before resetting
2370 * the clean lists.
2371 */
2372 dcpu = &dstate->dtds_percpu[me];
2373 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
2374 ASSERT(rval == NULL);
2375 goto retry;
2376 }
2377
2378 dvar = free;
2379 new_free = dvar->dtdv_next;
2380 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
2381
2382 /*
2383 * We have now allocated a new chunk. We copy the tuple keys into the
2384 * tuple array and copy any referenced key data into the data space
2385 * following the tuple array. As we do this, we relocate dttk_value
2386 * in the final tuple to point to the key data address in the chunk.
2387 */
2388 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
2389 dvar->dtdv_data = (void *)(kdata + ksize);
2390 dvar->dtdv_tuple.dtt_nkeys = nkeys;
2391
2392 for (i = 0; i < nkeys; i++) {
2393 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
2394 size_t kesize = key[i].dttk_size;
2395
2396 if (kesize != 0) {
2397 dtrace_bcopy(
2398 (const void *)(uintptr_t)key[i].dttk_value,
2399 (void *)kdata, kesize);
2400 dkey->dttk_value = kdata;
2401 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
2402 } else {
2403 dkey->dttk_value = key[i].dttk_value;
2404 }
2405
2406 dkey->dttk_size = kesize;
2407 }
2408
2409 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
2410 dvar->dtdv_hashval = hashval;
2411 dvar->dtdv_next = start;
2412
2413 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
2414 return (dvar);
2415
2416 /*
2417 * The cas has failed. Either another CPU is adding an element to
2418 * this hash chain, or another CPU is deleting an element from this
2419 * hash chain. The simplest way to deal with both of these cases
2420 * (though not necessarily the most efficient) is to free our
2421 * allocated block and re-attempt it all. Note that the free is
2422 * to the dirty list and _not_ to the free list. This is to prevent
2423 * races with allocators, above.
2424 */
2425 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
2426
2427 dtrace_membar_producer();
2428
2429 do {
2430 free = dcpu->dtdsc_dirty;
2431 dvar->dtdv_next = free;
2432 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
2433
2434 goto top;
2435 }
2436
2437 /*ARGSUSED*/
2438 static void
2439 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
2440 {
2441 if ((int64_t)nval < (int64_t)*oval)
2442 *oval = nval;
2443 }
2444
2445 /*ARGSUSED*/
2446 static void
2447 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
2448 {
2449 if ((int64_t)nval > (int64_t)*oval)
2450 *oval = nval;
2451 }
2452
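/*
 * quantize() aggregating action:  bump the power-of-two bucket covering
 * nval by incr.  Buckets below DTRACE_QUANTIZE_ZEROBUCKET hold negative
 * values and buckets above it hold positive values; the boundary of
 * bucket i is given by DTRACE_QUANTIZE_BUCKETVAL(i).
 */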
2453 static void
2454 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
2455 {
2456 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
2457 int64_t val = (int64_t)nval;
2458
2459 if (val < 0) {
2460 for (i = 0; i < zero; i++) {
2461 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
2462 quanta[i] += incr;
2463 return;
2464 }
2465 }
2466 } else {
2467 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
2468 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
2469 quanta[i - 1] += incr;
2470 return;
2471 }
2472 }
2473
2474 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
2475 return;
2476 }
2477
2478 ASSERT(0);
2479 }
2480
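/*
 * lquantize() aggregating action.  The base, step and level count are
 * encoded in the first word of the aggregation data (decoded via the
 * DTRACE_LQUANTIZE_* macros); lquanta[0] is the underflow bucket and
 * lquanta[levels + 1] is the overflow bucket.
 */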
2481 static void
2482 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
2483 {
2484 uint64_t arg = *lquanta++;
2485 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
2486 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
2487 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
2488 int32_t val = (int32_t)nval, level;
2489
2490 ASSERT(step != 0);
2491 ASSERT(levels != 0);
2492
2493 if (val < base) {
2494 /*
2495 * This is an underflow.
2496 */
2497 lquanta[0] += incr;
2498 return;
2499 }
2500
2501 level = (val - base) / step;
2502
2503 if (level < levels) {
2504 lquanta[level + 1] += incr;
2505 return;
2506 }
2507
2508 /*
2509 * This is an overflow.
2510 */
2511 lquanta[levels + 1] += incr;
2512 }
2513
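/*
 * Map a value onto its llquantize() bucket index given the factor, the
 * low and high orders of magnitude, and the number of linear steps per
 * order of magnitude.  Bucket 0 is the underflow bucket; the final bucket
 * catches all values at or above factor raised to (high + 1).
 */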
2514 static int
2515 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low,
2516 uint16_t high, uint16_t nsteps, int64_t value)
2517 {
2518 int64_t this = 1, last, next;
2519 int base = 1, order;
2520
2521 ASSERT(factor <= nsteps);
2522 ASSERT(nsteps % factor == 0);
2523
2524 for (order = 0; order < low; order++)
2525 this *= factor;
2526
2527 /*
2528 * If our value is less than our factor taken to the power of the
2529 * low order of magnitude, it goes into the zeroth bucket.
2530 */
2531 if (value < (last = this))
2532 return (0);
2533
2534 for (this *= factor; order <= high; order++) {
2535 int nbuckets = this > nsteps ? nsteps : this;
2536
2537 if ((next = this * factor) < this) {
2538 /*
2539 * We should not generally get log/linear quantizations
2540 * with a high magnitude that allows 64-bits to
2541 * overflow, but we nonetheless protect against this
2542 * by explicitly checking for overflow, and clamping
2543 * our value accordingly.
2544 */
2545 value = this - 1;
2546 }
2547
2548 if (value < this) {
2549 /*
2550 * If our value lies within this order of magnitude,
2551 * determine its position by taking the offset within
2552 * the order of magnitude, dividing by the bucket
2553 * width, and adding to our (accumulated) base.
2554 */
2555 return (base + (value - last) / (this / nbuckets));
2556 }
2557
2558 base += nbuckets - (nbuckets / factor);
2559 last = this;
2560 this = next;
2561 }
2562
2563 /*
2564 * Our value is greater than or equal to our factor taken to the
2565 * power of one plus the high magnitude -- return the top bucket.
2566 */
2567 return (base);
2568 }
2569
2570 static void
2571 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr)
2572 {
2573 uint64_t arg = *llquanta++;
2574 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
2575 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
2576 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
2577 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
2578
2579 llquanta[dtrace_aggregate_llquantize_bucket(factor,
2580 low, high, nsteps, nval)] += incr;
2581 }
2582
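/*
 * avg() aggregating action:  data[0] accumulates the number of values seen
 * and data[1] their sum; the consumer computes the quotient when the
 * aggregation is snapshotted.
 */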
2583 /*ARGSUSED*/
2584 static void
2585 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
2586 {
2587 data[0]++;
2588 data[1] += nval;
2589 }
2590
2591 /*ARGSUSED*/
2592 static void
2593 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
2594 {
2595 int64_t snval = (int64_t)nval;
2596 uint64_t tmp[2];
2597
2598 data[0]++;
2599 data[1] += nval;
2600
2601 /*
2602 * What we want to say here is:
2603 *
2604 * data[2] += nval * nval;
2605 *
2606 * But given that nval is 64-bit, we could easily overflow, so
2607 * we do this as 128-bit arithmetic.
2608 */
2609 if (snval < 0)
2610 snval = -snval;
2611
2612 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
2613 dtrace_add_128(data + 2, tmp, data + 2);
2614 }
2615
2616 /*ARGSUSED*/
2617 static void
2618 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
2619 {
2620 *oval = *oval + 1;
2621 }
2622
2623 /*ARGSUSED*/
2624 static void
2625 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
2626 {
2627 *oval += nval;
2628 }
2629
2630 /*
2631 * Aggregate given the tuple in the principal data buffer, and the aggregating
2632 * action denoted by the specified dtrace_aggregation_t. The aggregation
2633 * buffer is specified as the buf parameter. This routine does not return
2634 * failure; if there is no space in the aggregation buffer, the data will be
2635 * dropped, and a corresponding counter incremented.
2636 */
2637 static void
2638 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
2639 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
2640 {
2641 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
2642 uint32_t i, ndx, size, fsize;
2643 uint32_t align = sizeof (uint64_t) - 1;
2644 dtrace_aggbuffer_t *agb;
2645 dtrace_aggkey_t *key;
2646 uint32_t hashval = 0, limit, isstr;
2647 caddr_t tomax, data, kdata;
2648 dtrace_actkind_t action;
2649 dtrace_action_t *act;
2650 uintptr_t offs;
2651
2652 if (buf == NULL)
2653 return;
2654
2655 if (!agg->dtag_hasarg) {
2656 /*
2657 * Currently, only quantize() and lquantize() take additional
2658 * arguments, and they have the same semantics: an increment
2659 * value that defaults to 1 when not present. If additional
2660 * aggregating actions take arguments, the setting of the
2661 * default argument value will presumably have to become more
2662 * sophisticated...
2663 */
2664 arg = 1;
2665 }
2666
2667 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
2668 size = rec->dtrd_offset - agg->dtag_base;
2669 fsize = size + rec->dtrd_size;
2670
2671 ASSERT(dbuf->dtb_tomax != NULL);
2672 data = dbuf->dtb_tomax + offset + agg->dtag_base;
2673
2674 if ((tomax = buf->dtb_tomax) == NULL) {
2675 dtrace_buffer_drop(buf);
2676 return;
2677 }
2678
2679 /*
2680 * The metastructure is always at the bottom of the buffer.
2681 */
2682 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
2683 sizeof (dtrace_aggbuffer_t));
2684
2685 if (buf->dtb_offset == 0) {
2686 /*
2687 * We just kludge up approximately 1/8th of the size to be
2688 * buckets. If this guess ends up being routinely
2689 * off-the-mark, we may need to dynamically readjust this
2690 * based on past performance.
2691 */
2692 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
2693
2694 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
2695 (uintptr_t)tomax || hashsize == 0) {
2696 /*
2697 * We've been given a ludicrously small buffer;
2698 * increment our drop count and leave.
2699 */
2700 dtrace_buffer_drop(buf);
2701 return;
2702 }
2703
2704 /*
2705 		 * And now, a pathetic attempt to try to get an odd (or
2706 * perchance, a prime) hash size for better hash distribution.
2707 */
2708 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
2709 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
2710
2711 agb->dtagb_hashsize = hashsize;
2712 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
2713 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
2714 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
2715
2716 for (i = 0; i < agb->dtagb_hashsize; i++)
2717 agb->dtagb_hash[i] = NULL;
2718 }
2719
2720 ASSERT(agg->dtag_first != NULL);
2721 ASSERT(agg->dtag_first->dta_intuple);
2722
2723 /*
2724 * Calculate the hash value based on the key. Note that we _don't_
2725 * include the aggid in the hashing (but we will store it as part of
2726 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2727 * algorithm: a simple, quick algorithm that has no known funnels, and
2728 * gets good distribution in practice. The efficacy of the hashing
2729 * algorithm (and a comparison with other algorithms) may be found by
2730 * running the ::dtrace_aggstat MDB dcmd.
2731 */
2732 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2733 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2734 limit = i + act->dta_rec.dtrd_size;
2735 ASSERT(limit <= size);
2736 isstr = DTRACEACT_ISSTRING(act);
2737
2738 for (; i < limit; i++) {
2739 hashval += data[i];
2740 hashval += (hashval << 10);
2741 hashval ^= (hashval >> 6);
2742
2743 if (isstr && data[i] == '\0')
2744 break;
2745 }
2746 }
2747
2748 hashval += (hashval << 3);
2749 hashval ^= (hashval >> 11);
2750 hashval += (hashval << 15);
2751
2752 /*
2753 * Yes, the divide here is expensive -- but it's generally the least
2754 * of the performance issues given the amount of data that we iterate
2755 * over to compute hash values, compare data, etc.
2756 */
2757 ndx = hashval % agb->dtagb_hashsize;
2758
2759 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2760 ASSERT((caddr_t)key >= tomax);
2761 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2762
2763 if (hashval != key->dtak_hashval || key->dtak_size != size)
2764 continue;
2765
2766 kdata = key->dtak_data;
2767 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2768
2769 for (act = agg->dtag_first; act->dta_intuple;
2770 act = act->dta_next) {
2771 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2772 limit = i + act->dta_rec.dtrd_size;
2773 ASSERT(limit <= size);
2774 isstr = DTRACEACT_ISSTRING(act);
2775
2776 for (; i < limit; i++) {
2777 if (kdata[i] != data[i])
2778 goto next;
2779
2780 if (isstr && data[i] == '\0')
2781 break;
2782 }
2783 }
2784
2785 if (action != key->dtak_action) {
2786 /*
2787 * We are aggregating on the same value in the same
2788 * aggregation with two different aggregating actions.
2789 * (This should have been picked up in the compiler,
2790 * so we may be dealing with errant or devious DIF.)
2791 * This is an error condition; we indicate as much,
2792 * and return.
2793 */
2794 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2795 return;
2796 }
2797
2798 /*
2799 * This is a hit: we need to apply the aggregator to
2800 * the value at this key.
2801 */
2802 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2803 return;
2804 next:
2805 continue;
2806 }
2807
2808 /*
2809 * We didn't find it. We need to allocate some zero-filled space,
2810 * link it into the hash table appropriately, and apply the aggregator
2811 * to the (zero-filled) value.
2812 */
2813 offs = buf->dtb_offset;
2814 while (offs & (align - 1))
2815 offs += sizeof (uint32_t);
2816
2817 /*
2818 * If we don't have enough room to both allocate a new key _and_
2819 * its associated data, increment the drop count and return.
2820 */
2821 if ((uintptr_t)tomax + offs + fsize >
2822 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2823 dtrace_buffer_drop(buf);
2824 return;
2825 }
2826
2827 /*CONSTCOND*/
2828 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2829 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2830 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2831
2832 key->dtak_data = kdata = tomax + offs;
2833 buf->dtb_offset = offs + fsize;
2834
2835 /*
2836 * Now copy the data across.
2837 */
2838 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2839
2840 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2841 kdata[i] = data[i];
2842
2843 /*
2844 * Because strings are not zeroed out by default, we need to iterate
2845 * looking for actions that store strings, and we need to explicitly
2846 * pad these strings out with zeroes.
2847 */
2848 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2849 int nul;
2850
2851 if (!DTRACEACT_ISSTRING(act))
2852 continue;
2853
2854 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2855 limit = i + act->dta_rec.dtrd_size;
2856 ASSERT(limit <= size);
2857
2858 for (nul = 0; i < limit; i++) {
2859 if (nul) {
2860 kdata[i] = '\0';
2861 continue;
2862 }
2863
2864 if (data[i] != '\0')
2865 continue;
2866
2867 nul = 1;
2868 }
2869 }
2870
2871 for (i = size; i < fsize; i++)
2872 kdata[i] = 0;
2873
2874 key->dtak_hashval = hashval;
2875 key->dtak_size = size;
2876 key->dtak_action = action;
2877 key->dtak_next = agb->dtagb_hash[ndx];
2878 agb->dtagb_hash[ndx] = key;
2879
2880 /*
2881 * Finally, apply the aggregator.
2882 */
2883 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2884 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2885 }
2886
2887 /*
2888 * Given consumer state, this routine finds a speculation in the INACTIVE
2889 * state and transitions it into the ACTIVE state. If there is no speculation
2890 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2891 * incremented -- it is up to the caller to take appropriate action.
2892 */
2893 static int
2894 dtrace_speculation(dtrace_state_t *state)
2895 {
2896 int i = 0;
2897 dtrace_speculation_state_t current;
2898 uint32_t *stat = &state->dts_speculations_unavail, count;
2899
2900 while (i < state->dts_nspeculations) {
2901 dtrace_speculation_t *spec = &state->dts_speculations[i];
2902
2903 current = spec->dtsp_state;
2904
2905 if (current != DTRACESPEC_INACTIVE) {
2906 if (current == DTRACESPEC_COMMITTINGMANY ||
2907 current == DTRACESPEC_COMMITTING ||
2908 current == DTRACESPEC_DISCARDING)
2909 stat = &state->dts_speculations_busy;
2910 i++;
2911 continue;
2912 }
2913
2914 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2915 current, DTRACESPEC_ACTIVE) == current)
2916 return (i + 1);
2917 }
2918
2919 /*
2920 * We couldn't find a speculation. If we found as much as a single
2921 * busy speculation buffer, we'll attribute this failure as "busy"
2922 * instead of "unavail".
2923 */
2924 do {
2925 count = *stat;
2926 } while (dtrace_cas32(stat, count, count + 1) != count);
2927
2928 return (0);
2929 }
2930
2931 /*
2932 * This routine commits an active speculation. If the specified speculation
2933 * is not in a valid state to perform a commit(), this routine will silently do
2934 * nothing. The state of the specified speculation is transitioned according
2935 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2936 */
2937 static void
2938 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2939 dtrace_specid_t which)
2940 {
2941 dtrace_speculation_t *spec;
2942 dtrace_buffer_t *src, *dest;
2943 uintptr_t daddr, saddr, dlimit, slimit;
2944 dtrace_speculation_state_t current, new = 0;
2945 intptr_t offs;
2946 uint64_t timestamp;
2947
2948 if (which == 0)
2949 return;
2950
2951 if (which > state->dts_nspeculations) {
2952 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2953 return;
2954 }
2955
2956 spec = &state->dts_speculations[which - 1];
2957 src = &spec->dtsp_buffer[cpu];
2958 dest = &state->dts_buffer[cpu];
2959
2960 do {
2961 current = spec->dtsp_state;
2962
2963 if (current == DTRACESPEC_COMMITTINGMANY)
2964 break;
2965
2966 switch (current) {
2967 case DTRACESPEC_INACTIVE:
2968 case DTRACESPEC_DISCARDING:
2969 return;
2970
2971 case DTRACESPEC_COMMITTING:
2972 /*
2973 * This is only possible if we are (a) commit()'ing
2974 * without having done a prior speculate() on this CPU
2975 * and (b) racing with another commit() on a different
2976 * CPU. There's nothing to do -- we just assert that
2977 * our offset is 0.
2978 */
2979 ASSERT(src->dtb_offset == 0);
2980 return;
2981
2982 case DTRACESPEC_ACTIVE:
2983 new = DTRACESPEC_COMMITTING;
2984 break;
2985
2986 case DTRACESPEC_ACTIVEONE:
2987 /*
2988 * This speculation is active on one CPU. If our
2989 * buffer offset is non-zero, we know that the one CPU
2990 * must be us. Otherwise, we are committing on a
2991 * different CPU from the speculate(), and we must
2992 * rely on being asynchronously cleaned.
2993 */
2994 if (src->dtb_offset != 0) {
2995 new = DTRACESPEC_COMMITTING;
2996 break;
2997 }
2998 /*FALLTHROUGH*/
2999
3000 case DTRACESPEC_ACTIVEMANY:
3001 new = DTRACESPEC_COMMITTINGMANY;
3002 break;
3003
3004 default:
3005 ASSERT(0);
3006 }
3007 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3008 current, new) != current);
3009
3010 /*
3011 * We have set the state to indicate that we are committing this
3012 * speculation. Now reserve the necessary space in the destination
3013 * buffer.
3014 */
3015 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
3016 sizeof (uint64_t), state, NULL)) < 0) {
3017 dtrace_buffer_drop(dest);
3018 goto out;
3019 }
3020
3021 /*
3022 * We have sufficient space to copy the speculative buffer into the
3023 * primary buffer. First, modify the speculative buffer, filling
3024 * in the timestamp of all entries with the current time. The data
3025 * must have the commit() time rather than the time it was traced,
3026 * so that all entries in the primary buffer are in timestamp order.
3027 */
3028 timestamp = dtrace_gethrtime();
3029 saddr = (uintptr_t)src->dtb_tomax;
3030 slimit = saddr + src->dtb_offset;
3031 while (saddr < slimit) {
3032 size_t size;
3033 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr;
3034
3035 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) {
3036 saddr += sizeof (dtrace_epid_t);
3037 continue;
3038 }
3039 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs);
3040 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size;
3041
3042 ASSERT3U(saddr + size, <=, slimit);
3043 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t));
3044 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX);
3045
3046 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp);
3047
3048 saddr += size;
3049 }
3050
3051 /*
3052 * Copy the buffer across. (Note that this is a
3053 	 * highly suboptimal bcopy(); in the unlikely event that this becomes
3054 * a serious performance issue, a high-performance DTrace-specific
3055 * bcopy() should obviously be invented.)
3056 */
3057 daddr = (uintptr_t)dest->dtb_tomax + offs;
3058 dlimit = daddr + src->dtb_offset;
3059 saddr = (uintptr_t)src->dtb_tomax;
3060
3061 /*
3062 * First, the aligned portion.
3063 */
3064 while (dlimit - daddr >= sizeof (uint64_t)) {
3065 *((uint64_t *)daddr) = *((uint64_t *)saddr);
3066
3067 daddr += sizeof (uint64_t);
3068 saddr += sizeof (uint64_t);
3069 }
3070
3071 /*
3072 * Now any left-over bit...
3073 */
3074 while (dlimit - daddr)
3075 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
3076
3077 /*
3078 * Finally, commit the reserved space in the destination buffer.
3079 */
3080 dest->dtb_offset = offs + src->dtb_offset;
3081
3082 out:
3083 /*
3084 * If we're lucky enough to be the only active CPU on this speculation
3085 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
3086 */
3087 if (current == DTRACESPEC_ACTIVE ||
3088 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
3089 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
3090 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
3091
3092 ASSERT(rval == DTRACESPEC_COMMITTING);
3093 }
3094
3095 src->dtb_offset = 0;
3096 src->dtb_xamot_drops += src->dtb_drops;
3097 src->dtb_drops = 0;
3098 }
3099
3100 /*
3101 * This routine discards an active speculation. If the specified speculation
3102 * is not in a valid state to perform a discard(), this routine will silently
3103 * do nothing. The state of the specified speculation is transitioned
3104 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
3105 */
3106 static void
3107 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
3108 dtrace_specid_t which)
3109 {
3110 dtrace_speculation_t *spec;
3111 dtrace_speculation_state_t current, new = 0;
3112 dtrace_buffer_t *buf;
3113
3114 if (which == 0)
3115 return;
3116
3117 if (which > state->dts_nspeculations) {
3118 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3119 return;
3120 }
3121
3122 spec = &state->dts_speculations[which - 1];
3123 buf = &spec->dtsp_buffer[cpu];
3124
3125 do {
3126 current = spec->dtsp_state;
3127
3128 switch (current) {
3129 case DTRACESPEC_INACTIVE:
3130 case DTRACESPEC_COMMITTINGMANY:
3131 case DTRACESPEC_COMMITTING:
3132 case DTRACESPEC_DISCARDING:
3133 return;
3134
3135 case DTRACESPEC_ACTIVE:
3136 case DTRACESPEC_ACTIVEMANY:
3137 new = DTRACESPEC_DISCARDING;
3138 break;
3139
3140 case DTRACESPEC_ACTIVEONE:
3141 if (buf->dtb_offset != 0) {
3142 new = DTRACESPEC_INACTIVE;
3143 } else {
3144 new = DTRACESPEC_DISCARDING;
3145 }
3146 break;
3147
3148 default:
3149 ASSERT(0);
3150 }
3151 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3152 current, new) != current);
3153
3154 buf->dtb_offset = 0;
3155 buf->dtb_drops = 0;
3156 }
3157
3158 /*
3159 * Note: not called from probe context. This function is called
3160 * asynchronously from cross call context to clean any speculations that are
3161 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
3162 * transitioned back to the INACTIVE state until all CPUs have cleaned the
3163 * speculation.
3164 */
3165 static void
3166 dtrace_speculation_clean_here(dtrace_state_t *state)
3167 {
3168 dtrace_icookie_t cookie;
3169 processorid_t cpu = curcpu_id;
3170 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
3171 dtrace_specid_t i;
3172
3173 cookie = dtrace_interrupt_disable();
3174
3175 if (dest->dtb_tomax == NULL) {
3176 dtrace_interrupt_enable(cookie);
3177 return;
3178 }
3179
3180 for (i = 0; i < state->dts_nspeculations; i++) {
3181 dtrace_speculation_t *spec = &state->dts_speculations[i];
3182 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
3183
3184 if (src->dtb_tomax == NULL)
3185 continue;
3186
3187 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
3188 src->dtb_offset = 0;
3189 continue;
3190 }
3191
3192 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3193 continue;
3194
3195 if (src->dtb_offset == 0)
3196 continue;
3197
3198 dtrace_speculation_commit(state, cpu, i + 1);
3199 }
3200
3201 dtrace_interrupt_enable(cookie);
3202 }
3203
3204 /*
3205 * Note: not called from probe context. This function is called
3206 * asynchronously (and at a regular interval) to clean any speculations that
3207 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
3208 * is work to be done, it cross calls all CPUs to perform that work;
3209  * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
3210 * INACTIVE state until they have been cleaned by all CPUs.
3211 */
3212 static void
3213 dtrace_speculation_clean(dtrace_state_t *state)
3214 {
3215 int work = 0, rv;
3216 dtrace_specid_t i;
3217
3218 for (i = 0; i < state->dts_nspeculations; i++) {
3219 dtrace_speculation_t *spec = &state->dts_speculations[i];
3220
3221 ASSERT(!spec->dtsp_cleaning);
3222
3223 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
3224 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
3225 continue;
3226
3227 work++;
3228 spec->dtsp_cleaning = 1;
3229 }
3230
3231 if (!work)
3232 return;
3233
3234 dtrace_xcall(DTRACE_CPUALL,
3235 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
3236
3237 /*
3238 * We now know that all CPUs have committed or discarded their
3239 * speculation buffers, as appropriate. We can now set the state
3240 * to inactive.
3241 */
3242 for (i = 0; i < state->dts_nspeculations; i++) {
3243 dtrace_speculation_t *spec = &state->dts_speculations[i];
3244 dtrace_speculation_state_t current, new;
3245
3246 if (!spec->dtsp_cleaning)
3247 continue;
3248
3249 current = spec->dtsp_state;
3250 ASSERT(current == DTRACESPEC_DISCARDING ||
3251 current == DTRACESPEC_COMMITTINGMANY);
3252
3253 new = DTRACESPEC_INACTIVE;
3254
3255 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
3256 ASSERT(rv == current);
3257 spec->dtsp_cleaning = 0;
3258 }
3259 }
3260
3261 /*
3262 * Called as part of a speculate() to get the speculative buffer associated
3263 * with a given speculation. Returns NULL if the specified speculation is not
3264 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
3265 * the active CPU is not the specified CPU -- the speculation will be
3266 * atomically transitioned into the ACTIVEMANY state.
3267 */
3268 static dtrace_buffer_t *
3269 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
3270 dtrace_specid_t which)
3271 {
3272 dtrace_speculation_t *spec;
3273 dtrace_speculation_state_t current, new = 0;
3274 dtrace_buffer_t *buf;
3275
3276 if (which == 0)
3277 return (NULL);
3278
3279 if (which > state->dts_nspeculations) {
3280 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
3281 return (NULL);
3282 }
3283
3284 spec = &state->dts_speculations[which - 1];
3285 buf = &spec->dtsp_buffer[cpuid];
3286
3287 do {
3288 current = spec->dtsp_state;
3289
3290 switch (current) {
3291 case DTRACESPEC_INACTIVE:
3292 case DTRACESPEC_COMMITTINGMANY:
3293 case DTRACESPEC_DISCARDING:
3294 return (NULL);
3295
3296 case DTRACESPEC_COMMITTING:
3297 ASSERT(buf->dtb_offset == 0);
3298 return (NULL);
3299
3300 case DTRACESPEC_ACTIVEONE:
3301 /*
3302 * This speculation is currently active on one CPU.
3303 * Check the offset in the buffer; if it's non-zero,
3304 * that CPU must be us (and we leave the state alone).
3305 * If it's zero, assume that we're starting on a new
3306 * CPU -- and change the state to indicate that the
3307 * speculation is active on more than one CPU.
3308 */
3309 if (buf->dtb_offset != 0)
3310 return (buf);
3311
3312 new = DTRACESPEC_ACTIVEMANY;
3313 break;
3314
3315 case DTRACESPEC_ACTIVEMANY:
3316 return (buf);
3317
3318 case DTRACESPEC_ACTIVE:
3319 new = DTRACESPEC_ACTIVEONE;
3320 break;
3321
3322 default:
3323 ASSERT(0);
3324 }
3325 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
3326 current, new) != current);
3327
3328 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
3329 return (buf);
3330 }
3331
3332 /*
3333 * Return a string. In the event that the user lacks the privilege to access
3334 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3335 * don't fail access checking.
3336 *
3337 * dtrace_dif_variable() uses this routine as a helper for various
3338 * builtin values such as 'execname' and 'probefunc.'
3339 */
3340 uintptr_t
3341 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
3342 dtrace_mstate_t *mstate)
3343 {
3344 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3345 uintptr_t ret;
3346 size_t strsz;
3347
3348 /*
3349 * The easy case: this probe is allowed to read all of memory, so
3350 * we can just return this as a vanilla pointer.
3351 */
3352 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
3353 return (addr);
3354
3355 /*
3356 * This is the tougher case: we copy the string in question from
3357 * kernel memory into scratch memory and return it that way: this
3358 * ensures that we won't trip up when access checking tests the
3359 * BYREF return value.
3360 */
3361 strsz = dtrace_strlen((char *)addr, size) + 1;
3362
3363 if (mstate->dtms_scratch_ptr + strsz >
3364 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3365 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3366 return (0);
3367 }
3368
3369 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3370 strsz);
3371 ret = mstate->dtms_scratch_ptr;
3372 mstate->dtms_scratch_ptr += strsz;
3373 return (ret);
3374 }
3375
3376 /*
3377  * Return a string from a memory address which is known to have one or
3378 * more concatenated, individually zero terminated, sub-strings.
3379 * In the event that the user lacks the privilege to access
3380 * arbitrary kernel memory, we copy the string out to scratch memory so that we
3381 * don't fail access checking.
3382 *
3383 * dtrace_dif_variable() uses this routine as a helper for various
3384 * builtin values such as 'execargs'.
3385 */
3386 static uintptr_t
3387 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state,
3388 dtrace_mstate_t *mstate)
3389 {
3390 char *p;
3391 size_t i;
3392 uintptr_t ret;
3393
3394 if (mstate->dtms_scratch_ptr + strsz >
3395 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
3396 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3397 return (0);
3398 }
3399
3400 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
3401 strsz);
3402
3403 /* Replace sub-string termination characters with a space. */
3404 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1;
3405 p++, i++)
3406 if (*p == '\0')
3407 *p = ' ';
3408
3409 ret = mstate->dtms_scratch_ptr;
3410 mstate->dtms_scratch_ptr += strsz;
3411 return (ret);
3412 }
3413
3414 /*
3415 * This function implements the DIF emulator's variable lookups. The emulator
3416 * passes a reserved variable identifier and optional built-in array index.
3417 */
3418 static uint64_t
3419 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
3420 uint64_t ndx)
3421 {
3422 /*
3423 * If we're accessing one of the uncached arguments, we'll turn this
3424 * into a reference in the args array.
3425 */
3426 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
3427 ndx = v - DIF_VAR_ARG0;
3428 v = DIF_VAR_ARGS;
3429 }
3430
3431 switch (v) {
3432 case DIF_VAR_ARGS:
3433 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
3434 if (ndx >= sizeof (mstate->dtms_arg) /
3435 sizeof (mstate->dtms_arg[0])) {
3436 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3437 dtrace_provider_t *pv;
3438 uint64_t val;
3439
3440 pv = mstate->dtms_probe->dtpr_provider;
3441 if (pv->dtpv_pops.dtps_getargval != NULL)
3442 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
3443 mstate->dtms_probe->dtpr_id,
3444 mstate->dtms_probe->dtpr_arg, ndx, aframes);
3445 else
3446 val = dtrace_getarg(ndx, aframes);
3447
3448 /*
3449 * This is regrettably required to keep the compiler
3450 * from tail-optimizing the call to dtrace_getarg().
3451 * The condition always evaluates to true, but the
3452 * compiler has no way of figuring that out a priori.
3453 * (None of this would be necessary if the compiler
3454 * could be relied upon to _always_ tail-optimize
3455 * the call to dtrace_getarg() -- but it can't.)
3456 */
3457 if (mstate->dtms_probe != NULL)
3458 return (val);
3459
3460 ASSERT(0);
3461 }
3462
3463 return (mstate->dtms_arg[ndx]);
3464
3465 #ifdef illumos
3466 case DIF_VAR_UREGS: {
3467 klwp_t *lwp;
3468
3469 if (!dtrace_priv_proc(state))
3470 return (0);
3471
3472 if ((lwp = curthread->t_lwp) == NULL) {
3473 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3474 cpu_core[curcpu_id].cpuc_dtrace_illval = NULL;
3475 return (0);
3476 }
3477
3478 		return (dtrace_getreg(lwp->lwp_regs, ndx));
3480 }
3481 #endif
3482 #ifdef __FreeBSD__
3483 case DIF_VAR_UREGS: {
3484 struct trapframe *tframe;
3485
3486 if (!dtrace_priv_proc(state))
3487 return (0);
3488
3489 if ((tframe = curthread->td_frame) == NULL) {
3490 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3491 cpu_core[curcpu].cpuc_dtrace_illval = 0;
3492 return (0);
3493 }
3494
3495 return (dtrace_getreg(tframe, ndx));
3496 }
3497 #endif
3498 #ifdef __NetBSD__
3499 case DIF_VAR_UREGS: {
3500 struct trapframe *tframe;
3501
3502 if (!dtrace_priv_proc(state))
3503 return (0);
3504
3505 if ((tframe = lwp_trapframe(curlwp)) == NULL) {
3506 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
3507 cpu_core[curcpu_id].cpuc_dtrace_illval = 0;
3508 return (0);
3509 }
3510
3511 return (dtrace_getreg(tframe, ndx));
3512 }
3513 #endif
3514
3515 case DIF_VAR_CURTHREAD:
3516 if (!dtrace_priv_proc(state))
3517 return (0);
3518 return ((uint64_t)(uintptr_t)curthread);
3519
3520 case DIF_VAR_TIMESTAMP:
3521 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
3522 mstate->dtms_timestamp = dtrace_gethrtime();
3523 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
3524 }
3525 return (mstate->dtms_timestamp);
3526
3527 case DIF_VAR_VTIMESTAMP:
3528 ASSERT(dtrace_vtime_references != 0);
3529 return (curthread->t_dtrace_vtime);
3530
3531 case DIF_VAR_WALLTIMESTAMP:
3532 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
3533 mstate->dtms_walltimestamp = dtrace_gethrestime();
3534 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
3535 }
3536 return (mstate->dtms_walltimestamp);
3537
3538 #ifdef illumos
3539 case DIF_VAR_IPL:
3540 if (!dtrace_priv_kernel(state))
3541 return (0);
3542 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
3543 mstate->dtms_ipl = dtrace_getipl();
3544 mstate->dtms_present |= DTRACE_MSTATE_IPL;
3545 }
3546 return (mstate->dtms_ipl);
3547 #endif
3548
3549 case DIF_VAR_EPID:
3550 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
3551 return (mstate->dtms_epid);
3552
3553 case DIF_VAR_ID:
3554 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3555 return (mstate->dtms_probe->dtpr_id);
3556
3557 case DIF_VAR_STACKDEPTH:
3558 if (!dtrace_priv_kernel(state))
3559 return (0);
3560 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
3561 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3562
3563 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
3564 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
3565 }
3566 return (mstate->dtms_stackdepth);
3567
3568 case DIF_VAR_USTACKDEPTH:
3569 if (!dtrace_priv_proc(state))
3570 return (0);
3571 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
3572 /*
3573 * See comment in DIF_VAR_PID.
3574 */
3575 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
3576 CPU_ON_INTR(CPU)) {
3577 mstate->dtms_ustackdepth = 0;
3578 } else {
3579 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3580 mstate->dtms_ustackdepth =
3581 dtrace_getustackdepth();
3582 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3583 }
3584 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
3585 }
3586 return (mstate->dtms_ustackdepth);
3587
3588 case DIF_VAR_CALLER:
3589 if (!dtrace_priv_kernel(state))
3590 return (0);
3591 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
3592 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
3593
3594 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
3595 /*
3596 * If this is an unanchored probe, we are
3597 * required to go through the slow path:
3598 * dtrace_caller() only guarantees correct
3599 * results for anchored probes.
3600 */
3601 pc_t caller[2] = {0, 0};
3602
3603 dtrace_getpcstack(caller, 2, aframes,
3604 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
3605 mstate->dtms_caller = caller[1];
3606 } else if ((mstate->dtms_caller =
3607 dtrace_caller(aframes)) == -1) {
3608 /*
3609 * We have failed to do this the quick way;
3610 * we must resort to the slower approach of
3611 * calling dtrace_getpcstack().
3612 */
3613 pc_t caller = 0;
3614
3615 dtrace_getpcstack(&caller, 1, aframes, NULL);
3616 mstate->dtms_caller = caller;
3617 }
3618
3619 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
3620 }
3621 return (mstate->dtms_caller);
3622
3623 case DIF_VAR_UCALLER:
3624 if (!dtrace_priv_proc(state))
3625 return (0);
3626
3627 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
3628 uint64_t ustack[3];
3629
3630 /*
3631 * dtrace_getupcstack() fills in the first uint64_t
3632 * with the current PID. The second uint64_t will
3633 * be the program counter at user-level. The third
3634 * uint64_t will contain the caller, which is what
3635 * we're after.
3636 */
3637 ustack[2] = 0;
3638 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3639 dtrace_getupcstack(ustack, 3);
3640 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3641 mstate->dtms_ucaller = ustack[2];
3642 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
3643 }
3644
3645 return (mstate->dtms_ucaller);
3646
3647 case DIF_VAR_PROBEPROV:
3648 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3649 return (dtrace_dif_varstr(
3650 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
3651 state, mstate));
3652
3653 case DIF_VAR_PROBEMOD:
3654 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3655 return (dtrace_dif_varstr(
3656 (uintptr_t)mstate->dtms_probe->dtpr_mod,
3657 state, mstate));
3658
3659 case DIF_VAR_PROBEFUNC:
3660 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3661 return (dtrace_dif_varstr(
3662 (uintptr_t)mstate->dtms_probe->dtpr_func,
3663 state, mstate));
3664
3665 case DIF_VAR_PROBENAME:
3666 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
3667 return (dtrace_dif_varstr(
3668 (uintptr_t)mstate->dtms_probe->dtpr_name,
3669 state, mstate));
3670
3671 case DIF_VAR_PID:
3672 if (!dtrace_priv_proc(state))
3673 return (0);
3674
3675 #ifdef illumos
3676 /*
3677 * Note that we are assuming that an unanchored probe is
3678 * always due to a high-level interrupt. (And we're assuming
3679 * that there is only a single high level interrupt.)
3680 */
3681 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3682 return (pid0.pid_id);
3683
3684 /*
3685 * It is always safe to dereference one's own t_procp pointer:
3686 * it always points to a valid, allocated proc structure.
3687 * Further, it is always safe to dereference the p_pidp member
3688 	 * of one's own proc structure. (These are truisms because
3689 * threads and processes don't clean up their own state --
3690 * they leave that task to whomever reaps them.)
3691 */
3692 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
3693 #else
3694 return ((uint64_t)curproc->p_pid);
3695 #endif
3696
3697 case DIF_VAR_PPID:
3698 if (!dtrace_priv_proc(state))
3699 return (0);
3700
3701 #ifdef illumos
3702 /*
3703 * See comment in DIF_VAR_PID.
3704 */
3705 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3706 return (pid0.pid_id);
3707
3708 /*
3709 * It is always safe to dereference one's own t_procp pointer:
3710 * it always points to a valid, allocated proc structure.
3711 * (This is true because threads don't clean up their own
3712 * state -- they leave that task to whomever reaps them.)
3713 */
3714 return ((uint64_t)curthread->t_procp->p_ppid);
3715 #else
3716 if (curproc->p_pid == proc0.p_pid)
3717 return (curproc->p_pid);
3718 else
3719 return (curproc->p_pptr->p_pid);
3720 #endif
3721
3722 case DIF_VAR_TID:
3723 #ifdef illumos
3724 /*
3725 * See comment in DIF_VAR_PID.
3726 */
3727 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3728 return (0);
3729 #endif
3730
3731 return ((uint64_t)curthread->t_tid);
3732
3733 case DIF_VAR_EXECARGS: {
3734 #ifdef __FreeBSD__
3735 struct pargs *p_args = curthread->td_proc->p_args;
3736
3737 if (p_args == NULL)
3738 return(0);
3739
3740 return (dtrace_dif_varstrz(
3741 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate));
3742 #else
3743 return 0;
3744 #endif
3745 }
3746
3747 case DIF_VAR_EXECNAME:
3748 #ifdef illumos
3749 if (!dtrace_priv_proc(state))
3750 return (0);
3751
3752 /*
3753 * See comment in DIF_VAR_PID.
3754 */
3755 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3756 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
3757
3758 /*
3759 * It is always safe to dereference one's own t_procp pointer:
3760 * it always points to a valid, allocated proc structure.
3761 * (This is true because threads don't clean up their own
3762 * state -- they leave that task to whomever reaps them.)
3763 */
3764 return (dtrace_dif_varstr(
3765 (uintptr_t)curthread->t_procp->p_user.u_comm,
3766 state, mstate));
3767 #else
3768 return (dtrace_dif_varstr(
3769 (uintptr_t) curproc->p_comm, state, mstate));
3770 #endif
3771
3772 case DIF_VAR_ZONENAME:
3773 #ifdef illumos
3774 if (!dtrace_priv_proc(state))
3775 return (0);
3776
3777 /*
3778 * See comment in DIF_VAR_PID.
3779 */
3780 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3781 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
3782
3783 /*
3784 * It is always safe to dereference one's own t_procp pointer:
3785 * it always points to a valid, allocated proc structure.
3786 * (This is true because threads don't clean up their own
3787 * state -- they leave that task to whomever reaps them.)
3788 */
3789 return (dtrace_dif_varstr(
3790 (uintptr_t)curthread->t_procp->p_zone->zone_name,
3791 state, mstate));
3792 #else
3793 return (0);
3794 #endif
3795
3796 case DIF_VAR_UID:
3797 if (!dtrace_priv_proc(state))
3798 return (0);
3799
3800 #ifdef illumos
3801 /*
3802 * See comment in DIF_VAR_PID.
3803 */
3804 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3805 return ((uint64_t)p0.p_cred->cr_uid);
3806
3807 /*
3808 * It is always safe to dereference one's own t_procp pointer:
3809 * it always points to a valid, allocated proc structure.
3810 * (This is true because threads don't clean up their own
3811 * state -- they leave that task to whomever reaps them.)
3812 *
3813 * Additionally, it is safe to dereference one's own process
3814 * credential, since this is never NULL after process birth.
3815 */
3816 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
3817 #endif
3818 #ifdef __FreeBSD__
3819 return ((uint64_t)curthread->td_ucred->cr_uid);
3820 #endif
3821 #ifdef __NetBSD__
3822 return ((uint64_t)kauth_cred_getuid(curthread->t_procp->p_cred));
3823 #endif
3824
3825 case DIF_VAR_GID:
3826 if (!dtrace_priv_proc(state))
3827 return (0);
3828
3829 #ifdef illumos
3830 /*
3831 * See comment in DIF_VAR_PID.
3832 */
3833 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3834 return ((uint64_t)p0.p_cred->cr_gid);
3835
3836 /*
3837 * It is always safe to dereference one's own t_procp pointer:
3838 * it always points to a valid, allocated proc structure.
3839 * (This is true because threads don't clean up their own
3840 * state -- they leave that task to whomever reaps them.)
3841 *
3842 * Additionally, it is safe to dereference one's own process
3843 * credential, since this is never NULL after process birth.
3844 */
3845 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
3846 #endif
3847 #ifdef __FreeBSD__
3848 return ((uint64_t)curthread->td_ucred->cr_gid);
3849 #endif
3850 #ifdef __NetBSD__
3851 return ((uint64_t)kauth_cred_getgid(curthread->t_procp->p_cred));
3852 #endif
3853
3854 case DIF_VAR_ERRNO: {
3855 #ifdef illumos
3856 klwp_t *lwp;
3857 if (!dtrace_priv_proc(state))
3858 return (0);
3859
3860 /*
3861 * See comment in DIF_VAR_PID.
3862 */
3863 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
3864 return (0);
3865
3866 /*
3867 * It is always safe to dereference one's own t_lwp pointer in
3868 * the event that this pointer is non-NULL. (This is true
3869 * because threads and lwps don't clean up their own state --
3870 * they leave that task to whomever reaps them.)
3871 */
3872 if ((lwp = curthread->t_lwp) == NULL)
3873 return (0);
3874
3875 return ((uint64_t)lwp->lwp_errno);
3876 #endif
3877 #ifdef __FreeBSD__
3878 return (curthread->td_errno);
3879 #endif
3880 #ifdef __NetBSD__
3881 return 0; /* XXX TBD errno support at lwp level? */
3882 #endif
3883 }
3884 #ifndef illumos
3885 case DIF_VAR_CPU: {
3886 return curcpu_id;
3887 }
3888 #endif
3889 default:
3890 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
3891 return (0);
3892 }
3893 }
3894
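#if 0
/*
 * Illustrative sketch (editorial addition, not part of the original source
 * and not compiled): because arg0..arg9 are remapped onto the args[] array
 * at the top of dtrace_dif_variable(), the two lookups below are expected to
 * be equivalent once the machine state has its probe arguments cached.  The
 * "mstate" and "state" parameters are assumed to be the current emulator
 * machine state and consumer state.
 */
static void
dtrace_dif_variable_example(dtrace_mstate_t *mstate, dtrace_state_t *state)
{
	uint64_t a, b;

	a = dtrace_dif_variable(mstate, state, DIF_VAR_ARG0 + 3, 0);
	b = dtrace_dif_variable(mstate, state, DIF_VAR_ARGS, 3);
	ASSERT(a == b);
}
#endif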
3895
3896 typedef enum dtrace_json_state {
3897 DTRACE_JSON_REST = 1,
3898 DTRACE_JSON_OBJECT,
3899 DTRACE_JSON_STRING,
3900 DTRACE_JSON_STRING_ESCAPE,
3901 DTRACE_JSON_STRING_ESCAPE_UNICODE,
3902 DTRACE_JSON_COLON,
3903 DTRACE_JSON_COMMA,
3904 DTRACE_JSON_VALUE,
3905 DTRACE_JSON_IDENTIFIER,
3906 DTRACE_JSON_NUMBER,
3907 DTRACE_JSON_NUMBER_FRAC,
3908 DTRACE_JSON_NUMBER_EXP,
3909 DTRACE_JSON_COLLECT_OBJECT
3910 } dtrace_json_state_t;
3911
3912 /*
3913 * This function possesses just enough knowledge about JSON to extract a single
3914 * value from a JSON string and store it in the scratch buffer. It is able
3915 * to extract nested object values, and members of arrays by index.
3916 *
3917 * elemlist is a list of JSON keys, stored as packed NUL-terminated strings, to
3918 * be looked up as we descend into the object tree. e.g.
3919 *
3920 * foo[0].bar.baz[32] --> "foo" NUL "0" NUL "bar" NUL "baz" NUL "32" NUL
3921 * with nelems = 5.
3922 *
3923 * The run time of this function must be bounded above by strsize to limit the
3924 * amount of work done in probe context. As such, it is implemented as a
3925 * simple state machine, reading one character at a time using safe loads
3926 * until we find the requested element, hit a parsing error or run off the
3927 * end of the object or string.
3928 *
3929 * As there is no way for a subroutine to return an error without interrupting
3930 * clause execution, we simply return NULL in the event of a missing key or any
3931 * other error condition. Each NULL return in this function is commented with
3932 * the error condition it represents -- parsing or otherwise.
3933 *
3934 * The set of states for the state machine closely matches the JSON
3935 * specification (http://json.org/). Briefly:
3936 *
3937 * DTRACE_JSON_REST:
3938 * Skip whitespace until we find either a top-level Object, moving
3939 * to DTRACE_JSON_OBJECT; or an Array, moving to DTRACE_JSON_VALUE.
3940 *
3941 * DTRACE_JSON_OBJECT:
3942 * Locate the next key String in an Object. Sets a flag to denote
3943 * the next String as a key string and moves to DTRACE_JSON_STRING.
3944 *
3945 * DTRACE_JSON_COLON:
3946 * Skip whitespace until we find the colon that separates key Strings
3947 * from their values. Once found, move to DTRACE_JSON_VALUE.
3948 *
3949 * DTRACE_JSON_VALUE:
3950 * Detects the type of the next value (String, Number, Identifier, Object
3951 * or Array) and routes to the states that process that type. Here we also
3952 * deal with the element selector list if we are requested to traverse down
3953 * into the object tree.
3954 *
3955 * DTRACE_JSON_COMMA:
3956 * Skip whitespace until we find the comma that separates key-value pairs
3957 * in Objects (returning to DTRACE_JSON_OBJECT) or values in Arrays
3958 * (similarly DTRACE_JSON_VALUE). All following literal value processing
3959 * states return to this state at the end of their value, unless otherwise
3960 * noted.
3961 *
3962 * DTRACE_JSON_NUMBER, DTRACE_JSON_NUMBER_FRAC, DTRACE_JSON_NUMBER_EXP:
3963 * Processes a Number literal from the JSON, including any exponent
3964 * component that may be present. Numbers are returned as strings, which
3965 * may be passed to strtoll() if an integer is required.
3966 *
3967 * DTRACE_JSON_IDENTIFIER:
3968 * Processes a "true", "false" or "null" literal in the JSON.
3969 *
3970 * DTRACE_JSON_STRING, DTRACE_JSON_STRING_ESCAPE,
3971 * DTRACE_JSON_STRING_ESCAPE_UNICODE:
3972 * Processes a String literal from the JSON, whether the String denotes
3973 * a key, a value or part of a larger Object. Handles all escape sequences
3974 * present in the specification, including four-digit unicode characters,
3975 * but merely includes the escape sequence without converting it to the
3976 * actual escaped character. If the String is flagged as a key, we
3977 * move to DTRACE_JSON_COLON rather than DTRACE_JSON_COMMA.
3978 *
3979 * DTRACE_JSON_COLLECT_OBJECT:
3980 * This state collects an entire Object (or Array), correctly handling
3981 * embedded strings. If the full element selector list matches this nested
3982 * object, we return the Object in full as a string. If not, we use this
3983 * state to skip to the next value at this level and continue processing.
3984 *
3985 * NOTE: This function uses various macros from strtolctype.h to manipulate
3986 * digit values, etc -- these have all been checked to ensure they make
3987 * no additional function calls.
3988 */
3989 static char *
3990 dtrace_json(uint64_t size, uintptr_t json, char *elemlist, int nelems,
3991 char *dest)
3992 {
3993 dtrace_json_state_t state = DTRACE_JSON_REST;
3994 int64_t array_elem = INT64_MIN;
3995 int64_t array_pos = 0;
3996 uint8_t escape_unicount = 0;
3997 boolean_t string_is_key = B_FALSE;
3998 boolean_t collect_object = B_FALSE;
3999 boolean_t found_key = B_FALSE;
4000 boolean_t in_array = B_FALSE;
4001 uint32_t braces = 0, brackets = 0;
4002 char *elem = elemlist;
4003 char *dd = dest;
4004 uintptr_t cur;
4005
4006 for (cur = json; cur < json + size; cur++) {
4007 char cc = dtrace_load8(cur);
4008 if (cc == '\0')
4009 return (NULL);
4010
4011 switch (state) {
4012 case DTRACE_JSON_REST:
4013 if (isspace(cc))
4014 break;
4015
4016 if (cc == '{') {
4017 state = DTRACE_JSON_OBJECT;
4018 break;
4019 }
4020
4021 if (cc == '[') {
4022 in_array = B_TRUE;
4023 array_pos = 0;
4024 array_elem = dtrace_strtoll(elem, 10, size);
4025 found_key = array_elem == 0 ? B_TRUE : B_FALSE;
4026 state = DTRACE_JSON_VALUE;
4027 break;
4028 }
4029
4030 /*
4031 * ERROR: expected to find a top-level object or array.
4032 */
4033 return (NULL);
4034 case DTRACE_JSON_OBJECT:
4035 if (isspace(cc))
4036 break;
4037
4038 if (cc == '"') {
4039 state = DTRACE_JSON_STRING;
4040 string_is_key = B_TRUE;
4041 break;
4042 }
4043
4044 /*
4045 * ERROR: either the object did not start with a key
4046 * string, or we've run off the end of the object
4047 * without finding the requested key.
4048 */
4049 return (NULL);
4050 case DTRACE_JSON_STRING:
4051 if (cc == '\\') {
4052 *dd++ = '\\';
4053 state = DTRACE_JSON_STRING_ESCAPE;
4054 break;
4055 }
4056
4057 if (cc == '"') {
4058 if (collect_object) {
4059 /*
4060 * We don't reset the dest here, as
4061 * the string is part of a larger
4062 * object being collected.
4063 */
4064 *dd++ = cc;
4065 collect_object = B_FALSE;
4066 state = DTRACE_JSON_COLLECT_OBJECT;
4067 break;
4068 }
4069 *dd = '\0';
4070 dd = dest; /* reset string buffer */
4071 if (string_is_key) {
4072 if (dtrace_strncmp(dest, elem,
4073 size) == 0)
4074 found_key = B_TRUE;
4075 } else if (found_key) {
4076 if (nelems > 1) {
4077 /*
4078 * We expected an object, not
4079 * this string.
4080 */
4081 return (NULL);
4082 }
4083 return (dest);
4084 }
4085 state = string_is_key ? DTRACE_JSON_COLON :
4086 DTRACE_JSON_COMMA;
4087 string_is_key = B_FALSE;
4088 break;
4089 }
4090
4091 *dd++ = cc;
4092 break;
4093 case DTRACE_JSON_STRING_ESCAPE:
4094 *dd++ = cc;
4095 if (cc == 'u') {
4096 escape_unicount = 0;
4097 state = DTRACE_JSON_STRING_ESCAPE_UNICODE;
4098 } else {
4099 state = DTRACE_JSON_STRING;
4100 }
4101 break;
4102 case DTRACE_JSON_STRING_ESCAPE_UNICODE:
4103 if (!isxdigit(cc)) {
4104 /*
4105 * ERROR: invalid unicode escape, expected
4106 				 * four valid hexadecimal digits.
4107 */
4108 return (NULL);
4109 }
4110
4111 *dd++ = cc;
4112 if (++escape_unicount == 4)
4113 state = DTRACE_JSON_STRING;
4114 break;
4115 case DTRACE_JSON_COLON:
4116 if (isspace(cc))
4117 break;
4118
4119 if (cc == ':') {
4120 state = DTRACE_JSON_VALUE;
4121 break;
4122 }
4123
4124 /*
4125 * ERROR: expected a colon.
4126 */
4127 return (NULL);
4128 case DTRACE_JSON_COMMA:
4129 if (isspace(cc))
4130 break;
4131
4132 if (cc == ',') {
4133 if (in_array) {
4134 state = DTRACE_JSON_VALUE;
4135 if (++array_pos == array_elem)
4136 found_key = B_TRUE;
4137 } else {
4138 state = DTRACE_JSON_OBJECT;
4139 }
4140 break;
4141 }
4142
4143 /*
4144 * ERROR: either we hit an unexpected character, or
4145 * we reached the end of the object or array without
4146 * finding the requested key.
4147 */
4148 return (NULL);
4149 case DTRACE_JSON_IDENTIFIER:
4150 if (islower(cc)) {
4151 *dd++ = cc;
4152 break;
4153 }
4154
4155 *dd = '\0';
4156 dd = dest; /* reset string buffer */
4157
4158 if (dtrace_strncmp(dest, "true", 5) == 0 ||
4159 dtrace_strncmp(dest, "false", 6) == 0 ||
4160 dtrace_strncmp(dest, "null", 5) == 0) {
4161 if (found_key) {
4162 if (nelems > 1) {
4163 /*
4164 * ERROR: We expected an object,
4165 * not this identifier.
4166 */
4167 return (NULL);
4168 }
4169 return (dest);
4170 } else {
4171 cur--;
4172 state = DTRACE_JSON_COMMA;
4173 break;
4174 }
4175 }
4176
4177 /*
4178 * ERROR: we did not recognise the identifier as one
4179 * of those in the JSON specification.
4180 */
4181 return (NULL);
4182 case DTRACE_JSON_NUMBER:
4183 if (cc == '.') {
4184 *dd++ = cc;
4185 state = DTRACE_JSON_NUMBER_FRAC;
4186 break;
4187 }
4188
4189 if (cc == 'x' || cc == 'X') {
4190 /*
4191 * ERROR: specification explicitly excludes
4192 				 * hexadecimal or octal numbers.
4193 */
4194 return (NULL);
4195 }
4196
4197 /* FALLTHRU */
4198 case DTRACE_JSON_NUMBER_FRAC:
4199 if (cc == 'e' || cc == 'E') {
4200 *dd++ = cc;
4201 state = DTRACE_JSON_NUMBER_EXP;
4202 break;
4203 }
4204
4205 if (cc == '+' || cc == '-') {
4206 /*
4207 * ERROR: expect sign as part of exponent only.
4208 */
4209 return (NULL);
4210 }
4211 /* FALLTHRU */
4212 case DTRACE_JSON_NUMBER_EXP:
4213 if (isdigit(cc) || cc == '+' || cc == '-') {
4214 *dd++ = cc;
4215 break;
4216 }
4217
4218 *dd = '\0';
4219 dd = dest; /* reset string buffer */
4220 if (found_key) {
4221 if (nelems > 1) {
4222 /*
4223 * ERROR: We expected an object, not
4224 * this number.
4225 */
4226 return (NULL);
4227 }
4228 return (dest);
4229 }
4230
4231 cur--;
4232 state = DTRACE_JSON_COMMA;
4233 break;
4234 case DTRACE_JSON_VALUE:
4235 if (isspace(cc))
4236 break;
4237
4238 if (cc == '{' || cc == '[') {
4239 if (nelems > 1 && found_key) {
4240 in_array = cc == '[' ? B_TRUE : B_FALSE;
4241 /*
4242 * If our element selector directs us
4243 * to descend into this nested object,
4244 * then move to the next selector
4245 * element in the list and restart the
4246 * state machine.
4247 */
4248 while (*elem != '\0')
4249 elem++;
4250 elem++; /* skip the inter-element NUL */
4251 nelems--;
4252 dd = dest;
4253 if (in_array) {
4254 state = DTRACE_JSON_VALUE;
4255 array_pos = 0;
4256 array_elem = dtrace_strtoll(
4257 elem, 10, size);
4258 found_key = array_elem == 0 ?
4259 B_TRUE : B_FALSE;
4260 } else {
4261 found_key = B_FALSE;
4262 state = DTRACE_JSON_OBJECT;
4263 }
4264 break;
4265 }
4266
4267 /*
4268 * Otherwise, we wish to either skip this
4269 * nested object or return it in full.
4270 */
4271 if (cc == '[')
4272 brackets = 1;
4273 else
4274 braces = 1;
4275 *dd++ = cc;
4276 state = DTRACE_JSON_COLLECT_OBJECT;
4277 break;
4278 }
4279
4280 if (cc == '"') {
4281 state = DTRACE_JSON_STRING;
4282 break;
4283 }
4284
4285 if (islower(cc)) {
4286 /*
4287 * Here we deal with true, false and null.
4288 */
4289 *dd++ = cc;
4290 state = DTRACE_JSON_IDENTIFIER;
4291 break;
4292 }
4293
4294 if (cc == '-' || isdigit(cc)) {
4295 *dd++ = cc;
4296 state = DTRACE_JSON_NUMBER;
4297 break;
4298 }
4299
4300 /*
4301 * ERROR: unexpected character at start of value.
4302 */
4303 return (NULL);
4304 case DTRACE_JSON_COLLECT_OBJECT:
4305 if (cc == '\0')
4306 /*
4307 * ERROR: unexpected end of input.
4308 */
4309 return (NULL);
4310
4311 *dd++ = cc;
4312 if (cc == '"') {
4313 collect_object = B_TRUE;
4314 state = DTRACE_JSON_STRING;
4315 break;
4316 }
4317
4318 if (cc == ']') {
4319 if (brackets-- == 0) {
4320 /*
4321 * ERROR: unbalanced brackets.
4322 */
4323 return (NULL);
4324 }
4325 } else if (cc == '}') {
4326 if (braces-- == 0) {
4327 /*
4328 * ERROR: unbalanced braces.
4329 */
4330 return (NULL);
4331 }
4332 } else if (cc == '{') {
4333 braces++;
4334 } else if (cc == '[') {
4335 brackets++;
4336 }
4337
4338 if (brackets == 0 && braces == 0) {
4339 if (found_key) {
4340 *dd = '\0';
4341 return (dest);
4342 }
4343 dd = dest; /* reset string buffer */
4344 state = DTRACE_JSON_COMMA;
4345 }
4346 break;
4347 }
4348 }
4349 return (NULL);
4350 }
4351
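#if 0
/*
 * Illustrative sketch (editorial addition, not part of the original source
 * and not compiled): how a caller might pack the selector "foo[0].bar" and
 * hand it to dtrace_json() above.  The JSON literal and the "dest" scratch
 * buffer here are assumptions made purely for the example.
 */
static void
dtrace_json_example(void)
{
	static const char json[] = "{\"foo\": [{\"bar\": 12}]}";
	char elemlist[] = "foo\0" "0\0" "bar";	/* three packed selectors */
	char dest[32];

	/* Returns a pointer into dest holding "12", or NULL on any error. */
	(void) dtrace_json(sizeof (json), (uintptr_t)json, elemlist, 3, dest);
}
#endif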
4352 /*
4353 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
4354 * Notice that we don't bother validating the proper number of arguments or
4355 * their types in the tuple stack. This isn't needed because all argument
4356 * interpretation is safe because of our load safety -- the worst that can
4357 * happen is that a bogus program can obtain bogus results.
4358 */
4359 static void
4360 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
4361 dtrace_key_t *tupregs, int nargs,
4362 dtrace_mstate_t *mstate, dtrace_state_t *state)
4363 {
4364 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
4365 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;
4366 dtrace_vstate_t *vstate = &state->dts_vstate;
4367
4368 #ifdef illumos
4369 union {
4370 mutex_impl_t mi;
4371 uint64_t mx;
4372 } m;
4373
4374 union {
4375 krwlock_t ri;
4376 uintptr_t rw;
4377 } r;
4378 #endif
4379 #ifdef __FreeBSD__
4380 struct thread *lowner;
4381 union {
4382 struct lock_object *li;
4383 uintptr_t lx;
4384 } l;
4385 #endif
4386 #ifdef __NetBSD__
4387 union {
4388 kmutex_t mi;
4389 uint64_t mx;
4390 } m;
4391
4392 union {
4393 krwlock_t ri;
4394 uintptr_t rw;
4395 } r;
4396 #endif
4397
4398 switch (subr) {
4399 case DIF_SUBR_RAND:
4400 regs[rd] = dtrace_xoroshiro128_plus_next(
4401 state->dts_rstate[curcpu_id]);
4402 break;
4403
4404 #ifdef illumos
4405 case DIF_SUBR_MUTEX_OWNED:
4406 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4407 mstate, vstate)) {
4408 regs[rd] = 0;
4409 break;
4410 }
4411
4412 m.mx = dtrace_load64(tupregs[0].dttk_value);
4413 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4414 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4415 else
4416 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
4417 break;
4418
4419 case DIF_SUBR_MUTEX_OWNER:
4420 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4421 mstate, vstate)) {
4422 regs[rd] = 0;
4423 break;
4424 }
4425
4426 m.mx = dtrace_load64(tupregs[0].dttk_value);
4427 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4428 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4429 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4430 else
4431 regs[rd] = 0;
4432 break;
4433
4434 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4435 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4436 mstate, vstate)) {
4437 regs[rd] = 0;
4438 break;
4439 }
4440
4441 m.mx = dtrace_load64(tupregs[0].dttk_value);
4442 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4443 break;
4444
4445 case DIF_SUBR_MUTEX_TYPE_SPIN:
4446 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4447 mstate, vstate)) {
4448 regs[rd] = 0;
4449 break;
4450 }
4451
4452 m.mx = dtrace_load64(tupregs[0].dttk_value);
4453 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4454 break;
4455
4456 case DIF_SUBR_RW_READ_HELD: {
4457 uintptr_t tmp;
4458
4459 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4460 mstate, vstate)) {
4461 regs[rd] = 0;
4462 break;
4463 }
4464
4465 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4466 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
4467 break;
4468 }
4469
4470 case DIF_SUBR_RW_WRITE_HELD:
4471 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4472 mstate, vstate)) {
4473 regs[rd] = 0;
4474 break;
4475 }
4476
4477 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4478 regs[rd] = _RW_WRITE_HELD(&r.ri);
4479 break;
4480
4481 case DIF_SUBR_RW_ISWRITER:
4482 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4483 mstate, vstate)) {
4484 regs[rd] = 0;
4485 break;
4486 }
4487
4488 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4489 regs[rd] = _RW_ISWRITER(&r.ri);
4490 break;
4491
4492 #endif /* illumos */
4493 #ifdef __FreeBSD__
4494 case DIF_SUBR_MUTEX_OWNED:
4495 if (!dtrace_canload(tupregs[0].dttk_value,
4496 sizeof (struct lock_object), mstate, vstate)) {
4497 regs[rd] = 0;
4498 break;
4499 }
4500 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4501 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4502 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4503 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4504 break;
4505
4506 case DIF_SUBR_MUTEX_OWNER:
4507 if (!dtrace_canload(tupregs[0].dttk_value,
4508 sizeof (struct lock_object), mstate, vstate)) {
4509 regs[rd] = 0;
4510 break;
4511 }
4512 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4513 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4514 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4515 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4516 regs[rd] = (uintptr_t)lowner;
4517 break;
4518
4519 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4520 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4521 mstate, vstate)) {
4522 regs[rd] = 0;
4523 break;
4524 }
4525 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4526 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4527 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SLEEPLOCK) != 0;
4528 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4529 break;
4530
4531 case DIF_SUBR_MUTEX_TYPE_SPIN:
4532 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx),
4533 mstate, vstate)) {
4534 regs[rd] = 0;
4535 break;
4536 }
4537 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4538 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4539 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0;
4540 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4541 break;
4542
4543 case DIF_SUBR_RW_READ_HELD:
4544 case DIF_SUBR_SX_SHARED_HELD:
4545 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4546 mstate, vstate)) {
4547 regs[rd] = 0;
4548 break;
4549 }
4550 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value);
4551 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4552 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4553 lowner == NULL;
4554 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4555 break;
4556
4557 case DIF_SUBR_RW_WRITE_HELD:
4558 case DIF_SUBR_SX_EXCLUSIVE_HELD:
4559 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4560 mstate, vstate)) {
4561 regs[rd] = 0;
4562 break;
4563 }
4564 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4565 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4566 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) &&
4567 lowner != NULL;
4568 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4569 break;
4570
4571 case DIF_SUBR_RW_ISWRITER:
4572 case DIF_SUBR_SX_ISEXCLUSIVE:
4573 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
4574 mstate, vstate)) {
4575 regs[rd] = 0;
4576 break;
4577 }
4578 l.lx = dtrace_loadptr(tupregs[0].dttk_value);
4579 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4580 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner);
4581 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4582 regs[rd] = (lowner == curthread);
4583 break;
4584
4585 #endif /* __FreeBSD__ */
4586 #ifdef __NetBSD__
4587 case DIF_SUBR_MUTEX_OWNED:
4588 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4589 mstate, vstate)) {
4590 regs[rd] = 0;
4591 break;
4592 }
4593
4594 m.mx = dtrace_load64(tupregs[0].dttk_value);
4595 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
4596 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
4597 else
4598 regs[rd] = __SIMPLELOCK_LOCKED_P(&m.mi.mtx_lock);
4599 break;
4600
4601 case DIF_SUBR_MUTEX_OWNER:
4602 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4603 mstate, vstate)) {
4604 regs[rd] = 0;
4605 break;
4606 }
4607
4608 m.mx = dtrace_load64(tupregs[0].dttk_value);
4609 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
4610 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
4611 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
4612 else
4613 regs[rd] = 0;
4614 break;
4615
4616 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
4617 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4618 mstate, vstate)) {
4619 regs[rd] = 0;
4620 break;
4621 }
4622
4623 m.mx = dtrace_load64(tupregs[0].dttk_value);
4624 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
4625 break;
4626
4627 case DIF_SUBR_MUTEX_TYPE_SPIN:
4628 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
4629 mstate, vstate)) {
4630 regs[rd] = 0;
4631 break;
4632 }
4633
4634 m.mx = dtrace_load64(tupregs[0].dttk_value);
4635 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
4636 break;
4637
4638 case DIF_SUBR_RW_READ_HELD: {
4639 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4640 mstate, vstate)) {
4641 regs[rd] = 0;
4642 break;
4643 }
4644
4645 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4646 regs[rd] = _RW_READ_HELD(&r.ri);
4647 break;
4648 }
4649
4650 case DIF_SUBR_RW_WRITE_HELD:
4651 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4652 mstate, vstate)) {
4653 regs[rd] = 0;
4654 break;
4655 }
4656
4657 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4658 regs[rd] = _RW_WRITE_HELD(&r.ri);
4659 break;
4660
4661 case DIF_SUBR_RW_ISWRITER:
4662 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
4663 mstate, vstate)) {
4664 regs[rd] = 0;
4665 break;
4666 }
4667
4668 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
4669 regs[rd] = _RW_ISWRITER(&r.ri);
4670 break;
4671
4672 #endif /* __NetBSD__ */
4673
4674 case DIF_SUBR_BCOPY: {
4675 /*
4676 * We need to be sure that the destination is in the scratch
4677 * region -- no other region is allowed.
4678 */
4679 uintptr_t src = tupregs[0].dttk_value;
4680 uintptr_t dest = tupregs[1].dttk_value;
4681 size_t size = tupregs[2].dttk_value;
4682
4683 if (!dtrace_inscratch(dest, size, mstate)) {
4684 *flags |= CPU_DTRACE_BADADDR;
4685 *illval = regs[rd];
4686 break;
4687 }
4688
4689 if (!dtrace_canload(src, size, mstate, vstate)) {
4690 regs[rd] = 0;
4691 break;
4692 }
4693
4694 dtrace_bcopy((void *)src, (void *)dest, size);
4695 break;
4696 }
4697
4698 case DIF_SUBR_ALLOCA:
4699 case DIF_SUBR_COPYIN: {
4700 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
4701 uint64_t size =
4702 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
4703 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
4704
4705 /*
4706 * This action doesn't require any credential checks since
4707 * probes will not activate in user contexts to which the
4708 * enabling user does not have permissions.
4709 */
4710
4711 /*
4712 * Rounding up the user allocation size could have overflowed
4713 * a large, bogus allocation (like -1ULL) to 0.
4714 */
4715 if (scratch_size < size ||
4716 !DTRACE_INSCRATCH(mstate, scratch_size)) {
4717 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4718 regs[rd] = 0;
4719 break;
4720 }
4721
4722 if (subr == DIF_SUBR_COPYIN) {
4723 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4724 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4725 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4726 }
4727
4728 mstate->dtms_scratch_ptr += scratch_size;
4729 regs[rd] = dest;
4730 break;
4731 }
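	/*
	 * Editorial note (illustrative, not in the original source): the
	 * "scratch_size < size" test above is the wrap-around guard.  For a
	 * bogus request such as size = -1ULL, adding the alignment slop
	 * (dest - dtms_scratch_ptr) wraps scratch_size past zero; without
	 * the guard, the tiny wrapped value could slip past the
	 * DTRACE_INSCRATCH() check.
	 */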
4732
4733 case DIF_SUBR_COPYINTO: {
4734 uint64_t size = tupregs[1].dttk_value;
4735 uintptr_t dest = tupregs[2].dttk_value;
4736
4737 /*
4738 * This action doesn't require any credential checks since
4739 * probes will not activate in user contexts to which the
4740 * enabling user does not have permissions.
4741 */
4742 if (!dtrace_inscratch(dest, size, mstate)) {
4743 *flags |= CPU_DTRACE_BADADDR;
4744 *illval = regs[rd];
4745 break;
4746 }
4747
4748 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4749 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
4750 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4751 break;
4752 }
4753
4754 case DIF_SUBR_COPYINSTR: {
4755 uintptr_t dest = mstate->dtms_scratch_ptr;
4756 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4757
4758 if (nargs > 1 && tupregs[1].dttk_value < size)
4759 size = tupregs[1].dttk_value + 1;
4760
4761 /*
4762 * This action doesn't require any credential checks since
4763 * probes will not activate in user contexts to which the
4764 * enabling user does not have permissions.
4765 */
4766 if (!DTRACE_INSCRATCH(mstate, size)) {
4767 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4768 regs[rd] = 0;
4769 break;
4770 }
4771
4772 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4773 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
4774 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4775
4776 ((char *)dest)[size - 1] = '\0';
4777 mstate->dtms_scratch_ptr += size;
4778 regs[rd] = dest;
4779 break;
4780 }
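	/*
	 * Editorial example (not in the original source): copyinstr(addr, 8)
	 * clamps size to 9 above, so at most eight characters are copied in
	 * and dest[8] is forced to NUL -- the returned scratch string is
	 * always terminated even when the user string is longer.
	 */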
4781
4782 #ifdef illumos
4783 case DIF_SUBR_MSGSIZE:
4784 case DIF_SUBR_MSGDSIZE: {
4785 uintptr_t baddr = tupregs[0].dttk_value, daddr;
4786 uintptr_t wptr, rptr;
4787 size_t count = 0;
4788 int cont = 0;
4789
4790 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) {
4791
4792 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
4793 vstate)) {
4794 regs[rd] = 0;
4795 break;
4796 }
4797
4798 wptr = dtrace_loadptr(baddr +
4799 offsetof(mblk_t, b_wptr));
4800
4801 rptr = dtrace_loadptr(baddr +
4802 offsetof(mblk_t, b_rptr));
4803
4804 if (wptr < rptr) {
4805 *flags |= CPU_DTRACE_BADADDR;
4806 *illval = tupregs[0].dttk_value;
4807 break;
4808 }
4809
4810 daddr = dtrace_loadptr(baddr +
4811 offsetof(mblk_t, b_datap));
4812
4813 baddr = dtrace_loadptr(baddr +
4814 offsetof(mblk_t, b_cont));
4815
4816 /*
4817 			 * We want to protect against denial-of-service here,
4818 * so we're only going to search the list for
4819 * dtrace_msgdsize_max mblks.
4820 */
4821 if (cont++ > dtrace_msgdsize_max) {
4822 *flags |= CPU_DTRACE_ILLOP;
4823 break;
4824 }
4825
4826 if (subr == DIF_SUBR_MSGDSIZE) {
4827 if (dtrace_load8(daddr +
4828 offsetof(dblk_t, db_type)) != M_DATA)
4829 continue;
4830 }
4831
4832 count += wptr - rptr;
4833 }
4834
4835 if (!(*flags & CPU_DTRACE_FAULT))
4836 regs[rd] = count;
4837
4838 break;
4839 }
4840 #endif
4841
4842 case DIF_SUBR_PROGENYOF: {
4843 pid_t pid = tupregs[0].dttk_value;
4844 proc_t *p;
4845 int rval = 0;
4846
4847 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4848
4849 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
4850 #ifdef illumos
4851 if (p->p_pidp->pid_id == pid) {
4852 #else
4853 if (p->p_pid == pid) {
4854 #endif
4855 rval = 1;
4856 break;
4857 }
4858 }
4859
4860 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4861
4862 regs[rd] = rval;
4863 break;
4864 }
4865
4866 case DIF_SUBR_SPECULATION:
4867 regs[rd] = dtrace_speculation(state);
4868 break;
4869
4870 case DIF_SUBR_COPYOUT: {
4871 uintptr_t kaddr = tupregs[0].dttk_value;
4872 uintptr_t uaddr = tupregs[1].dttk_value;
4873 uint64_t size = tupregs[2].dttk_value;
4874
4875 if (!dtrace_destructive_disallow &&
4876 dtrace_priv_proc_control(state) &&
4877 !dtrace_istoxic(kaddr, size)) {
4878 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4879 dtrace_copyout(kaddr, uaddr, size, flags);
4880 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4881 }
4882 break;
4883 }
4884
4885 case DIF_SUBR_COPYOUTSTR: {
4886 uintptr_t kaddr = tupregs[0].dttk_value;
4887 uintptr_t uaddr = tupregs[1].dttk_value;
4888 uint64_t size = tupregs[2].dttk_value;
4889 size_t lim;
4890
4891 if (!dtrace_destructive_disallow &&
4892 dtrace_priv_proc_control(state) &&
4893 !dtrace_istoxic(kaddr, size) &&
4894 dtrace_strcanload(kaddr, size, &lim, mstate, vstate)) {
4895 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
4896 dtrace_copyoutstr(kaddr, uaddr, lim, flags);
4897 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
4898 }
4899 break;
4900 }
4901
4902 case DIF_SUBR_STRLEN: {
4903 size_t size = state->dts_options[DTRACEOPT_STRSIZE];
4904 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
4905 size_t lim;
4906
4907 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4908 regs[rd] = 0;
4909 break;
4910 }
4911
4912 regs[rd] = dtrace_strlen((char *)addr, lim);
4913 break;
4914 }
4915
4916 case DIF_SUBR_STRCHR:
4917 case DIF_SUBR_STRRCHR: {
4918 /*
4919 * We're going to iterate over the string looking for the
4920 * specified character. We will iterate until we have reached
4921 * the string length or we have found the character. If this
4922 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
4923 * of the specified character instead of the first.
4924 */
4925 uintptr_t addr = tupregs[0].dttk_value;
4926 uintptr_t addr_limit;
4927 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4928 size_t lim;
4929 char c, target = (char)tupregs[1].dttk_value;
4930
4931 if (!dtrace_strcanload(addr, size, &lim, mstate, vstate)) {
4932 regs[rd] = 0;
4933 break;
4934 }
4935 addr_limit = addr + lim;
4936
4937 for (regs[rd] = 0; addr < addr_limit; addr++) {
4938 if ((c = dtrace_load8(addr)) == target) {
4939 regs[rd] = addr;
4940
4941 if (subr == DIF_SUBR_STRCHR)
4942 break;
4943 }
4944
4945 if (c == '\0')
4946 break;
4947 }
4948 break;
4949 }
4950
4951 case DIF_SUBR_STRSTR:
4952 case DIF_SUBR_INDEX:
4953 case DIF_SUBR_RINDEX: {
4954 /*
4955 * We're going to iterate over the string looking for the
4956 * specified string. We will iterate until we have reached
4957 * the string length or we have found the string. (Yes, this
4958 * is done in the most naive way possible -- but considering
4959 * that the string we're searching for is likely to be
4960 * relatively short, the complexity of Rabin-Karp or similar
4961 * hardly seems merited.)
4962 */
4963 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
4964 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
4965 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4966 size_t len = dtrace_strlen(addr, size);
4967 size_t sublen = dtrace_strlen(substr, size);
4968 char *limit = addr + len, *orig = addr;
4969 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
4970 int inc = 1;
4971
4972 regs[rd] = notfound;
4973
4974 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
4975 regs[rd] = 0;
4976 break;
4977 }
4978
4979 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
4980 vstate)) {
4981 regs[rd] = 0;
4982 break;
4983 }
4984
4985 /*
4986 * strstr() and index()/rindex() have similar semantics if
4987 * both strings are the empty string: strstr() returns a
4988 * pointer to the (empty) string, and index() and rindex()
4989 * both return index 0 (regardless of any position argument).
4990 */
4991 if (sublen == 0 && len == 0) {
4992 if (subr == DIF_SUBR_STRSTR)
4993 regs[rd] = (uintptr_t)addr;
4994 else
4995 regs[rd] = 0;
4996 break;
4997 }
4998
4999 if (subr != DIF_SUBR_STRSTR) {
5000 if (subr == DIF_SUBR_RINDEX) {
5001 limit = orig - 1;
5002 addr += len;
5003 inc = -1;
5004 }
5005
5006 /*
5007 * Both index() and rindex() take an optional position
5008 * argument that denotes the starting position.
5009 */
5010 if (nargs == 3) {
5011 int64_t pos = (int64_t)tupregs[2].dttk_value;
5012
5013 /*
5014 * If the position argument to index() is
5015 * negative, Perl implicitly clamps it at
5016 * zero. This semantic is a little surprising
5017 * given the special meaning of negative
5018 * positions to similar Perl functions like
5019 * substr(), but it appears to reflect a
5020 * notion that index() can start from a
5021 * negative index and increment its way up to
5022 * the string. Given this notion, Perl's
5023 * rindex() is at least self-consistent in
5024 * that it implicitly clamps positions greater
5025 * than the string length to be the string
5026 * length. Where Perl completely loses
5027 * coherence, however, is when the specified
5028 * substring is the empty string (""). In
5029 * this case, even if the position is
5030 * negative, rindex() returns 0 -- and even if
5031 * the position is greater than the length,
5032 * index() returns the string length. These
5033 * semantics violate the notion that index()
5034 * should never return a value less than the
5035 * specified position and that rindex() should
5036 * never return a value greater than the
5037 * specified position. (One assumes that
5038 * these semantics are artifacts of Perl's
5039 * implementation and not the results of
5040 * deliberate design -- it beggars belief that
5041 * even Larry Wall could desire such oddness.)
5042 * While in the abstract one would wish for
5043 * consistent position semantics across
5044 * substr(), index() and rindex() -- or at the
5045 * very least self-consistent position
5046 * semantics for index() and rindex() -- we
5047 * instead opt to keep with the extant Perl
5048 * semantics, in all their broken glory. (Do
5049 * we have more desire to maintain Perl's
5050 * semantics than Perl does? Probably.)
5051 */
5052 if (subr == DIF_SUBR_RINDEX) {
5053 if (pos < 0) {
5054 if (sublen == 0)
5055 regs[rd] = 0;
5056 break;
5057 }
5058
5059 if (pos > len)
5060 pos = len;
5061 } else {
5062 if (pos < 0)
5063 pos = 0;
5064
5065 if (pos >= len) {
5066 if (sublen == 0)
5067 regs[rd] = len;
5068 break;
5069 }
5070 }
5071
5072 addr = orig + pos;
5073 }
5074 }
5075
5076 for (regs[rd] = notfound; addr != limit; addr += inc) {
5077 if (dtrace_strncmp(addr, substr, sublen) == 0) {
5078 if (subr != DIF_SUBR_STRSTR) {
5079 /*
5080 * As D index() and rindex() are
5081 * modeled on Perl (and not on awk),
5082 * we return a zero-based (and not a
5083 * one-based) index. (For you Perl
5084 * weenies: no, we're not going to add
5085 * $[ -- and shouldn't you be at a con
5086 * or something?)
5087 */
5088 regs[rd] = (uintptr_t)(addr - orig);
5089 break;
5090 }
5091
5092 ASSERT(subr == DIF_SUBR_STRSTR);
5093 regs[rd] = (uintptr_t)addr;
5094 break;
5095 }
5096 }
5097
5098 break;
5099 }
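	/*
	 * Editorial examples (not in the original source) of the Perl-derived
	 * semantics described above, all zero-based:
	 *
	 *	index("hello", "l")		=> 2
	 *	index("hello", "", 10)		=> 5	(clamped to the length)
	 *	rindex("hello", "", -4)		=> 0	(negative position)
	 *	strstr("hello", "lo")		=> pointer to the "lo" suffix
	 */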
5100
5101 case DIF_SUBR_STRTOK: {
5102 uintptr_t addr = tupregs[0].dttk_value;
5103 uintptr_t tokaddr = tupregs[1].dttk_value;
5104 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5105 uintptr_t limit, toklimit;
5106 size_t clim;
5107 uint8_t c = 0, tokmap[32]; /* 256 / 8 */
5108 char *dest = (char *)mstate->dtms_scratch_ptr;
5109 int i;
5110
5111 /*
5112 * Check both the token buffer and (later) the input buffer,
5113 * since both could be non-scratch addresses.
5114 */
5115 if (!dtrace_strcanload(tokaddr, size, &clim, mstate, vstate)) {
5116 regs[rd] = 0;
5117 break;
5118 }
5119 toklimit = tokaddr + clim;
5120
5121 if (!DTRACE_INSCRATCH(mstate, size)) {
5122 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5123 regs[rd] = 0;
5124 break;
5125 }
5126
5127 if (addr == 0) {
5128 /*
5129 * If the address specified is NULL, we use our saved
5130 * strtok pointer from the mstate. Note that this
5131 * means that the saved strtok pointer is _only_
5132 * valid within multiple enablings of the same probe --
5133 * it behaves like an implicit clause-local variable.
5134 */
5135 addr = mstate->dtms_strtok;
5136 limit = mstate->dtms_strtok_limit;
5137 } else {
5138 /*
5139 * If the user-specified address is non-NULL we must
5140 * access check it. This is the only time we have
5141 * a chance to do so, since this address may reside
5142 			 * in the string table of this clause -- future calls
5143 * (when we fetch addr from mstate->dtms_strtok)
5144 * would fail this access check.
5145 */
5146 if (!dtrace_strcanload(addr, size, &clim, mstate,
5147 vstate)) {
5148 regs[rd] = 0;
5149 break;
5150 }
5151 limit = addr + clim;
5152 }
5153
5154 /*
5155 * First, zero the token map, and then process the token
5156 * string -- setting a bit in the map for every character
5157 * found in the token string.
5158 */
5159 for (i = 0; i < sizeof (tokmap); i++)
5160 tokmap[i] = 0;
5161
5162 for (; tokaddr < toklimit; tokaddr++) {
5163 if ((c = dtrace_load8(tokaddr)) == '\0')
5164 break;
5165
5166 ASSERT((c >> 3) < sizeof (tokmap));
5167 tokmap[c >> 3] |= (1 << (c & 0x7));
5168 }
5169
5170 for (; addr < limit; addr++) {
5171 /*
5172 * We're looking for a character that is _not_
5173 * contained in the token string.
5174 */
5175 if ((c = dtrace_load8(addr)) == '\0')
5176 break;
5177
5178 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
5179 break;
5180 }
5181
5182 if (c == '\0') {
5183 /*
5184 * We reached the end of the string without finding
5185 * any character that was not in the token string.
5186 * We return NULL in this case, and we set the saved
5187 * address to NULL as well.
5188 */
5189 regs[rd] = 0;
5190 mstate->dtms_strtok = 0;
5191 mstate->dtms_strtok_limit = 0;
5192 break;
5193 }
5194
5195 /*
5196 * From here on, we're copying into the destination string.
5197 */
5198 for (i = 0; addr < limit && i < size - 1; addr++) {
5199 if ((c = dtrace_load8(addr)) == '\0')
5200 break;
5201
5202 if (tokmap[c >> 3] & (1 << (c & 0x7)))
5203 break;
5204
5205 ASSERT(i < size);
5206 dest[i++] = c;
5207 }
5208
5209 ASSERT(i < size);
5210 dest[i] = '\0';
5211 regs[rd] = (uintptr_t)dest;
5212 mstate->dtms_scratch_ptr += size;
5213 mstate->dtms_strtok = addr;
5214 mstate->dtms_strtok_limit = limit;
5215 break;
5216 }
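	/*
	 * Editorial note (not in the original source): tokmap above is a
	 * 256-bit membership set.  Each delimiter byte c is recorded with
	 * tokmap[c >> 3] |= (1 << (c & 0x7)) and tested the same way, so
	 * e.g. strtok("a:b=c", ":=") splits on both ':' and '=' with an
	 * O(1) test per input character.
	 */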
5217
5218 case DIF_SUBR_SUBSTR: {
5219 uintptr_t s = tupregs[0].dttk_value;
5220 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5221 char *d = (char *)mstate->dtms_scratch_ptr;
5222 int64_t index = (int64_t)tupregs[1].dttk_value;
5223 int64_t remaining = (int64_t)tupregs[2].dttk_value;
5224 size_t len = dtrace_strlen((char *)s, size);
5225 int64_t i;
5226
5227 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
5228 regs[rd] = 0;
5229 break;
5230 }
5231
5232 if (!DTRACE_INSCRATCH(mstate, size)) {
5233 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5234 regs[rd] = 0;
5235 break;
5236 }
5237
5238 if (nargs <= 2)
5239 remaining = (int64_t)size;
5240
5241 if (index < 0) {
5242 index += len;
5243
5244 if (index < 0 && index + remaining > 0) {
5245 remaining += index;
5246 index = 0;
5247 }
5248 }
5249
5250 if (index >= len || index < 0) {
5251 remaining = 0;
5252 } else if (remaining < 0) {
5253 remaining += len - index;
5254 } else if (index + remaining > size) {
5255 remaining = size - index;
5256 }
5257
5258 for (i = 0; i < remaining; i++) {
5259 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
5260 break;
5261 }
5262
5263 d[i] = '\0';
5264
5265 mstate->dtms_scratch_ptr += size;
5266 regs[rd] = (uintptr_t)d;
5267 break;
5268 }
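	/*
	 * Editorial worked example (not in the original source): for
	 * substr("hello", -3, 2), len is 5 and index is -3, so the
	 * "index += len" above rebases index to 2; two bytes are then
	 * copied from s + 2, yielding "ll".
	 */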
5269
5270 case DIF_SUBR_JSON: {
5271 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5272 uintptr_t json = tupregs[0].dttk_value;
5273 size_t jsonlen = dtrace_strlen((char *)json, size);
5274 uintptr_t elem = tupregs[1].dttk_value;
5275 size_t elemlen = dtrace_strlen((char *)elem, size);
5276
5277 char *dest = (char *)mstate->dtms_scratch_ptr;
5278 char *elemlist = (char *)mstate->dtms_scratch_ptr + jsonlen + 1;
5279 char *ee = elemlist;
5280 int nelems = 1;
5281 uintptr_t cur;
5282
5283 if (!dtrace_canload(json, jsonlen + 1, mstate, vstate) ||
5284 !dtrace_canload(elem, elemlen + 1, mstate, vstate)) {
5285 regs[rd] = 0;
5286 break;
5287 }
5288
5289 if (!DTRACE_INSCRATCH(mstate, jsonlen + 1 + elemlen + 1)) {
5290 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5291 regs[rd] = 0;
5292 break;
5293 }
5294
5295 /*
5296 * Read the element selector and split it up into a packed list
5297 * of strings.
5298 */
5299 for (cur = elem; cur < elem + elemlen; cur++) {
5300 char cc = dtrace_load8(cur);
5301
5302 if (cur == elem && cc == '[') {
5303 /*
5304 * If the first element selector key is
5305 * actually an array index then ignore the
5306 * bracket.
5307 */
5308 continue;
5309 }
5310
5311 if (cc == ']')
5312 continue;
5313
5314 if (cc == '.' || cc == '[') {
5315 nelems++;
5316 cc = '\0';
5317 }
5318
5319 *ee++ = cc;
5320 }
5321 *ee++ = '\0';
5322
5323 if ((regs[rd] = (uintptr_t)dtrace_json(size, json, elemlist,
5324 nelems, dest)) != 0)
5325 mstate->dtms_scratch_ptr += jsonlen + 1;
5326 break;
5327 }
5328
5329 case DIF_SUBR_TOUPPER:
5330 case DIF_SUBR_TOLOWER: {
5331 uintptr_t s = tupregs[0].dttk_value;
5332 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5333 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5334 size_t len = dtrace_strlen((char *)s, size);
5335 char lower, upper, convert;
5336 int64_t i;
5337
5338 if (subr == DIF_SUBR_TOUPPER) {
5339 lower = 'a';
5340 upper = 'z';
5341 convert = 'A';
5342 } else {
5343 lower = 'A';
5344 upper = 'Z';
5345 convert = 'a';
5346 }
5347
5348 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
5349 regs[rd] = 0;
5350 break;
5351 }
5352
5353 if (!DTRACE_INSCRATCH(mstate, size)) {
5354 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5355 regs[rd] = 0;
5356 break;
5357 }
5358
5359 for (i = 0; i < size - 1; i++) {
5360 if ((c = dtrace_load8(s + i)) == '\0')
5361 break;
5362
5363 if (c >= lower && c <= upper)
5364 c = convert + (c - lower);
5365
5366 dest[i] = c;
5367 }
5368
5369 ASSERT(i < size);
5370 dest[i] = '\0';
5371 regs[rd] = (uintptr_t)dest;
5372 mstate->dtms_scratch_ptr += size;
5373 break;
5374 }
5375
5376 #ifdef illumos
5377 case DIF_SUBR_GETMAJOR:
5378 #ifdef _LP64
5379 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
5380 #else
5381 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
5382 #endif
5383 break;
5384
5385 case DIF_SUBR_GETMINOR:
5386 #ifdef _LP64
5387 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
5388 #else
5389 regs[rd] = tupregs[0].dttk_value & MAXMIN;
5390 #endif
5391 break;
5392
5393 case DIF_SUBR_DDI_PATHNAME: {
5394 /*
5395 * This one is a galactic mess. We are going to roughly
5396 * emulate ddi_pathname(), but it's made more complicated
5397 * by the fact that we (a) want to include the minor name and
5398 * (b) must proceed iteratively instead of recursively.
5399 */
5400 uintptr_t dest = mstate->dtms_scratch_ptr;
5401 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5402 char *start = (char *)dest, *end = start + size - 1;
5403 uintptr_t daddr = tupregs[0].dttk_value;
5404 int64_t minor = (int64_t)tupregs[1].dttk_value;
5405 char *s;
5406 int i, len, depth = 0;
5407
5408 /*
5409 * Due to all the pointer jumping we do and context we must
5410 * rely upon, we just mandate that the user must have kernel
5411 * read privileges to use this routine.
5412 */
5413 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
5414 *flags |= CPU_DTRACE_KPRIV;
5415 *illval = daddr;
5416 regs[rd] = 0;
5417 }
5418
5419 if (!DTRACE_INSCRATCH(mstate, size)) {
5420 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5421 regs[rd] = 0;
5422 break;
5423 }
5424
5425 *end = '\0';
5426
5427 /*
5428 * We want to have a name for the minor. In order to do this,
5429 * we need to walk the minor list from the devinfo. We want
5430 * to be sure that we don't infinitely walk a circular list,
5431 * so we check for circularity by sending a scout pointer
5432 * ahead two elements for every element that we iterate over;
5433 * if the list is circular, these will ultimately point to the
5434 * same element. You may recognize this little trick as the
5435 * answer to a stupid interview question -- one that always
5436 * seems to be asked by those who had to have it laboriously
5437 * explained to them, and who can't even concisely describe
5438 * the conditions under which one would be forced to resort to
5439 * this technique. Needless to say, those conditions are
5440 * found here -- and probably only here. Is this the only use
5441 * of this infamous trick in shipping, production code? If it
5442 * isn't, it probably should be...
5443 */
5444 if (minor != -1) {
5445 uintptr_t maddr = dtrace_loadptr(daddr +
5446 offsetof(struct dev_info, devi_minor));
5447
5448 uintptr_t next = offsetof(struct ddi_minor_data, next);
5449 uintptr_t name = offsetof(struct ddi_minor_data,
5450 d_minor) + offsetof(struct ddi_minor, name);
5451 uintptr_t dev = offsetof(struct ddi_minor_data,
5452 d_minor) + offsetof(struct ddi_minor, dev);
5453 uintptr_t scout;
5454
5455 if (maddr != NULL)
5456 scout = dtrace_loadptr(maddr + next);
5457
5458 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5459 uint64_t m;
5460 #ifdef _LP64
5461 m = dtrace_load64(maddr + dev) & MAXMIN64;
5462 #else
5463 m = dtrace_load32(maddr + dev) & MAXMIN;
5464 #endif
5465 if (m != minor) {
5466 maddr = dtrace_loadptr(maddr + next);
5467
5468 if (scout == NULL)
5469 continue;
5470
5471 scout = dtrace_loadptr(scout + next);
5472
5473 if (scout == NULL)
5474 continue;
5475
5476 scout = dtrace_loadptr(scout + next);
5477
5478 if (scout == NULL)
5479 continue;
5480
5481 if (scout == maddr) {
5482 *flags |= CPU_DTRACE_ILLOP;
5483 break;
5484 }
5485
5486 continue;
5487 }
5488
5489 /*
5490 * We have the minor data. Now we need to
5491 * copy the minor's name into the end of the
5492 * pathname.
5493 */
5494 s = (char *)dtrace_loadptr(maddr + name);
5495 len = dtrace_strlen(s, size);
5496
5497 if (*flags & CPU_DTRACE_FAULT)
5498 break;
5499
5500 if (len != 0) {
5501 if ((end -= (len + 1)) < start)
5502 break;
5503
5504 *end = ':';
5505 }
5506
5507 for (i = 1; i <= len; i++)
5508 end[i] = dtrace_load8((uintptr_t)s++);
5509 break;
5510 }
5511 }
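		/*
		 * Editorial note (not in the original source): the scout is
		 * the "hare" of the classic tortoise-and-hare cycle check --
		 * it follows two next links for every one that maddr follows,
		 * so on a circular minor list the two must eventually meet
		 * and the walk is aborted with CPU_DTRACE_ILLOP.
		 */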
5512
5513 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
5514 ddi_node_state_t devi_state;
5515
5516 devi_state = dtrace_load32(daddr +
5517 offsetof(struct dev_info, devi_node_state));
5518
5519 if (*flags & CPU_DTRACE_FAULT)
5520 break;
5521
5522 if (devi_state >= DS_INITIALIZED) {
5523 s = (char *)dtrace_loadptr(daddr +
5524 offsetof(struct dev_info, devi_addr));
5525 len = dtrace_strlen(s, size);
5526
5527 if (*flags & CPU_DTRACE_FAULT)
5528 break;
5529
5530 if (len != 0) {
5531 if ((end -= (len + 1)) < start)
5532 break;
5533
5534 *end = '@';
5535 }
5536
5537 for (i = 1; i <= len; i++)
5538 end[i] = dtrace_load8((uintptr_t)s++);
5539 }
5540
5541 /*
5542 * Now for the node name...
5543 */
5544 s = (char *)dtrace_loadptr(daddr +
5545 offsetof(struct dev_info, devi_node_name));
5546
5547 daddr = dtrace_loadptr(daddr +
5548 offsetof(struct dev_info, devi_parent));
5549
5550 /*
5551 * If our parent is NULL (that is, if we're the root
5552 * node), we're going to use the special path
5553 * "devices".
5554 */
5555 if (daddr == 0)
5556 s = "devices";
5557
5558 len = dtrace_strlen(s, size);
5559 if (*flags & CPU_DTRACE_FAULT)
5560 break;
5561
5562 if ((end -= (len + 1)) < start)
5563 break;
5564
5565 for (i = 1; i <= len; i++)
5566 end[i] = dtrace_load8((uintptr_t)s++);
5567 *end = '/';
5568
5569 if (depth++ > dtrace_devdepth_max) {
5570 *flags |= CPU_DTRACE_ILLOP;
5571 break;
5572 }
5573 }
5574
5575 if (end < start)
5576 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5577
5578 if (daddr == 0) {
5579 regs[rd] = (uintptr_t)end;
5580 mstate->dtms_scratch_ptr += size;
5581 }
5582
5583 break;
5584 }
5585 #endif
5586
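	/*
	 * strjoin() concatenates its two string arguments into scratch
	 * space, e.g. strjoin("dt", "race") in D evaluates to "dtrace"
	 * (illustrative; the result is bounded by the "strsize" option,
	 * as enforced below).
	 */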
5587 case DIF_SUBR_STRJOIN: {
5588 char *d = (char *)mstate->dtms_scratch_ptr;
5589 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5590 uintptr_t s1 = tupregs[0].dttk_value;
5591 uintptr_t s2 = tupregs[1].dttk_value;
5592 int i = 0, j = 0;
5593 size_t lim1, lim2;
5594 char c;
5595
5596 if (!dtrace_strcanload(s1, size, &lim1, mstate, vstate) ||
5597 !dtrace_strcanload(s2, size, &lim2, mstate, vstate)) {
5598 regs[rd] = 0;
5599 break;
5600 }
5601
5602 if (!DTRACE_INSCRATCH(mstate, size)) {
5603 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5604 regs[rd] = 0;
5605 break;
5606 }
5607
5608 for (;;) {
5609 if (i >= size) {
5610 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5611 regs[rd] = 0;
5612 break;
5613 }
5614
5615 c = (i >= lim1) ? '\0' : dtrace_load8(s1++);
5616 if ((d[i++] = c) == '\0') {
5617 i--;
5618 break;
5619 }
5620 }
5621
5622 for (;;) {
5623 if (i >= size) {
5624 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5625 regs[rd] = 0;
5626 break;
5627 }
5628
5629 c = (j++ >= lim2) ? '\0' : dtrace_load8(s2++);
5630 if ((d[i++] = c) == '\0')
5631 break;
5632 }
5633
5634 if (i < size) {
5635 mstate->dtms_scratch_ptr += i;
5636 regs[rd] = (uintptr_t)d;
5637 }
5638
5639 break;
5640 }
5641
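	/*
	 * strtoll() parses a string as a signed 64-bit integer, with an
	 * optional second argument giving the base, e.g. strtoll("ff", 16)
	 * evaluates to 255. A string that cannot be safely loaded yields
	 * INT64_MIN, as below.
	 */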
5642 case DIF_SUBR_STRTOLL: {
5643 uintptr_t s = tupregs[0].dttk_value;
5644 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5645 size_t lim;
5646 int base = 10;
5647
5648 if (nargs > 1) {
5649 if ((base = tupregs[1].dttk_value) <= 1 ||
5650 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5651 *flags |= CPU_DTRACE_ILLOP;
5652 break;
5653 }
5654 }
5655
5656 if (!dtrace_strcanload(s, size, &lim, mstate, vstate)) {
5657 regs[rd] = INT64_MIN;
5658 break;
5659 }
5660
5661 regs[rd] = dtrace_strtoll((char *)s, base, lim);
5662 break;
5663 }
5664
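	/*
	 * lltostr() renders a signed 64-bit integer as a string in an
	 * optional base (default 10); bases 8 and 16 get a leading "0"
	 * and "0x" respectively, e.g. lltostr(255, 16) yields "0xff".
	 */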
5665 case DIF_SUBR_LLTOSTR: {
5666 int64_t i = (int64_t)tupregs[0].dttk_value;
5667 uint64_t val, digit;
5668 uint64_t size = 65; /* enough room for 2^64 in binary */
5669 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
5670 int base = 10;
5671
5672 if (nargs > 1) {
5673 if ((base = tupregs[1].dttk_value) <= 1 ||
5674 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) {
5675 *flags |= CPU_DTRACE_ILLOP;
5676 break;
5677 }
5678 }
5679
5680 val = (base == 10 && i < 0) ? i * -1 : i;
5681
5682 if (!DTRACE_INSCRATCH(mstate, size)) {
5683 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5684 regs[rd] = 0;
5685 break;
5686 }
5687
5688 for (*end-- = '\0'; val; val /= base) {
5689 if ((digit = val % base) <= '9' - '0') {
5690 *end-- = '0' + digit;
5691 } else {
5692 *end-- = 'a' + (digit - ('9' - '0') - 1);
5693 }
5694 }
5695
5696 if (i == 0 && base == 16)
5697 *end-- = '0';
5698
5699 if (base == 16)
5700 *end-- = 'x';
5701
5702 if (i == 0 || base == 8 || base == 16)
5703 *end-- = '0';
5704
5705 if (i < 0 && base == 10)
5706 *end-- = '-';
5707
5708 regs[rd] = (uintptr_t)end + 1;
5709 mstate->dtms_scratch_ptr += size;
5710 break;
5711 }
5712
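	/*
	 * The byte-order subroutines are direct analogues of htons(3)/
	 * ntohl(3) and friends: a byte swap on little-endian hosts and
	 * the identity on big-endian hosts, per the #if blocks below.
	 */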
5713 case DIF_SUBR_HTONS:
5714 case DIF_SUBR_NTOHS:
5715 #if BYTE_ORDER == BIG_ENDIAN
5716 regs[rd] = (uint16_t)tupregs[0].dttk_value;
5717 #else
5718 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
5719 #endif
5720 break;
5721
5722
5723 case DIF_SUBR_HTONL:
5724 case DIF_SUBR_NTOHL:
5725 #if BYTE_ORDER == BIG_ENDIAN
5726 regs[rd] = (uint32_t)tupregs[0].dttk_value;
5727 #else
5728 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
5729 #endif
5730 break;
5731
5732
5733 case DIF_SUBR_HTONLL:
5734 case DIF_SUBR_NTOHLL:
5735 #if BYTE_ORDER == BIG_ENDIAN
5736 regs[rd] = (uint64_t)tupregs[0].dttk_value;
5737 #else
5738 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
5739 #endif
5740 break;
5741
5742
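	/*
	 * These mirror basename(1) and dirname(1): trailing slashes are
	 * ignored, so e.g. basename("/foo/bar/") is "bar" and
	 * dirname("/foo/bar/") is "/foo"; the empty string yields ".".
	 */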
5743 case DIF_SUBR_DIRNAME:
5744 case DIF_SUBR_BASENAME: {
5745 char *dest = (char *)mstate->dtms_scratch_ptr;
5746 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5747 uintptr_t src = tupregs[0].dttk_value;
5748 int i, j, len = dtrace_strlen((char *)src, size);
5749 int lastbase = -1, firstbase = -1, lastdir = -1;
5750 int start, end;
5751
5752 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
5753 regs[rd] = 0;
5754 break;
5755 }
5756
5757 if (!DTRACE_INSCRATCH(mstate, size)) {
5758 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5759 regs[rd] = 0;
5760 break;
5761 }
5762
5763 /*
		 * The basename and dirname of a zero-length string are
		 * defined to be "."
5766 */
5767 if (len == 0) {
5768 len = 1;
5769 src = (uintptr_t)".";
5770 }
5771
5772 /*
5773 * Start from the back of the string, moving back toward the
5774 * front until we see a character that isn't a slash. That
5775 * character is the last character in the basename.
5776 */
5777 for (i = len - 1; i >= 0; i--) {
5778 if (dtrace_load8(src + i) != '/')
5779 break;
5780 }
5781
5782 if (i >= 0)
5783 lastbase = i;
5784
5785 /*
5786 * Starting from the last character in the basename, move
5787 * towards the front until we find a slash. The character
5788 * that we processed immediately before that is the first
5789 * character in the basename.
5790 */
5791 for (; i >= 0; i--) {
5792 if (dtrace_load8(src + i) == '/')
5793 break;
5794 }
5795
5796 if (i >= 0)
5797 firstbase = i + 1;
5798
5799 /*
5800 * Now keep going until we find a non-slash character. That
5801 * character is the last character in the dirname.
5802 */
5803 for (; i >= 0; i--) {
5804 if (dtrace_load8(src + i) != '/')
5805 break;
5806 }
5807
5808 if (i >= 0)
5809 lastdir = i;
5810
5811 ASSERT(!(lastbase == -1 && firstbase != -1));
5812 ASSERT(!(firstbase == -1 && lastdir != -1));
5813
5814 if (lastbase == -1) {
5815 /*
5816 * We didn't find a non-slash character. We know that
5817 * the length is non-zero, so the whole string must be
5818 * slashes. In either the dirname or the basename
5819 * case, we return '/'.
5820 */
5821 ASSERT(firstbase == -1);
5822 firstbase = lastbase = lastdir = 0;
5823 }
5824
5825 if (firstbase == -1) {
5826 /*
5827 * The entire string consists only of a basename
5828 * component. If we're looking for dirname, we need
5829 * to change our string to be just "."; if we're
5830 * looking for a basename, we'll just set the first
5831 * character of the basename to be 0.
5832 */
5833 if (subr == DIF_SUBR_DIRNAME) {
5834 ASSERT(lastdir == -1);
5835 src = (uintptr_t)".";
5836 lastdir = 0;
5837 } else {
5838 firstbase = 0;
5839 }
5840 }
5841
5842 if (subr == DIF_SUBR_DIRNAME) {
5843 if (lastdir == -1) {
5844 /*
5845 * We know that we have a slash in the name --
5846 * or lastdir would be set to 0, above. And
5847 * because lastdir is -1, we know that this
5848 * slash must be the first character. (That
5849 * is, the full string must be of the form
5850 * "/basename".) In this case, the last
5851 * character of the directory name is 0.
5852 */
5853 lastdir = 0;
5854 }
5855
5856 start = 0;
5857 end = lastdir;
5858 } else {
5859 ASSERT(subr == DIF_SUBR_BASENAME);
5860 ASSERT(firstbase != -1 && lastbase != -1);
5861 start = firstbase;
5862 end = lastbase;
5863 }
5864
5865 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
5866 dest[j] = dtrace_load8(src + i);
5867
5868 dest[j] = '\0';
5869 regs[rd] = (uintptr_t)dest;
5870 mstate->dtms_scratch_ptr += size;
5871 break;
5872 }
5873
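	/*
	 * getf() returns the file_t for a file descriptor of the current
	 * process (and requires proc privileges). Only the FreeBSD path
	 * is wired up here; the NetBSD port does not implement it yet and
	 * simply returns NULL, as below.
	 */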
5874 case DIF_SUBR_GETF: {
5875 uintptr_t fd = tupregs[0].dttk_value;
5876 struct filedesc *fdp;
5877 file_t *fp;
5878
5879 if (!dtrace_priv_proc(state)) {
5880 regs[rd] = 0;
5881 break;
5882 }
#ifdef __FreeBSD__
5884 fdp = curproc->p_fd;
5885 FILEDESC_SLOCK(fdp);
5886 fp = fget_locked(fdp, fd);
5887 mstate->dtms_getf = fp;
5888 regs[rd] = (uintptr_t)fp;
5889 FILEDESC_SUNLOCK(fdp);
5890 #endif
5891 #ifdef __NetBSD__
5892 regs[rd] = 0;
5893 #endif
5894 break;
5895 }
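	/*
	 * cleanpath() collapses redundant "//" and "/./" components and
	 * resolves "/../" lexically, e.g. cleanpath("/usr/bin/../lib")
	 * yields "/usr/lib". This is purely textual; no symlinks are
	 * resolved.
	 */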
5896 case DIF_SUBR_CLEANPATH: {
5897 char *dest = (char *)mstate->dtms_scratch_ptr, c;
5898 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
5899 uintptr_t src = tupregs[0].dttk_value;
5900 size_t lim;
5901 int i = 0, j = 0;
5902 #ifdef illumos
5903 zone_t *z;
5904 #endif
5905
5906 if (!dtrace_strcanload(src, size, &lim, mstate, vstate)) {
5907 regs[rd] = 0;
5908 break;
5909 }
5910
5911 if (!DTRACE_INSCRATCH(mstate, size)) {
5912 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5913 regs[rd] = 0;
5914 break;
5915 }
5916
5917 /*
5918 * Move forward, loading each character.
5919 */
5920 do {
5921 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5922 next:
5923 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
5924 break;
5925
5926 if (c != '/') {
5927 dest[j++] = c;
5928 continue;
5929 }
5930
5931 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5932
5933 if (c == '/') {
5934 /*
5935 * We have two slashes -- we can just advance
5936 * to the next character.
5937 */
5938 goto next;
5939 }
5940
5941 if (c != '.') {
5942 /*
5943 * This is not "." and it's not ".." -- we can
5944 * just store the "/" and this character and
5945 * drive on.
5946 */
5947 dest[j++] = '/';
5948 dest[j++] = c;
5949 continue;
5950 }
5951
5952 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5953
5954 if (c == '/') {
5955 /*
5956 * This is a "/./" component. We're not going
5957 * to store anything in the destination buffer;
5958 * we're just going to go to the next component.
5959 */
5960 goto next;
5961 }
5962
5963 if (c != '.') {
5964 /*
5965 * This is not ".." -- we can just store the
5966 * "/." and this character and continue
5967 * processing.
5968 */
5969 dest[j++] = '/';
5970 dest[j++] = '.';
5971 dest[j++] = c;
5972 continue;
5973 }
5974
5975 c = (i >= lim) ? '\0' : dtrace_load8(src + i++);
5976
5977 if (c != '/' && c != '\0') {
5978 /*
5979 * This is not ".." -- it's "..[mumble]".
5980 * We'll store the "/.." and this character
5981 * and continue processing.
5982 */
5983 dest[j++] = '/';
5984 dest[j++] = '.';
5985 dest[j++] = '.';
5986 dest[j++] = c;
5987 continue;
5988 }
5989
5990 /*
5991 * This is "/../" or "/..\0". We need to back up
5992 * our destination pointer until we find a "/".
5993 */
5994 i--;
5995 while (j != 0 && dest[--j] != '/')
5996 continue;
5997
5998 if (c == '\0')
5999 dest[++j] = '/';
6000 } while (c != '\0');
6001
6002 dest[j] = '\0';
6003
6004 #ifdef illumos
6005 if (mstate->dtms_getf != NULL &&
6006 !(mstate->dtms_access & DTRACE_ACCESS_KERNEL) &&
6007 (z = state->dts_cred.dcr_cred->cr_zone) != kcred->cr_zone) {
6008 /*
6009 * If we've done a getf() as a part of this ECB and we
6010 * don't have kernel access (and we're not in the global
6011 * zone), check if the path we cleaned up begins with
6012 * the zone's root path, and trim it off if so. Note
6013 * that this is an output cleanliness issue, not a
6014 * security issue: knowing one's zone root path does
6015 * not enable privilege escalation.
6016 */
6017 if (strstr(dest, z->zone_rootpath) == dest)
6018 dest += strlen(z->zone_rootpath) - 1;
6019 }
6020 #endif
6021
6022 regs[rd] = (uintptr_t)dest;
6023 mstate->dtms_scratch_ptr += size;
6024 break;
6025 }
6026
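	/*
	 * inet_ntoa() renders a pointed-to IPv4 address as a dotted quad
	 * (e.g. "10.0.0.1"); inet_ntoa6() and inet_ntop(AF_INET6, ...)
	 * produce the RFC 1884 form with the "::" zero-run compression
	 * built below.
	 */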
6027 case DIF_SUBR_INET_NTOA:
6028 case DIF_SUBR_INET_NTOA6:
6029 case DIF_SUBR_INET_NTOP: {
6030 size_t size;
6031 int af, argi, i;
6032 char *base, *end;
6033
6034 if (subr == DIF_SUBR_INET_NTOP) {
6035 af = (int)tupregs[0].dttk_value;
6036 argi = 1;
6037 } else {
6038 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
6039 argi = 0;
6040 }
6041
6042 if (af == AF_INET) {
6043 ipaddr_t ip4;
6044 uint8_t *ptr8, val;
6045
6046 if (!dtrace_canload(tupregs[argi].dttk_value,
6047 sizeof (ipaddr_t), mstate, vstate)) {
6048 regs[rd] = 0;
6049 break;
6050 }
6051
6052 /*
6053 * Safely load the IPv4 address.
6054 */
6055 ip4 = dtrace_load32(tupregs[argi].dttk_value);
6056
6057 /*
			 * Check that an IPv4 string will fit in scratch.
6059 */
6060 size = INET_ADDRSTRLEN;
6061 if (!DTRACE_INSCRATCH(mstate, size)) {
6062 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6063 regs[rd] = 0;
6064 break;
6065 }
6066 base = (char *)mstate->dtms_scratch_ptr;
6067 end = (char *)mstate->dtms_scratch_ptr + size - 1;
6068
6069 /*
6070 * Stringify as a dotted decimal quad.
6071 */
6072 *end-- = '\0';
6073 ptr8 = (uint8_t *)&ip4;
6074 for (i = 3; i >= 0; i--) {
6075 val = ptr8[i];
6076
6077 if (val == 0) {
6078 *end-- = '0';
6079 } else {
6080 for (; val; val /= 10) {
6081 *end-- = '0' + (val % 10);
6082 }
6083 }
6084
6085 if (i > 0)
6086 *end-- = '.';
6087 }
6088 ASSERT(end + 1 >= base);
6089
6090 } else if (af == AF_INET6) {
6091 struct in6_addr ip6;
6092 int firstzero, tryzero, numzero, v6end;
6093 uint16_t val;
6094 const char digits[] = "0123456789abcdef";
6095
6096 /*
6097 * Stringify using RFC 1884 convention 2 - 16 bit
6098 * hexadecimal values with a zero-run compression.
6099 * Lower case hexadecimal digits are used.
6100 * eg, fe80::214:4fff:fe0b:76c8.
6101 * The IPv4 embedded form is returned for inet_ntop,
6102 * just the IPv4 string is returned for inet_ntoa6.
6103 */
6104
6105 if (!dtrace_canload(tupregs[argi].dttk_value,
6106 sizeof (struct in6_addr), mstate, vstate)) {
6107 regs[rd] = 0;
6108 break;
6109 }
6110
6111 /*
6112 * Safely load the IPv6 address.
6113 */
6114 dtrace_bcopy(
6115 (void *)(uintptr_t)tupregs[argi].dttk_value,
6116 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
6117
6118 /*
			 * Check that an IPv6 string will fit in scratch.
6120 */
6121 size = INET6_ADDRSTRLEN;
6122 if (!DTRACE_INSCRATCH(mstate, size)) {
6123 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6124 regs[rd] = 0;
6125 break;
6126 }
6127 base = (char *)mstate->dtms_scratch_ptr;
6128 end = (char *)mstate->dtms_scratch_ptr + size - 1;
6129 *end-- = '\0';
6130
6131 /*
6132 * Find the longest run of 16 bit zero values
6133 * for the single allowed zero compression - "::".
6134 */
6135 firstzero = -1;
6136 tryzero = -1;
6137 numzero = 1;
6138 for (i = 0; i < sizeof (struct in6_addr); i++) {
6139 #ifdef illumos
6140 if (ip6._S6_un._S6_u8[i] == 0 &&
6141 #else
6142 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
6143 #endif
6144 tryzero == -1 && i % 2 == 0) {
6145 tryzero = i;
6146 continue;
6147 }
6148
6149 if (tryzero != -1 &&
6150 #ifdef illumos
6151 (ip6._S6_un._S6_u8[i] != 0 ||
6152 #else
6153 (ip6.__u6_addr.__u6_addr8[i] != 0 ||
6154 #endif
6155 i == sizeof (struct in6_addr) - 1)) {
6156
6157 if (i - tryzero <= numzero) {
6158 tryzero = -1;
6159 continue;
6160 }
6161
6162 firstzero = tryzero;
6163 numzero = i - i % 2 - tryzero;
6164 tryzero = -1;
6165
6166 #ifdef illumos
6167 if (ip6._S6_un._S6_u8[i] == 0 &&
6168 #else
6169 if (ip6.__u6_addr.__u6_addr8[i] == 0 &&
6170 #endif
6171 i == sizeof (struct in6_addr) - 1)
6172 numzero += 2;
6173 }
6174 }
6175 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
6176
6177 /*
6178 * Check for an IPv4 embedded address.
6179 */
6180 v6end = sizeof (struct in6_addr) - 2;
6181 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
6182 IN6_IS_ADDR_V4COMPAT(&ip6)) {
6183 for (i = sizeof (struct in6_addr) - 1;
6184 i >= DTRACE_V4MAPPED_OFFSET; i--) {
6185 ASSERT(end >= base);
6186
6187 #ifdef illumos
6188 val = ip6._S6_un._S6_u8[i];
6189 #else
6190 val = ip6.__u6_addr.__u6_addr8[i];
6191 #endif
6192
6193 if (val == 0) {
6194 *end-- = '0';
6195 } else {
6196 for (; val; val /= 10) {
6197 *end-- = '0' + val % 10;
6198 }
6199 }
6200
6201 if (i > DTRACE_V4MAPPED_OFFSET)
6202 *end-- = '.';
6203 }
6204
6205 if (subr == DIF_SUBR_INET_NTOA6)
6206 goto inetout;
6207
6208 /*
6209 * Set v6end to skip the IPv4 address that
6210 * we have already stringified.
6211 */
6212 v6end = 10;
6213 }
6214
6215 /*
6216 * Build the IPv6 string by working through the
6217 * address in reverse.
6218 */
6219 for (i = v6end; i >= 0; i -= 2) {
6220 ASSERT(end >= base);
6221
6222 if (i == firstzero + numzero - 2) {
6223 *end-- = ':';
6224 *end-- = ':';
6225 i -= numzero - 2;
6226 continue;
6227 }
6228
6229 if (i < 14 && i != firstzero - 2)
6230 *end-- = ':';
6231
6232 #ifdef illumos
6233 val = (ip6._S6_un._S6_u8[i] << 8) +
6234 ip6._S6_un._S6_u8[i + 1];
6235 #else
6236 val = (ip6.__u6_addr.__u6_addr8[i] << 8) +
6237 ip6.__u6_addr.__u6_addr8[i + 1];
6238 #endif
6239
6240 if (val == 0) {
6241 *end-- = '0';
6242 } else {
6243 for (; val; val /= 16) {
6244 *end-- = digits[val % 16];
6245 }
6246 }
6247 }
6248 ASSERT(end + 1 >= base);
6249
6250 } else {
6251 /*
			 * The user didn't use AF_INET or AF_INET6.
6253 */
6254 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
6255 regs[rd] = 0;
6256 break;
6257 }
6258
6259 inetout: regs[rd] = (uintptr_t)end + 1;
6260 mstate->dtms_scratch_ptr += size;
6261 break;
6262 }
6263
6264 case DIF_SUBR_MEMREF: {
6265 uintptr_t size = 2 * sizeof(uintptr_t);
		uintptr_t *memref = (uintptr_t *) P2ROUNDUP(
		    mstate->dtms_scratch_ptr, sizeof(uintptr_t));
		size_t scratch_size =
		    ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size;
6268
6269 /* address and length */
6270 memref[0] = tupregs[0].dttk_value;
6271 memref[1] = tupregs[1].dttk_value;
6272
6273 regs[rd] = (uintptr_t) memref;
6274 mstate->dtms_scratch_ptr += scratch_size;
6275 break;
6276 }
6277
6278 #ifndef illumos
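	/*
	 * memstr() copies "size" bytes from "mem" into scratch, replacing
	 * each embedded NUL with the given character -- handy for turning
	 * NUL-separated buffers (argv-style argument blocks, for instance)
	 * into a single printable string.
	 */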
6279 case DIF_SUBR_MEMSTR: {
6280 char *str = (char *)mstate->dtms_scratch_ptr;
6281 uintptr_t mem = tupregs[0].dttk_value;
6282 char c = tupregs[1].dttk_value;
6283 size_t size = tupregs[2].dttk_value;
6284 uint8_t n;
6285 int i;
6286
6287 regs[rd] = 0;
6288
6289 if (size == 0)
6290 break;
6291
6292 if (!dtrace_canload(mem, size - 1, mstate, vstate))
6293 break;
6294
6295 if (!DTRACE_INSCRATCH(mstate, size)) {
6296 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
6297 break;
6298 }
6299
6300 if (dtrace_memstr_max != 0 && size > dtrace_memstr_max) {
6301 *flags |= CPU_DTRACE_ILLOP;
6302 break;
6303 }
6304
6305 for (i = 0; i < size - 1; i++) {
6306 n = dtrace_load8(mem++);
6307 str[i] = (n == 0) ? c : n;
6308 }
6309 str[size - 1] = 0;
6310
6311 regs[rd] = (uintptr_t)str;
6312 mstate->dtms_scratch_ptr += size;
6313 break;
6314 }
6315 #endif
6316 }
6317 }
6318
6319 /*
6320 * Emulate the execution of DTrace IR instructions specified by the given
6321 * DIF object. This function is deliberately void of assertions as all of
6322 * the necessary checks are handled by a call to dtrace_difo_validate().
6323 */
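/*
 * For orientation: each DIF instruction is a fixed 32-bit word whose top
 * byte is the opcode and whose remaining bytes typically encode the r1, r2
 * and rd register fields extracted by the DIF_INSTR_* macros used below;
 * branch and variable forms reuse those bits for a label or variable
 * identifier instead.
 */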
6324 static uint64_t
6325 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
6326 dtrace_vstate_t *vstate, dtrace_state_t *state)
6327 {
6328 const dif_instr_t *text = difo->dtdo_buf;
6329 const uint_t textlen = difo->dtdo_len;
6330 const char *strtab = difo->dtdo_strtab;
6331 const uint64_t *inttab = difo->dtdo_inttab;
6332
6333 uint64_t rval = 0;
6334 dtrace_statvar_t *svar;
6335 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
6336 dtrace_difv_t *v;
6337 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
6338 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval;
6339
6340 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
6341 uint64_t regs[DIF_DIR_NREGS];
6342 uint64_t *tmp;
6343
6344 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
6345 int64_t cc_r;
6346 uint_t pc = 0, id, opc = 0;
6347 uint8_t ttop = 0;
6348 dif_instr_t instr;
6349 uint_t r1, r2, rd;
6350
6351 /*
6352 * We stash the current DIF object into the machine state: we need it
6353 * for subsequent access checking.
6354 */
6355 mstate->dtms_difo = difo;
6356
6357 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
6358
6359 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
6360 opc = pc;
6361
6362 instr = text[pc++];
6363 r1 = DIF_INSTR_R1(instr);
6364 r2 = DIF_INSTR_R2(instr);
6365 rd = DIF_INSTR_RD(instr);
6366
6367 switch (DIF_INSTR_OP(instr)) {
6368 case DIF_OP_OR:
6369 regs[rd] = regs[r1] | regs[r2];
6370 break;
6371 case DIF_OP_XOR:
6372 regs[rd] = regs[r1] ^ regs[r2];
6373 break;
6374 case DIF_OP_AND:
6375 regs[rd] = regs[r1] & regs[r2];
6376 break;
6377 case DIF_OP_SLL:
6378 regs[rd] = regs[r1] << regs[r2];
6379 break;
6380 case DIF_OP_SRL:
6381 regs[rd] = regs[r1] >> regs[r2];
6382 break;
6383 case DIF_OP_SUB:
6384 regs[rd] = regs[r1] - regs[r2];
6385 break;
6386 case DIF_OP_ADD:
6387 regs[rd] = regs[r1] + regs[r2];
6388 break;
6389 case DIF_OP_MUL:
6390 regs[rd] = regs[r1] * regs[r2];
6391 break;
6392 case DIF_OP_SDIV:
6393 if (regs[r2] == 0) {
6394 regs[rd] = 0;
6395 *flags |= CPU_DTRACE_DIVZERO;
6396 } else {
6397 regs[rd] = (int64_t)regs[r1] /
6398 (int64_t)regs[r2];
6399 }
6400 break;
6401
6402 case DIF_OP_UDIV:
6403 if (regs[r2] == 0) {
6404 regs[rd] = 0;
6405 *flags |= CPU_DTRACE_DIVZERO;
6406 } else {
6407 regs[rd] = regs[r1] / regs[r2];
6408 }
6409 break;
6410
6411 case DIF_OP_SREM:
6412 if (regs[r2] == 0) {
6413 regs[rd] = 0;
6414 *flags |= CPU_DTRACE_DIVZERO;
6415 } else {
6416 regs[rd] = (int64_t)regs[r1] %
6417 (int64_t)regs[r2];
6418 }
6419 break;
6420
6421 case DIF_OP_UREM:
6422 if (regs[r2] == 0) {
6423 regs[rd] = 0;
6424 *flags |= CPU_DTRACE_DIVZERO;
6425 } else {
6426 regs[rd] = regs[r1] % regs[r2];
6427 }
6428 break;
6429
6430 case DIF_OP_NOT:
6431 regs[rd] = ~regs[r1];
6432 break;
6433 case DIF_OP_MOV:
6434 regs[rd] = regs[r1];
6435 break;
6436 case DIF_OP_CMP:
6437 cc_r = regs[r1] - regs[r2];
6438 cc_n = cc_r < 0;
6439 cc_z = cc_r == 0;
6440 cc_v = 0;
6441 cc_c = regs[r1] < regs[r2];
6442 break;
6443 case DIF_OP_TST:
6444 cc_n = cc_v = cc_c = 0;
6445 cc_z = regs[r1] == 0;
6446 break;
6447 case DIF_OP_BA:
6448 pc = DIF_INSTR_LABEL(instr);
6449 break;
6450 case DIF_OP_BE:
6451 if (cc_z)
6452 pc = DIF_INSTR_LABEL(instr);
6453 break;
6454 case DIF_OP_BNE:
6455 if (cc_z == 0)
6456 pc = DIF_INSTR_LABEL(instr);
6457 break;
6458 case DIF_OP_BG:
6459 if ((cc_z | (cc_n ^ cc_v)) == 0)
6460 pc = DIF_INSTR_LABEL(instr);
6461 break;
6462 case DIF_OP_BGU:
6463 if ((cc_c | cc_z) == 0)
6464 pc = DIF_INSTR_LABEL(instr);
6465 break;
6466 case DIF_OP_BGE:
6467 if ((cc_n ^ cc_v) == 0)
6468 pc = DIF_INSTR_LABEL(instr);
6469 break;
6470 case DIF_OP_BGEU:
6471 if (cc_c == 0)
6472 pc = DIF_INSTR_LABEL(instr);
6473 break;
6474 case DIF_OP_BL:
6475 if (cc_n ^ cc_v)
6476 pc = DIF_INSTR_LABEL(instr);
6477 break;
6478 case DIF_OP_BLU:
6479 if (cc_c)
6480 pc = DIF_INSTR_LABEL(instr);
6481 break;
6482 case DIF_OP_BLE:
6483 if (cc_z | (cc_n ^ cc_v))
6484 pc = DIF_INSTR_LABEL(instr);
6485 break;
6486 case DIF_OP_BLEU:
6487 if (cc_c | cc_z)
6488 pc = DIF_INSTR_LABEL(instr);
6489 break;
6490 case DIF_OP_RLDSB:
6491 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6492 break;
6493 /*FALLTHROUGH*/
6494 case DIF_OP_LDSB:
6495 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
6496 break;
6497 case DIF_OP_RLDSH:
6498 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6499 break;
6500 /*FALLTHROUGH*/
6501 case DIF_OP_LDSH:
6502 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
6503 break;
6504 case DIF_OP_RLDSW:
6505 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6506 break;
6507 /*FALLTHROUGH*/
6508 case DIF_OP_LDSW:
6509 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
6510 break;
6511 case DIF_OP_RLDUB:
6512 if (!dtrace_canload(regs[r1], 1, mstate, vstate))
6513 break;
6514 /*FALLTHROUGH*/
6515 case DIF_OP_LDUB:
6516 regs[rd] = dtrace_load8(regs[r1]);
6517 break;
6518 case DIF_OP_RLDUH:
6519 if (!dtrace_canload(regs[r1], 2, mstate, vstate))
6520 break;
6521 /*FALLTHROUGH*/
6522 case DIF_OP_LDUH:
6523 regs[rd] = dtrace_load16(regs[r1]);
6524 break;
6525 case DIF_OP_RLDUW:
6526 if (!dtrace_canload(regs[r1], 4, mstate, vstate))
6527 break;
6528 /*FALLTHROUGH*/
6529 case DIF_OP_LDUW:
6530 regs[rd] = dtrace_load32(regs[r1]);
6531 break;
6532 case DIF_OP_RLDX:
6533 if (!dtrace_canload(regs[r1], 8, mstate, vstate))
6534 break;
6535 /*FALLTHROUGH*/
6536 case DIF_OP_LDX:
6537 regs[rd] = dtrace_load64(regs[r1]);
6538 break;
6539 case DIF_OP_ULDSB:
6540 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6541 regs[rd] = (int8_t)
6542 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6543 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6544 break;
6545 case DIF_OP_ULDSH:
6546 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6547 regs[rd] = (int16_t)
6548 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6549 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6550 break;
6551 case DIF_OP_ULDSW:
6552 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6553 regs[rd] = (int32_t)
6554 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6555 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6556 break;
6557 case DIF_OP_ULDUB:
6558 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6559 regs[rd] =
6560 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
6561 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6562 break;
6563 case DIF_OP_ULDUH:
6564 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6565 regs[rd] =
6566 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
6567 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6568 break;
6569 case DIF_OP_ULDUW:
6570 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6571 regs[rd] =
6572 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
6573 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6574 break;
6575 case DIF_OP_ULDX:
6576 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
6577 regs[rd] =
6578 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
6579 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
6580 break;
6581 case DIF_OP_RET:
6582 rval = regs[rd];
6583 pc = textlen;
6584 break;
6585 case DIF_OP_NOP:
6586 break;
6587 case DIF_OP_SETX:
6588 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
6589 break;
6590 case DIF_OP_SETS:
6591 regs[rd] = (uint64_t)(uintptr_t)
6592 (strtab + DIF_INSTR_STRING(instr));
6593 break;
6594 case DIF_OP_SCMP: {
6595 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
6596 uintptr_t s1 = regs[r1];
6597 uintptr_t s2 = regs[r2];
6598 size_t lim1, lim2;
6599
6600 if (s1 != 0 &&
6601 !dtrace_strcanload(s1, sz, &lim1, mstate, vstate))
6602 break;
6603 if (s2 != 0 &&
6604 !dtrace_strcanload(s2, sz, &lim2, mstate, vstate))
6605 break;
6606
6607 cc_r = dtrace_strncmp((char *)s1, (char *)s2,
6608 MIN(lim1, lim2));
6609
6610 cc_n = cc_r < 0;
6611 cc_z = cc_r == 0;
6612 cc_v = cc_c = 0;
6613 break;
6614 }
6615 case DIF_OP_LDGA:
6616 regs[rd] = dtrace_dif_variable(mstate, state,
6617 r1, regs[r2]);
6618 break;
6619 case DIF_OP_LDGS:
6620 id = DIF_INSTR_VAR(instr);
6621
6622 if (id >= DIF_VAR_OTHER_UBASE) {
6623 uintptr_t a;
6624
6625 id -= DIF_VAR_OTHER_UBASE;
6626 svar = vstate->dtvs_globals[id];
6627 ASSERT(svar != NULL);
6628 v = &svar->dtsv_var;
6629
6630 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
6631 regs[rd] = svar->dtsv_data;
6632 break;
6633 }
6634
6635 a = (uintptr_t)svar->dtsv_data;
6636
6637 if (*(uint8_t *)a == UINT8_MAX) {
6638 /*
6639 * If the 0th byte is set to UINT8_MAX
6640 * then this is to be treated as a
6641 * reference to a NULL variable.
6642 */
6643 regs[rd] = 0;
6644 } else {
6645 regs[rd] = a + sizeof (uint64_t);
6646 }
6647
6648 break;
6649 }
6650
6651 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
6652 break;
6653
6654 case DIF_OP_STGS:
6655 id = DIF_INSTR_VAR(instr);
6656
6657 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6658 id -= DIF_VAR_OTHER_UBASE;
6659
6660 VERIFY(id < vstate->dtvs_nglobals);
6661 svar = vstate->dtvs_globals[id];
6662 ASSERT(svar != NULL);
6663 v = &svar->dtsv_var;
6664
6665 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6666 uintptr_t a = (uintptr_t)svar->dtsv_data;
6667 size_t lim;
6668
6669 ASSERT(a != 0);
6670 ASSERT(svar->dtsv_size != 0);
6671
6672 if (regs[rd] == 0) {
6673 *(uint8_t *)a = UINT8_MAX;
6674 break;
6675 } else {
6676 *(uint8_t *)a = 0;
6677 a += sizeof (uint64_t);
6678 }
6679 if (!dtrace_vcanload(
6680 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6681 &lim, mstate, vstate))
6682 break;
6683
6684 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6685 (void *)a, &v->dtdv_type, lim);
6686 break;
6687 }
6688
6689 svar->dtsv_data = regs[rd];
6690 break;
6691
6692 case DIF_OP_LDTA:
6693 /*
6694 * There are no DTrace built-in thread-local arrays at
6695 * present. This opcode is saved for future work.
6696 */
6697 *flags |= CPU_DTRACE_ILLOP;
6698 regs[rd] = 0;
6699 break;
6700
6701 case DIF_OP_LDLS:
6702 id = DIF_INSTR_VAR(instr);
6703
6704 if (id < DIF_VAR_OTHER_UBASE) {
6705 /*
6706 * For now, this has no meaning.
6707 */
6708 regs[rd] = 0;
6709 break;
6710 }
6711
6712 id -= DIF_VAR_OTHER_UBASE;
6713
6714 ASSERT(id < vstate->dtvs_nlocals);
6715 ASSERT(vstate->dtvs_locals != NULL);
6716
6717 svar = vstate->dtvs_locals[id];
6718 ASSERT(svar != NULL);
6719 v = &svar->dtsv_var;
6720
6721 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6722 uintptr_t a = (uintptr_t)svar->dtsv_data;
6723 size_t sz = v->dtdv_type.dtdt_size;
6724 size_t lim;
6725
6726 sz += sizeof (uint64_t);
6727 ASSERT(svar->dtsv_size == NCPU * sz);
6728 a += curcpu_id * sz;
6729
6730 if (*(uint8_t *)a == UINT8_MAX) {
6731 /*
6732 * If the 0th byte is set to UINT8_MAX
6733 * then this is to be treated as a
6734 * reference to a NULL variable.
6735 */
6736 regs[rd] = 0;
6737 } else {
6738 regs[rd] = a + sizeof (uint64_t);
6739 }
6740
6741 break;
6742 }
6743
6744 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6745 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6746 regs[rd] = tmp[curcpu_id];
6747 break;
6748
6749 case DIF_OP_STLS:
6750 id = DIF_INSTR_VAR(instr);
6751
6752 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6753 id -= DIF_VAR_OTHER_UBASE;
6754 VERIFY(id < vstate->dtvs_nlocals);
6755
6756 ASSERT(vstate->dtvs_locals != NULL);
6757 svar = vstate->dtvs_locals[id];
6758 ASSERT(svar != NULL);
6759 v = &svar->dtsv_var;
6760
6761 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6762 uintptr_t a = (uintptr_t)svar->dtsv_data;
6763 size_t sz = v->dtdv_type.dtdt_size;
6764 size_t lim;
6765
6766 sz += sizeof (uint64_t);
6767 ASSERT(svar->dtsv_size == NCPU * sz);
6768 a += curcpu_id * sz;
6769
6770 if (regs[rd] == 0) {
6771 *(uint8_t *)a = UINT8_MAX;
6772 break;
6773 } else {
6774 *(uint8_t *)a = 0;
6775 a += sizeof (uint64_t);
6776 }
6777
6778 if (!dtrace_vcanload(
6779 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
6780 &lim, mstate, vstate))
6781 break;
6782
6783 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6784 (void *)a, &v->dtdv_type, lim);
6785 break;
6786 }
6787
6788 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
6789 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
6790 tmp[curcpu_id] = regs[rd];
6791 break;
6792
6793 case DIF_OP_LDTS: {
6794 dtrace_dynvar_t *dvar;
6795 dtrace_key_t *key;
6796
6797 id = DIF_INSTR_VAR(instr);
6798 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6799 id -= DIF_VAR_OTHER_UBASE;
6800 v = &vstate->dtvs_tlocals[id];
6801
6802 key = &tupregs[DIF_DTR_NREGS];
6803 key[0].dttk_value = (uint64_t)id;
6804 key[0].dttk_size = 0;
6805 DTRACE_TLS_THRKEY(key[1].dttk_value);
6806 key[1].dttk_size = 0;
6807
6808 dvar = dtrace_dynvar(dstate, 2, key,
6809 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
6810 mstate, vstate);
6811
6812 if (dvar == NULL) {
6813 regs[rd] = 0;
6814 break;
6815 }
6816
6817 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6818 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6819 } else {
6820 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6821 }
6822
6823 break;
6824 }
6825
6826 case DIF_OP_STTS: {
6827 dtrace_dynvar_t *dvar;
6828 dtrace_key_t *key;
6829
6830 id = DIF_INSTR_VAR(instr);
6831 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6832 id -= DIF_VAR_OTHER_UBASE;
6833 VERIFY(id < vstate->dtvs_ntlocals);
6834
6835 key = &tupregs[DIF_DTR_NREGS];
6836 key[0].dttk_value = (uint64_t)id;
6837 key[0].dttk_size = 0;
6838 DTRACE_TLS_THRKEY(key[1].dttk_value);
6839 key[1].dttk_size = 0;
6840 v = &vstate->dtvs_tlocals[id];
6841
6842 dvar = dtrace_dynvar(dstate, 2, key,
6843 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6844 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6845 regs[rd] ? DTRACE_DYNVAR_ALLOC :
6846 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
6847
6848 /*
6849 * Given that we're storing to thread-local data,
6850 * we need to flush our predicate cache.
6851 */
6852 curthread->t_predcache = 0;
6853
6854 if (dvar == NULL)
6855 break;
6856
6857 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6858 size_t lim;
6859
6860 if (!dtrace_vcanload(
6861 (void *)(uintptr_t)regs[rd],
6862 &v->dtdv_type, &lim, mstate, vstate))
6863 break;
6864
6865 dtrace_vcopy((void *)(uintptr_t)regs[rd],
6866 dvar->dtdv_data, &v->dtdv_type, lim);
6867 } else {
6868 *((uint64_t *)dvar->dtdv_data) = regs[rd];
6869 }
6870
6871 break;
6872 }
6873
6874 case DIF_OP_SRA:
6875 regs[rd] = (int64_t)regs[r1] >> regs[r2];
6876 break;
6877
6878 case DIF_OP_CALL:
6879 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
6880 regs, tupregs, ttop, mstate, state);
6881 break;
6882
6883 case DIF_OP_PUSHTR:
6884 if (ttop == DIF_DTR_NREGS) {
6885 *flags |= CPU_DTRACE_TUPOFLOW;
6886 break;
6887 }
6888
6889 if (r1 == DIF_TYPE_STRING) {
6890 /*
6891 * If this is a string type and the size is 0,
6892 * we'll use the system-wide default string
6893 * size. Note that we are _not_ looking at
6894 * the value of the DTRACEOPT_STRSIZE option;
6895 * had this been set, we would expect to have
6896 * a non-zero size value in the "pushtr".
6897 */
6898 tupregs[ttop].dttk_size =
6899 dtrace_strlen((char *)(uintptr_t)regs[rd],
6900 regs[r2] ? regs[r2] :
6901 dtrace_strsize_default) + 1;
6902 } else {
6903 if (regs[r2] > LONG_MAX) {
6904 *flags |= CPU_DTRACE_ILLOP;
6905 break;
6906 }
6907
6908 tupregs[ttop].dttk_size = regs[r2];
6909 }
6910
6911 tupregs[ttop++].dttk_value = regs[rd];
6912 break;
6913
6914 case DIF_OP_PUSHTV:
6915 if (ttop == DIF_DTR_NREGS) {
6916 *flags |= CPU_DTRACE_TUPOFLOW;
6917 break;
6918 }
6919
6920 tupregs[ttop].dttk_value = regs[rd];
6921 tupregs[ttop++].dttk_size = 0;
6922 break;
6923
6924 case DIF_OP_POPTS:
6925 if (ttop != 0)
6926 ttop--;
6927 break;
6928
6929 case DIF_OP_FLUSHTS:
6930 ttop = 0;
6931 break;
6932
6933 case DIF_OP_LDGAA:
6934 case DIF_OP_LDTAA: {
6935 dtrace_dynvar_t *dvar;
6936 dtrace_key_t *key = tupregs;
6937 uint_t nkeys = ttop;
6938
6939 id = DIF_INSTR_VAR(instr);
6940 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6941 id -= DIF_VAR_OTHER_UBASE;
6942
6943 key[nkeys].dttk_value = (uint64_t)id;
6944 key[nkeys++].dttk_size = 0;
6945
6946 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
6947 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6948 key[nkeys++].dttk_size = 0;
6949 VERIFY(id < vstate->dtvs_ntlocals);
6950 v = &vstate->dtvs_tlocals[id];
6951 } else {
6952 VERIFY(id < vstate->dtvs_nglobals);
6953 v = &vstate->dtvs_globals[id]->dtsv_var;
6954 }
6955
6956 dvar = dtrace_dynvar(dstate, nkeys, key,
6957 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6958 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6959 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
6960
6961 if (dvar == NULL) {
6962 regs[rd] = 0;
6963 break;
6964 }
6965
6966 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
6967 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
6968 } else {
6969 regs[rd] = *((uint64_t *)dvar->dtdv_data);
6970 }
6971
6972 break;
6973 }
6974
6975 case DIF_OP_STGAA:
6976 case DIF_OP_STTAA: {
6977 dtrace_dynvar_t *dvar;
6978 dtrace_key_t *key = tupregs;
6979 uint_t nkeys = ttop;
6980
6981 id = DIF_INSTR_VAR(instr);
6982 ASSERT(id >= DIF_VAR_OTHER_UBASE);
6983 id -= DIF_VAR_OTHER_UBASE;
6984
6985 key[nkeys].dttk_value = (uint64_t)id;
6986 key[nkeys++].dttk_size = 0;
6987
6988 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
6989 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
6990 key[nkeys++].dttk_size = 0;
6991 v = &vstate->dtvs_tlocals[id];
6992 } else {
6993 v = &vstate->dtvs_globals[id]->dtsv_var;
6994 }
6995
6996 dvar = dtrace_dynvar(dstate, nkeys, key,
6997 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
6998 v->dtdv_type.dtdt_size : sizeof (uint64_t),
6999 regs[rd] ? DTRACE_DYNVAR_ALLOC :
7000 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
7001
7002 if (dvar == NULL)
7003 break;
7004
7005 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
7006 size_t lim;
7007
7008 if (!dtrace_vcanload(
7009 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
7010 &lim, mstate, vstate))
7011 break;
7012
7013 dtrace_vcopy((void *)(uintptr_t)regs[rd],
7014 dvar->dtdv_data, &v->dtdv_type, lim);
7015 } else {
7016 *((uint64_t *)dvar->dtdv_data) = regs[rd];
7017 }
7018
7019 break;
7020 }
7021
7022 case DIF_OP_ALLOCS: {
7023 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
7024 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
7025
7026 /*
7027 * Rounding up the user allocation size could have
7028 * overflowed large, bogus allocations (like -1ULL) to
7029 * 0.
7030 */
7031 if (size < regs[r1] ||
7032 !DTRACE_INSCRATCH(mstate, size)) {
7033 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
7034 regs[rd] = 0;
7035 break;
7036 }
7037
7038 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
7039 mstate->dtms_scratch_ptr += size;
7040 regs[rd] = ptr;
7041 break;
7042 }
7043
7044 case DIF_OP_COPYS:
7045 if (!dtrace_canstore(regs[rd], regs[r2],
7046 mstate, vstate)) {
7047 *flags |= CPU_DTRACE_BADADDR;
7048 *illval = regs[rd];
7049 break;
7050 }
7051
7052 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
7053 break;
7054
7055 dtrace_bcopy((void *)(uintptr_t)regs[r1],
7056 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
7057 break;
7058
7059 case DIF_OP_STB:
7060 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
7061 *flags |= CPU_DTRACE_BADADDR;
7062 *illval = regs[rd];
7063 break;
7064 }
7065 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
7066 break;
7067
7068 case DIF_OP_STH:
7069 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
7070 *flags |= CPU_DTRACE_BADADDR;
7071 *illval = regs[rd];
7072 break;
7073 }
7074 if (regs[rd] & 1) {
7075 *flags |= CPU_DTRACE_BADALIGN;
7076 *illval = regs[rd];
7077 break;
7078 }
7079 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
7080 break;
7081
7082 case DIF_OP_STW:
7083 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
7084 *flags |= CPU_DTRACE_BADADDR;
7085 *illval = regs[rd];
7086 break;
7087 }
7088 if (regs[rd] & 3) {
7089 *flags |= CPU_DTRACE_BADALIGN;
7090 *illval = regs[rd];
7091 break;
7092 }
7093 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
7094 break;
7095
7096 case DIF_OP_STX:
7097 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
7098 *flags |= CPU_DTRACE_BADADDR;
7099 *illval = regs[rd];
7100 break;
7101 }
7102 if (regs[rd] & 7) {
7103 *flags |= CPU_DTRACE_BADALIGN;
7104 *illval = regs[rd];
7105 break;
7106 }
7107 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
7108 break;
7109 }
7110 }
7111
7112 if (!(*flags & CPU_DTRACE_FAULT))
7113 return (rval);
7114
7115 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
7116 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
7117
7118 return (0);
7119 }
7120
7121 static void
7122 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
7123 {
7124 dtrace_probe_t *probe = ecb->dte_probe;
7125 dtrace_provider_t *prov = probe->dtpr_provider;
7126 char c[DTRACE_FULLNAMELEN + 80], *str;
7127 const char *msg = "dtrace: breakpoint action at probe ";
7128 const char *ecbmsg = " (ecb ";
7129 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
7130 uintptr_t val = (uintptr_t)ecb;
7131 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
7132
7133 if (dtrace_destructive_disallow)
7134 return;
7135
7136 /*
7137 * It's impossible to be taking action on the NULL probe.
7138 */
7139 ASSERT(probe != NULL);
7140
7141 /*
7142 * This is a poor man's (destitute man's?) snprintf(): we want to
7143 * print the provider name, module name, function name and name of
7144 * the probe, along with the hex address of the ECB with the breakpoint
7145 * action -- all of which we must place in the character buffer by
7146 * hand.
7147 */
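	/*
	 * The assembled message looks like, for instance:
	 *   dtrace: breakpoint action at probe fbt:genunix:open:entry
	 *   (ecb ffffff01d1c3a480)
	 */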
7148 while (*msg != '\0')
7149 c[i++] = *msg++;
7150
7151 for (str = prov->dtpv_name; *str != '\0'; str++)
7152 c[i++] = *str;
7153 c[i++] = ':';
7154
7155 for (str = probe->dtpr_mod; *str != '\0'; str++)
7156 c[i++] = *str;
7157 c[i++] = ':';
7158
7159 for (str = probe->dtpr_func; *str != '\0'; str++)
7160 c[i++] = *str;
7161 c[i++] = ':';
7162
7163 for (str = probe->dtpr_name; *str != '\0'; str++)
7164 c[i++] = *str;
7165
7166 while (*ecbmsg != '\0')
7167 c[i++] = *ecbmsg++;
7168
7169 while (shift >= 0) {
7170 mask = (uintptr_t)0xf << shift;
7171
7172 if (val >= ((uintptr_t)1 << shift))
7173 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
7174 shift -= 4;
7175 }
7176
7177 c[i++] = ')';
7178 c[i] = '\0';
7179
7180 #ifdef illumos
7181 debug_enter(c);
7182 #endif
7183
7184 #ifdef __FreeBSD__
7185 kdb_enter(KDB_WHY_DTRACE, "breakpoint action");
7186 #endif
7187
7188 #ifdef __NetBSD__
7189 #ifdef DDB
7190 db_printf("%s\n", c);
7191 Debugger();
7192 #else
7193 printf("%s ignored\n", c);
7194 #endif /* DDB */
7195 #endif
7196 }
7197
7198 static void
7199 dtrace_action_panic(dtrace_ecb_t *ecb)
7200 {
7201 dtrace_probe_t *probe = ecb->dte_probe;
7202
7203 /*
7204 * It's impossible to be taking action on the NULL probe.
7205 */
7206 ASSERT(probe != NULL);
7207
7208 if (dtrace_destructive_disallow)
7209 return;
7210
7211 if (dtrace_panicked != NULL)
7212 return;
7213
7214 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
7215 return;
7216
7217 /*
7218 * We won the right to panic. (We want to be sure that only one
7219 * thread calls panic() from dtrace_probe(), and that panic() is
7220 * called exactly once.)
7221 */
7222 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
7223 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
7224 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
7225 }
7226
7227 static void
7228 dtrace_action_raise(uint64_t sig)
7229 {
7230 if (dtrace_destructive_disallow)
7231 return;
7232
7233 if (sig >= NSIG) {
7234 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
7235 return;
7236 }
7237
7238 #ifdef illumos
7239 /*
7240 * raise() has a queue depth of 1 -- we ignore all subsequent
7241 * invocations of the raise() action.
7242 */
7243 if (curthread->t_dtrace_sig == 0)
7244 curthread->t_dtrace_sig = (uint8_t)sig;
7245
7246 curthread->t_sig_check = 1;
7247 aston(curthread);
7248 #endif
7249
7250 #ifdef __FreeBSD__
	struct proc *p = curproc;

	PROC_LOCK(p);
7252 kern_psignal(p, sig);
7253 PROC_UNLOCK(p);
7254 #endif
7255
7256 #ifdef __NetBSD__
7257 struct proc *p = curproc;
7258 mutex_enter(&proc_lock);
7259 psignal(p, sig);
7260 mutex_exit(&proc_lock);
7261 #endif
7262 }
7263
7264 static void
7265 dtrace_action_stop(void)
7266 {
7267 if (dtrace_destructive_disallow)
7268 return;
7269
7270 #ifdef illumos
7271 if (!curthread->t_dtrace_stop) {
7272 curthread->t_dtrace_stop = 1;
7273 curthread->t_sig_check = 1;
7274 aston(curthread);
7275 }
7276 #endif
7277
7278 #ifdef __FreeBSD__
	struct proc *p = curproc;

	PROC_LOCK(p);
7280 kern_psignal(p, SIGSTOP);
7281 PROC_UNLOCK(p);
7282 #endif
7283
7284 #ifdef __NetBSD__
7285 struct proc *p = curproc;
7286 mutex_enter(&proc_lock);
7287 psignal(p, SIGSTOP);
7288 mutex_exit(&proc_lock);
7289 #endif
7290 }
7291
7292 static void
7293 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
7294 {
7295 hrtime_t now;
7296 volatile uint16_t *flags;
7297 #ifdef illumos
7298 cpu_t *cpu = CPU;
7299 #else
7300 cpu_t *cpu = &solaris_cpu[curcpu_id];
7301 #endif
7302
7303 if (dtrace_destructive_disallow)
7304 return;
7305
7306 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
7307
7308 now = dtrace_gethrtime();
7309
7310 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
7311 /*
7312 * We need to advance the mark to the current time.
7313 */
7314 cpu->cpu_dtrace_chillmark = now;
7315 cpu->cpu_dtrace_chilled = 0;
7316 }
7317
7318 /*
7319 * Now check to see if the requested chill time would take us over
7320 * the maximum amount of time allowed in the chill interval. (Or
7321 * worse, if the calculation itself induces overflow.)
7322 */
7323 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
7324 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
7325 *flags |= CPU_DTRACE_ILLOP;
7326 return;
7327 }
7328
7329 while (dtrace_gethrtime() - now < val)
7330 continue;
7331
7332 /*
7333 * Normally, we assure that the value of the variable "timestamp" does
7334 * not change within an ECB. The presence of chill() represents an
7335 * exception to this rule, however.
7336 */
7337 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
7338 cpu->cpu_dtrace_chilled += val;
7339 }
7340
7341 static void
7342 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
7343 uint64_t *buf, uint64_t arg)
7344 {
7345 int nframes = DTRACE_USTACK_NFRAMES(arg);
7346 int strsize = DTRACE_USTACK_STRSIZE(arg);
7347 uint64_t *pcs = &buf[1], *fps;
7348 char *str = (char *)&pcs[nframes];
7349 int size, offs = 0, i, j;
7350 size_t rem;
7351 uintptr_t old = mstate->dtms_scratch_ptr, saved;
7352 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
7353 char *sym;
7354
7355 /*
7356 * Should be taking a faster path if string space has not been
7357 * allocated.
7358 */
7359 ASSERT(strsize != 0);
7360
7361 /*
7362 * We will first allocate some temporary space for the frame pointers.
7363 */
7364 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
7365 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
7366 (nframes * sizeof (uint64_t));
7367
7368 if (!DTRACE_INSCRATCH(mstate, size)) {
7369 /*
7370 * Not enough room for our frame pointers -- need to indicate
7371 * that we ran out of scratch space.
7372 */
7373 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
7374 return;
7375 }
7376
7377 mstate->dtms_scratch_ptr += size;
7378 saved = mstate->dtms_scratch_ptr;
7379
7380 /*
7381 * Now get a stack with both program counters and frame pointers.
7382 */
7383 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7384 dtrace_getufpstack(buf, fps, nframes + 1);
7385 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7386
7387 /*
7388 * If that faulted, we're cooked.
7389 */
7390 if (*flags & CPU_DTRACE_FAULT)
7391 goto out;
7392
7393 /*
7394 * Now we want to walk up the stack, calling the USTACK helper. For
7395 * each iteration, we restore the scratch pointer.
7396 */
7397 for (i = 0; i < nframes; i++) {
7398 mstate->dtms_scratch_ptr = saved;
7399
7400 if (offs >= strsize)
7401 break;
7402
7403 sym = (char *)(uintptr_t)dtrace_helper(
7404 DTRACE_HELPER_ACTION_USTACK,
7405 mstate, state, pcs[i], fps[i]);
7406
7407 /*
7408 * If we faulted while running the helper, we're going to
7409 * clear the fault and null out the corresponding string.
7410 */
7411 if (*flags & CPU_DTRACE_FAULT) {
7412 *flags &= ~CPU_DTRACE_FAULT;
7413 str[offs++] = '\0';
7414 continue;
7415 }
7416
7417 if (sym == NULL) {
7418 str[offs++] = '\0';
7419 continue;
7420 }
7421
7422 if (!dtrace_strcanload((uintptr_t)sym, strsize, &rem, mstate,
7423 &(state->dts_vstate))) {
7424 str[offs++] = '\0';
7425 continue;
7426 }
7427
7428 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7429
7430 /*
7431 * Now copy in the string that the helper returned to us.
7432 */
7433 for (j = 0; offs + j < strsize && j < rem; j++) {
7434 if ((str[offs + j] = sym[j]) == '\0')
7435 break;
7436 }
7437
7438 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7439
7440 offs += j + 1;
7441 }
7442
7443 if (offs >= strsize) {
7444 /*
7445 * If we didn't have room for all of the strings, we don't
7446 * abort processing -- this needn't be a fatal error -- but we
7447 * still want to increment a counter (dts_stkstroverflows) to
7448 * allow this condition to be warned about. (If this is from
7449 * a jstack() action, it is easily tuned via jstackstrsize.)
7450 */
7451 dtrace_error(&state->dts_stkstroverflows);
7452 }
7453
7454 while (offs < strsize)
7455 str[offs++] = '\0';
7456
7457 out:
7458 mstate->dtms_scratch_ptr = old;
7459 }
7460
7461 static void
7462 dtrace_store_by_ref(dtrace_difo_t *dp, caddr_t tomax, size_t size,
7463 size_t *valoffsp, uint64_t *valp, uint64_t end, int intuple, int dtkind)
7464 {
7465 volatile uint16_t *flags;
7466 uint64_t val = *valp;
7467 size_t valoffs = *valoffsp;
7468
7469 flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags;
7470 ASSERT(dtkind == DIF_TF_BYREF || dtkind == DIF_TF_BYUREF);
7471
7472 /*
7473 * If this is a string, we're going to only load until we find the zero
7474 * byte -- after which we'll store zero bytes.
7475 */
7476 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
7477 char c = '\0' + 1;
7478 size_t s;
7479
7480 for (s = 0; s < size; s++) {
7481 if (c != '\0' && dtkind == DIF_TF_BYREF) {
7482 c = dtrace_load8(val++);
7483 } else if (c != '\0' && dtkind == DIF_TF_BYUREF) {
7484 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7485 c = dtrace_fuword8((void *)(uintptr_t)val++);
7486 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7487 if (*flags & CPU_DTRACE_FAULT)
7488 break;
7489 }
7490
7491 DTRACE_STORE(uint8_t, tomax, valoffs++, c);
7492
7493 if (c == '\0' && intuple)
7494 break;
7495 }
7496 } else {
7497 uint8_t c;
7498 while (valoffs < end) {
7499 if (dtkind == DIF_TF_BYREF) {
7500 c = dtrace_load8(val++);
7501 } else if (dtkind == DIF_TF_BYUREF) {
7502 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7503 c = dtrace_fuword8((void *)(uintptr_t)val++);
7504 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7505 if (*flags & CPU_DTRACE_FAULT)
7506 break;
7507 }
7508
7509 DTRACE_STORE(uint8_t, tomax,
7510 valoffs++, c);
7511 }
7512 }
7513
7514 *valp = val;
7515 *valoffsp = valoffs;
7516 }
7517
7518 /*
7519 * If you're looking for the epicenter of DTrace, you just found it. This
7520 * is the function called by the provider to fire a probe -- from which all
7521 * subsequent probe-context DTrace activity emanates.
7522 */
7523 void
7524 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
7525 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
7526 {
7527 processorid_t cpuid;
7528 dtrace_icookie_t cookie;
7529 dtrace_probe_t *probe;
7530 dtrace_mstate_t mstate;
7531 dtrace_ecb_t *ecb;
7532 dtrace_action_t *act;
7533 intptr_t offs;
7534 size_t size;
7535 int vtime, onintr;
7536 volatile uint16_t *flags;
7537 hrtime_t now;
7538
7539 if (panicstr != NULL)
7540 return;
7541
7542 #ifdef illumos
7543 /*
7544 * Kick out immediately if this CPU is still being born (in which case
7545 * curthread will be set to -1) or the current thread can't allow
7546 * probes in its current context.
7547 */
7548 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
7549 return;
7550 #endif
7551
7552 cookie = dtrace_interrupt_disable();
7553 probe = dtrace_probes[id - 1];
7554 cpuid = curcpu_id;
7555 onintr = CPU_ON_INTR(CPU);
7556
7557 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
7558 probe->dtpr_predcache == curthread->t_predcache) {
7559 /*
7560 * We have hit in the predicate cache; we know that
7561 * this predicate would evaluate to be false.
7562 */
7563 dtrace_interrupt_enable(cookie);
7564 return;
7565 }
7566
7567 #ifdef illumos
7568 if (panic_quiesce) {
7569 #else
7570 if (panicstr != NULL) {
7571 #endif
7572 /*
7573 * We don't trace anything if we're panicking.
7574 */
7575 dtrace_interrupt_enable(cookie);
7576 return;
7577 }
7578
7579 now = mstate.dtms_timestamp = dtrace_gethrtime();
7580 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7581 vtime = dtrace_vtime_references != 0;
7582
7583 if (vtime && curthread->t_dtrace_start)
7584 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
7585
7586 mstate.dtms_difo = NULL;
7587 mstate.dtms_probe = probe;
7588 mstate.dtms_strtok = 0;
7589 mstate.dtms_arg[0] = arg0;
7590 mstate.dtms_arg[1] = arg1;
7591 mstate.dtms_arg[2] = arg2;
7592 mstate.dtms_arg[3] = arg3;
7593 mstate.dtms_arg[4] = arg4;
7594
7595 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
7596
7597 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
7598 dtrace_predicate_t *pred = ecb->dte_predicate;
7599 dtrace_state_t *state = ecb->dte_state;
7600 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
7601 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
7602 dtrace_vstate_t *vstate = &state->dts_vstate;
7603 dtrace_provider_t *prov = probe->dtpr_provider;
7604 uint64_t tracememsize = 0;
7605 int committed = 0;
7606 caddr_t tomax;
7607
7608 /*
7609 * A little subtlety with the following (seemingly innocuous)
7610 * declaration of the automatic 'val': by looking at the
7611 * code, you might think that it could be declared in the
7612 * action processing loop, below. (That is, it's only used in
7613 * the action processing loop.) However, it must be declared
7614 * out of that scope because in the case of DIF expression
7615 * arguments to aggregating actions, one iteration of the
7616 * action loop will use the last iteration's value.
7617 */
7618 uint64_t val = 0;
7619
7620 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
7621 mstate.dtms_getf = NULL;
7622
7623 *flags &= ~CPU_DTRACE_ERROR;
7624
7625 if (prov == dtrace_provider) {
7626 /*
7627 * If dtrace itself is the provider of this probe,
7628 * we're only going to continue processing the ECB if
7629 * arg0 (the dtrace_state_t) is equal to the ECB's
7630 * creating state. (This prevents disjoint consumers
7631 * from seeing one another's metaprobes.)
7632 */
7633 if (arg0 != (uint64_t)(uintptr_t)state)
7634 continue;
7635 }
7636
7637 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
7638 /*
7639 * We're not currently active. If our provider isn't
7640 * the dtrace pseudo provider, we're not interested.
7641 */
7642 if (prov != dtrace_provider)
7643 continue;
7644
7645 /*
7646 * Now we must further check if we are in the BEGIN
7647 * probe. If we are, we will only continue processing
7648 * if we're still in WARMUP -- if one BEGIN enabling
7649 * has invoked the exit() action, we don't want to
7650 * evaluate subsequent BEGIN enablings.
7651 */
7652 if (probe->dtpr_id == dtrace_probeid_begin &&
7653 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
7654 ASSERT(state->dts_activity ==
7655 DTRACE_ACTIVITY_DRAINING);
7656 continue;
7657 }
7658 }
7659
7660 if (ecb->dte_cond) {
7661 /*
7662 * If the dte_cond bits indicate that this
7663 * consumer is only allowed to see user-mode firings
7664 * of this probe, call the provider's dtps_usermode()
7665 * entry point to check that the probe was fired
7666 * while in a user context. Skip this ECB if that's
7667 * not the case.
7668 */
7669 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
7670 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
7671 probe->dtpr_id, probe->dtpr_arg) == 0)
7672 continue;
7673
7674 #ifdef illumos
7675 /*
7676 * This is more subtle than it looks. We have to be
7677 * absolutely certain that CRED() isn't going to
7678 * change out from under us so it's only legit to
7679 * examine that structure if we're in constrained
			 * situations. Currently, the only time we'll do this
			 * check is if a non-super-user has enabled the
7682 * profile or syscall providers -- providers that
7683 * allow visibility of all processes. For the
7684 * profile case, the check above will ensure that
7685 * we're examining a user context.
7686 */
7687 if (ecb->dte_cond & DTRACE_COND_OWNER) {
7688 cred_t *cr;
7689 cred_t *s_cr =
7690 ecb->dte_state->dts_cred.dcr_cred;
7691 proc_t *proc;
7692
7693 ASSERT(s_cr != NULL);
7694
7695 if ((cr = CRED()) == NULL ||
7696 s_cr->cr_uid != cr->cr_uid ||
7697 s_cr->cr_uid != cr->cr_ruid ||
7698 s_cr->cr_uid != cr->cr_suid ||
7699 s_cr->cr_gid != cr->cr_gid ||
7700 s_cr->cr_gid != cr->cr_rgid ||
7701 s_cr->cr_gid != cr->cr_sgid ||
7702 (proc = ttoproc(curthread)) == NULL ||
7703 (proc->p_flag & SNOCD))
7704 continue;
7705 }
7706
7707 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
7708 cred_t *cr;
7709 cred_t *s_cr =
7710 ecb->dte_state->dts_cred.dcr_cred;
7711
7712 ASSERT(s_cr != NULL);
7713
7714 if ((cr = CRED()) == NULL ||
7715 s_cr->cr_zone->zone_id !=
7716 cr->cr_zone->zone_id)
7717 continue;
7718 }
7719 #endif
7720 }
7721
7722 if (now - state->dts_alive > dtrace_deadman_timeout) {
7723 /*
7724 * We seem to be dead. Unless we (a) have kernel
7725 * destructive permissions (b) have explicitly enabled
7726 * destructive actions and (c) destructive actions have
7727 * not been disabled, we're going to transition into
7728 * the KILLED state, from which no further processing
7729 * on this state will be performed.
7730 */
7731 if (!dtrace_priv_kernel_destructive(state) ||
7732 !state->dts_cred.dcr_destructive ||
7733 dtrace_destructive_disallow) {
7734 void *activity = &state->dts_activity;
7735 dtrace_activity_t current;
7736
7737 do {
7738 current = state->dts_activity;
7739 } while (dtrace_cas32(activity, current,
7740 DTRACE_ACTIVITY_KILLED) != current);
7741
7742 continue;
7743 }
7744 }
7745
7746 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
7747 ecb->dte_alignment, state, &mstate)) < 0)
7748 continue;
7749
7750 tomax = buf->dtb_tomax;
7751 ASSERT(tomax != NULL);
7752
7753 if (ecb->dte_size != 0) {
7754 dtrace_rechdr_t dtrh;
7755 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
7756 mstate.dtms_timestamp = dtrace_gethrtime();
7757 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP;
7758 }
7759 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t));
7760 dtrh.dtrh_epid = ecb->dte_epid;
7761 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh,
7762 mstate.dtms_timestamp);
7763 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh;
7764 }
7765
7766 mstate.dtms_epid = ecb->dte_epid;
7767 mstate.dtms_present |= DTRACE_MSTATE_EPID;
7768
7769 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
7770 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
7771 else
7772 mstate.dtms_access = 0;
7773
7774 if (pred != NULL) {
7775 dtrace_difo_t *dp = pred->dtp_difo;
7776 uint64_t rval;
7777
7778 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
7779
7780 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
7781 dtrace_cacheid_t cid = probe->dtpr_predcache;
7782
7783 if (cid != DTRACE_CACHEIDNONE && !onintr) {
7784 /*
7785 * Update the predicate cache...
7786 */
7787 ASSERT(cid == pred->dtp_cacheid);
7788 curthread->t_predcache = cid;
7789 }
7790
7791 continue;
7792 }
7793 }
7794
7795 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
7796 act != NULL; act = act->dta_next) {
7797 size_t valoffs;
7798 dtrace_difo_t *dp;
7799 dtrace_recdesc_t *rec = &act->dta_rec;
7800
7801 size = rec->dtrd_size;
7802 valoffs = offs + rec->dtrd_offset;
7803
7804 if (DTRACEACT_ISAGG(act->dta_kind)) {
7805 uint64_t v = 0xbad;
7806 dtrace_aggregation_t *agg;
7807
7808 agg = (dtrace_aggregation_t *)act;
7809
7810 if ((dp = act->dta_difo) != NULL)
7811 v = dtrace_dif_emulate(dp,
7812 &mstate, vstate, state);
7813
7814 if (*flags & CPU_DTRACE_ERROR)
7815 continue;
7816
7817 /*
7818 * Note that we always pass the expression
7819 * value from the previous iteration of the
7820 * action loop. This value will only be used
7821 * if there is an expression argument to the
7822 * aggregating action, denoted by the
7823 * dtag_hasarg field.
7824 */
7825 dtrace_aggregate(agg, buf,
7826 offs, aggbuf, v, val);
7827 continue;
7828 }
7829
7830 switch (act->dta_kind) {
7831 case DTRACEACT_STOP:
7832 if (dtrace_priv_proc_destructive(state))
7833 dtrace_action_stop();
7834 continue;
7835
7836 case DTRACEACT_BREAKPOINT:
7837 if (dtrace_priv_kernel_destructive(state))
7838 dtrace_action_breakpoint(ecb);
7839 continue;
7840
7841 case DTRACEACT_PANIC:
7842 if (dtrace_priv_kernel_destructive(state))
7843 dtrace_action_panic(ecb);
7844 continue;
7845
7846 case DTRACEACT_STACK:
7847 if (!dtrace_priv_kernel(state))
7848 continue;
7849
7850 dtrace_getpcstack((pc_t *)(tomax + valoffs),
7851 size / sizeof (pc_t), probe->dtpr_aframes,
7852 DTRACE_ANCHORED(probe) ? NULL :
7853 (uint32_t *)arg0);
7854 continue;
7855
7856 case DTRACEACT_JSTACK:
7857 case DTRACEACT_USTACK:
7858 if (!dtrace_priv_proc(state))
7859 continue;
7860
7861 /*
7862 * See comment in DIF_VAR_PID.
7863 */
7864 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
7865 CPU_ON_INTR(CPU)) {
7866 int depth = DTRACE_USTACK_NFRAMES(
7867 rec->dtrd_arg) + 1;
7868
7869 dtrace_bzero((void *)(tomax + valoffs),
7870 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
7871 + depth * sizeof (uint64_t));
7872
7873 continue;
7874 }
7875
7876 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
7877 curproc->p_dtrace_helpers != NULL) {
7878 /*
7879 * This is the slow path -- we have
7880 * allocated string space, and we're
7881 * getting the stack of a process that
7882 * has helpers. Call into a separate
7883 * routine to perform this processing.
7884 */
7885 dtrace_action_ustack(&mstate, state,
7886 (uint64_t *)(tomax + valoffs),
7887 rec->dtrd_arg);
7888 continue;
7889 }
7890
7891 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
7892 dtrace_getupcstack((uint64_t *)
7893 (tomax + valoffs),
7894 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
7895 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
7896 continue;
7897
7898 default:
7899 break;
7900 }
7901
7902 dp = act->dta_difo;
7903 ASSERT(dp != NULL);
7904
7905 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
7906
7907 if (*flags & CPU_DTRACE_ERROR)
7908 continue;
7909
7910 switch (act->dta_kind) {
7911 case DTRACEACT_SPECULATE: {
7912 dtrace_rechdr_t *dtrh;
7913
7914 ASSERT(buf == &state->dts_buffer[cpuid]);
7915 buf = dtrace_speculation_buffer(state,
7916 cpuid, val);
7917
7918 if (buf == NULL) {
7919 *flags |= CPU_DTRACE_DROP;
7920 continue;
7921 }
7922
7923 offs = dtrace_buffer_reserve(buf,
7924 ecb->dte_needed, ecb->dte_alignment,
7925 state, NULL);
7926
7927 if (offs < 0) {
7928 *flags |= CPU_DTRACE_DROP;
7929 continue;
7930 }
7931
7932 tomax = buf->dtb_tomax;
7933 ASSERT(tomax != NULL);
7934
7935 if (ecb->dte_size == 0)
7936 continue;
7937
7938 ASSERT3U(ecb->dte_size, >=,
7939 sizeof (dtrace_rechdr_t));
7940 dtrh = ((void *)(tomax + offs));
7941 dtrh->dtrh_epid = ecb->dte_epid;
7942 /*
7943 * When the speculation is committed, all of
7944 * the records in the speculative buffer will
7945 * have their timestamps set to the commit
7946 * time. Until then, it is set to a sentinel
7947 * value, for debuggability.
7948 */
7949 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX);
7950 continue;
7951 }
7952
7953 case DTRACEACT_PRINTM: {
7954 /* The DIF returns a 'memref'. */
7955 uintptr_t *memref = (uintptr_t *)(uintptr_t) val;
7956
7957 /* Get the size from the memref. */
7958 size = memref[1];
7959
7960 /*
7961 * Check if the size exceeds the allocated
7962 * buffer size.
7963 */
7964 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) {
7965 /* Flag a drop! */
7966 *flags |= CPU_DTRACE_DROP;
7967 continue;
7968 }
7969
7970 /* Store the size in the buffer first. */
7971 DTRACE_STORE(uintptr_t, tomax,
7972 valoffs, size);
7973
7974 /*
7975 * Offset the buffer address to the start
7976 * of the data.
7977 */
7978 valoffs += sizeof(uintptr_t);
7979
7980 /*
7981 * Reset to the memory address rather than
7982 * the memref array, then let the BYREF
7983 * code below do the work to store the
7984 * memory data in the buffer.
7985 */
7986 val = memref[0];
7987 break;
7988 }
7989
7990 case DTRACEACT_CHILL:
7991 if (dtrace_priv_kernel_destructive(state))
7992 dtrace_action_chill(&mstate, val);
7993 continue;
7994
7995 case DTRACEACT_RAISE:
7996 if (dtrace_priv_proc_destructive(state))
7997 dtrace_action_raise(val);
7998 continue;
7999
8000 case DTRACEACT_COMMIT:
8001 ASSERT(!committed);
8002
8003 /*
8004 * We need to commit our buffer state.
8005 */
8006 if (ecb->dte_size)
8007 buf->dtb_offset = offs + ecb->dte_size;
8008 buf = &state->dts_buffer[cpuid];
8009 dtrace_speculation_commit(state, cpuid, val);
8010 committed = 1;
8011 continue;
8012
8013 case DTRACEACT_DISCARD:
8014 dtrace_speculation_discard(state, cpuid, val);
8015 continue;
8016
8017 case DTRACEACT_DIFEXPR:
8018 case DTRACEACT_LIBACT:
8019 case DTRACEACT_PRINTF:
8020 case DTRACEACT_PRINTA:
8021 case DTRACEACT_SYSTEM:
8022 case DTRACEACT_FREOPEN:
8023 case DTRACEACT_TRACEMEM:
8024 break;
8025
8026 case DTRACEACT_TRACEMEM_DYNSIZE:
8027 tracememsize = val;
8028 break;
8029
8030 case DTRACEACT_SYM:
8031 case DTRACEACT_MOD:
8032 if (!dtrace_priv_kernel(state))
8033 continue;
8034 break;
8035
8036 case DTRACEACT_USYM:
8037 case DTRACEACT_UMOD:
8038 case DTRACEACT_UADDR: {
8039 #ifdef illumos
8040 struct pid *pid = curthread->t_procp->p_pidp;
8041 #endif
8042
8043 if (!dtrace_priv_proc(state))
8044 continue;
8045
8046 DTRACE_STORE(uint64_t, tomax,
8047 #ifdef illumos
8048 valoffs, (uint64_t)pid->pid_id);
8049 #else
8050 valoffs, (uint64_t) curproc->p_pid);
8051 #endif
8052 DTRACE_STORE(uint64_t, tomax,
8053 valoffs + sizeof (uint64_t), val);
8054
8055 continue;
8056 }
8057
8058 case DTRACEACT_EXIT: {
8059 /*
8060 * For the exit action, we are going to attempt
8061 * to atomically set our activity to be
8062 * draining. If this fails (either because
8063 * another CPU has beat us to the exit action,
8064 * or because our current activity is something
8065 * other than ACTIVE or WARMUP), we will
8066 * continue. This assures that the exit action
8067 * can be successfully recorded at most once
8068 * when we're in the ACTIVE state. If we're
8069 * encountering the exit() action while in
8070 * COOLDOWN, however, we want to honor the new
8071 * status code. (We know that we're the only
8072 * thread in COOLDOWN, so there is no race.)
8073 */
8074 void *activity = &state->dts_activity;
8075 dtrace_activity_t current = state->dts_activity;
8076
8077 if (current == DTRACE_ACTIVITY_COOLDOWN)
8078 break;
8079
8080 if (current != DTRACE_ACTIVITY_WARMUP)
8081 current = DTRACE_ACTIVITY_ACTIVE;
8082
8083 if (dtrace_cas32(activity, current,
8084 DTRACE_ACTIVITY_DRAINING) != current) {
8085 *flags |= CPU_DTRACE_DROP;
8086 continue;
8087 }
8088
8089 break;
8090 }
8091
8092 default:
8093 ASSERT(0);
8094 }
8095
8096 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ||
8097 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYUREF) {
8098 uintptr_t end = valoffs + size;
8099
8100 if (tracememsize != 0 &&
8101 valoffs + tracememsize < end) {
8102 end = valoffs + tracememsize;
8103 tracememsize = 0;
8104 }
8105
8106 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF &&
8107 !dtrace_vcanload((void *)(uintptr_t)val,
8108 &dp->dtdo_rtype, NULL, &mstate, vstate))
8109 continue;
8110
8111 dtrace_store_by_ref(dp, tomax, size, &valoffs,
8112 &val, end, act->dta_intuple,
8113 dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF ?
8114 DIF_TF_BYREF: DIF_TF_BYUREF);
8115 continue;
8116 }
8117
8118 switch (size) {
8119 case 0:
8120 break;
8121
8122 case sizeof (uint8_t):
8123 DTRACE_STORE(uint8_t, tomax, valoffs, val);
8124 break;
8125 case sizeof (uint16_t):
8126 DTRACE_STORE(uint16_t, tomax, valoffs, val);
8127 break;
8128 case sizeof (uint32_t):
8129 DTRACE_STORE(uint32_t, tomax, valoffs, val);
8130 break;
8131 case sizeof (uint64_t):
8132 DTRACE_STORE(uint64_t, tomax, valoffs, val);
8133 break;
8134 default:
8135 /*
8136 * Any other size should have been returned by
8137 * reference, not by value.
8138 */
8139 ASSERT(0);
8140 break;
8141 }
8142 }
8143
8144 if (*flags & CPU_DTRACE_DROP)
8145 continue;
8146
8147 if (*flags & CPU_DTRACE_FAULT) {
8148 int ndx;
8149 dtrace_action_t *err;
8150
8151 buf->dtb_errors++;
8152
8153 if (probe->dtpr_id == dtrace_probeid_error) {
8154 /*
8155 * There's nothing we can do -- we had an
8156 * error on the error probe. We bump an
8157 * error counter to at least indicate that
8158 * this condition happened.
8159 */
8160 dtrace_error(&state->dts_dblerrors);
8161 continue;
8162 }
8163
8164 if (vtime) {
8165 /*
8166 * Before recursing on dtrace_probe(), we
8167 * need to explicitly clear out our start
8168 * time to prevent it from being accumulated
8169 * into t_dtrace_vtime.
8170 */
8171 curthread->t_dtrace_start = 0;
8172 }
8173
8174 /*
8175 * Iterate over the actions to figure out which action
8176 * we were processing when we experienced the error.
8177 * Note that act points _past_ the faulting action; if
8178 * act is ecb->dte_action, the fault was in the
8179 * predicate, if it's ecb->dte_action->dta_next it's
8180 * in action #1, and so on.
8181 */
8182 for (err = ecb->dte_action, ndx = 0;
8183 err != act; err = err->dta_next, ndx++)
8184 continue;
8185
8186 dtrace_probe_error(state, ecb->dte_epid, ndx,
8187 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
8188 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
8189 cpu_core[cpuid].cpuc_dtrace_illval);
8190
8191 continue;
8192 }
8193
8194 if (!committed)
8195 buf->dtb_offset = offs + ecb->dte_size;
8196 }
8197
8198 if (vtime)
8199 curthread->t_dtrace_start = dtrace_gethrtime();
8200
8201 dtrace_interrupt_enable(cookie);
8202 }
8203
8204 /*
8205 * DTrace Probe Hashing Functions
8206 *
8207 * The functions in this section (and indeed, the functions in remaining
8208 * sections) are not _called_ from probe context. (Any exceptions to this are
8209 * marked with a "Note:".) Rather, they are called from elsewhere in the
8210 * DTrace framework to look up probes in, add probes to, and remove probes from
8211 * the DTrace probe hashes. (Each probe is hashed by each element of the
8212 * probe tuple -- allowing for fast lookups, regardless of what was
8213 * specified.)
8214 */
8215 static uint_t
8216 dtrace_hash_str(const char *p)
8217 {
8218 unsigned int g;
8219 uint_t hval = 0;
8220
8221 while (*p) {
8222 hval = (hval << 4) + *p++;
8223 if ((g = (hval & 0xf0000000)) != 0)
8224 hval ^= g >> 24;
8225 hval &= ~g;
8226 }
8227 return (hval);
8228 }
8229
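/*
 * Create a probe hash keyed on the string found at stroffs within each
 * probe; nextoffs and prevoffs give the offsets of the per-probe chain
 * links. The table starts with a single bucket and is grown on demand
 * by dtrace_hash_resize().
 */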
8230 static dtrace_hash_t *
8231 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
8232 {
8233 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
8234
8235 hash->dth_stroffs = stroffs;
8236 hash->dth_nextoffs = nextoffs;
8237 hash->dth_prevoffs = prevoffs;
8238
8239 hash->dth_size = 1;
8240 hash->dth_mask = hash->dth_size - 1;
8241
8242 hash->dth_tab = kmem_zalloc(hash->dth_size *
8243 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
8244
8245 return (hash);
8246 }
8247
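/*
 * Destroy the specified hash, freeing the bucket array and the hash
 * itself. On DEBUG kernels, assert that every bucket has already been
 * emptied.
 */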
8248 static void
8249 dtrace_hash_destroy(dtrace_hash_t *hash)
8250 {
8251 #ifdef DEBUG
8252 int i;
8253
8254 for (i = 0; i < hash->dth_size; i++)
8255 ASSERT(hash->dth_tab[i] == NULL);
8256 #endif
8257
8258 kmem_free(hash->dth_tab,
8259 hash->dth_size * sizeof (dtrace_hashbucket_t *));
8260 kmem_free(hash, sizeof (dtrace_hash_t));
8261 }
8262
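/*
 * Double the size of the bucket array and redistribute the existing
 * buckets into it. Only the bucket headers are rehashed; the probe
 * chains hanging off of each bucket are left intact.
 */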
8263 static void
8264 dtrace_hash_resize(dtrace_hash_t *hash)
8265 {
8266 int size = hash->dth_size, i, ndx;
8267 int new_size = hash->dth_size << 1;
8268 int new_mask = new_size - 1;
8269 dtrace_hashbucket_t **new_tab, *bucket, *next;
8270
8271 ASSERT((new_size & new_mask) == 0);
8272
8273 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
8274
8275 for (i = 0; i < size; i++) {
8276 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
8277 dtrace_probe_t *probe = bucket->dthb_chain;
8278
8279 ASSERT(probe != NULL);
8280 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
8281
8282 next = bucket->dthb_next;
8283 bucket->dthb_next = new_tab[ndx];
8284 new_tab[ndx] = bucket;
8285 }
8286 }
8287
8288 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
8289 hash->dth_tab = new_tab;
8290 hash->dth_size = new_size;
8291 hash->dth_mask = new_mask;
8292 }
8293
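/*
 * Add the specified probe to the hash. Probes that hash to the same
 * element share a bucket and are chained through their next/prev links;
 * if no such bucket exists yet, one is created, resizing the table first
 * if the number of buckets has outgrown it.
 */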
8294 static void
8295 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
8296 {
8297 int hashval = DTRACE_HASHSTR(hash, new);
8298 int ndx = hashval & hash->dth_mask;
8299 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8300 dtrace_probe_t **nextp, **prevp;
8301
8302 for (; bucket != NULL; bucket = bucket->dthb_next) {
8303 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
8304 goto add;
8305 }
8306
8307 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
8308 dtrace_hash_resize(hash);
8309 dtrace_hash_add(hash, new);
8310 return;
8311 }
8312
8313 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
8314 bucket->dthb_next = hash->dth_tab[ndx];
8315 hash->dth_tab[ndx] = bucket;
8316 hash->dth_nbuckets++;
8317
8318 add:
8319 nextp = DTRACE_HASHNEXT(hash, new);
8320 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
8321 *nextp = bucket->dthb_chain;
8322
8323 if (bucket->dthb_chain != NULL) {
8324 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
8325 ASSERT(*prevp == NULL);
8326 *prevp = new;
8327 }
8328
8329 bucket->dthb_chain = new;
8330 bucket->dthb_len++;
8331 }
8332
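/*
 * Return the first probe whose hashed element matches that of the given
 * template, or NULL if there is no such probe.
 */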
8333 static dtrace_probe_t *
8334 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
8335 {
8336 int hashval = DTRACE_HASHSTR(hash, template);
8337 int ndx = hashval & hash->dth_mask;
8338 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8339
8340 for (; bucket != NULL; bucket = bucket->dthb_next) {
8341 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
8342 return (bucket->dthb_chain);
8343 }
8344
8345 return (NULL);
8346 }
8347
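/*
 * Return the length of the chain of probes matching the template's hashed
 * element -- that is, the number of probes that a lookup on this hash
 * would have to consider -- or 0 if there is no match.
 */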
8348 static int
8349 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
8350 {
8351 int hashval = DTRACE_HASHSTR(hash, template);
8352 int ndx = hashval & hash->dth_mask;
8353 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8354
8355 for (; bucket != NULL; bucket = bucket->dthb_next) {
8356 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
8357 return (bucket->dthb_len);
8358 }
8359
8360 return (0);
8361 }
8362
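/*
 * Remove the specified probe from its hash chain, freeing the bucket if
 * this was the last probe hashing to its element.
 */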
8363 static void
8364 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
8365 {
8366 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
8367 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
8368
8369 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
8370 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
8371
8372 /*
8373 * Find the bucket that we're removing this probe from.
8374 */
8375 for (; bucket != NULL; bucket = bucket->dthb_next) {
8376 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
8377 break;
8378 }
8379
8380 ASSERT(bucket != NULL);
8381
8382 if (*prevp == NULL) {
8383 if (*nextp == NULL) {
8384 /*
8385 * The removed probe was the only probe on this
8386 * bucket; we need to remove the bucket.
8387 */
8388 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
8389
8390 ASSERT(bucket->dthb_chain == probe);
8391 ASSERT(b != NULL);
8392
8393 if (b == bucket) {
8394 hash->dth_tab[ndx] = bucket->dthb_next;
8395 } else {
8396 while (b->dthb_next != bucket)
8397 b = b->dthb_next;
8398 b->dthb_next = bucket->dthb_next;
8399 }
8400
8401 ASSERT(hash->dth_nbuckets > 0);
8402 hash->dth_nbuckets--;
8403 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
8404 return;
8405 }
8406
8407 bucket->dthb_chain = *nextp;
8408 } else {
8409 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
8410 }
8411
8412 if (*nextp != NULL)
8413 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
8414 }
8415
8416 /*
8417 * DTrace Utility Functions
8418 *
8419 * These are random utility functions that are _not_ called from probe context.
8420 */
8421 static int
8422 dtrace_badattr(const dtrace_attribute_t *a)
8423 {
8424 return (a->dtat_name > DTRACE_STABILITY_MAX ||
8425 a->dtat_data > DTRACE_STABILITY_MAX ||
8426 a->dtat_class > DTRACE_CLASS_MAX);
8427 }
8428
8429 /*
8430 * Return a duplicate copy of a string. If the specified string is NULL,
8431 * this function returns a zero-length string.
8432 */
8433 static char *
8434 dtrace_strdup(const char *str)
8435 {
8436 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
8437
8438 if (str != NULL)
8439 (void) strcpy(new, str);
8440
8441 return (new);
8442 }
8443
8444 #define DTRACE_ISALPHA(c) \
8445 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
8446
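/*
 * Return non-zero if the specified name is not a valid component name:
 * the first character must be alphabetic or one of '-', '_' or '.', and
 * subsequent characters may additionally be digits or '`'. NULL and
 * empty names are not considered bad.
 */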
8447 static int
8448 dtrace_badname(const char *s)
8449 {
8450 char c;
8451
8452 if (s == NULL || (c = *s++) == '\0')
8453 return (0);
8454
8455 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
8456 return (1);
8457
8458 while ((c = *s++) != '\0') {
8459 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
8460 c != '-' && c != '_' && c != '.' && c != '`')
8461 return (1);
8462 }
8463
8464 return (0);
8465 }
8466
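/*
 * Derive the DTrace privilege flags -- and, where relevant, the user and
 * zone IDs -- implied by the specified credential. On non-illumos
 * platforms, all privileges are granted and the uid and zoneid are zeroed.
 */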
8467 static void
8468 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
8469 {
8470 uint32_t priv;
8471
8472 #ifdef illumos
8473 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
8474 /*
8475 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
8476 */
8477 priv = DTRACE_PRIV_ALL;
8478 } else {
8479 *uidp = crgetuid(cr);
8480 *zoneidp = crgetzoneid(cr);
8481
8482 priv = 0;
8483 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
8484 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
8485 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
8486 priv |= DTRACE_PRIV_USER;
8487 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
8488 priv |= DTRACE_PRIV_PROC;
8489 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
8490 priv |= DTRACE_PRIV_OWNER;
8491 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
8492 priv |= DTRACE_PRIV_ZONEOWNER;
8493 }
8494 #else
8495 priv = DTRACE_PRIV_ALL;
8496 *uidp = 0;
8497 *zoneidp = 0;
8498 #endif
8499
8500 *privp = priv;
8501 }
8502
8503 #ifdef DTRACE_ERRDEBUG
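/*
 * Record the specified error message in the error hash (and in
 * dtrace_errlast/dtrace_errthread) for post-mortem inspection, using
 * linear probing on collision; panic if the hash is full.
 */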
8504 static void
8505 dtrace_errdebug(const char *str)
8506 {
8507 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ;
8508 int occupied = 0;
8509
8510 mutex_enter(&dtrace_errlock);
8511 dtrace_errlast = str;
8512 dtrace_errthread = curthread;
8513
8514 while (occupied++ < DTRACE_ERRHASHSZ) {
8515 if (dtrace_errhash[hval].dter_msg == str) {
8516 dtrace_errhash[hval].dter_count++;
8517 goto out;
8518 }
8519
8520 if (dtrace_errhash[hval].dter_msg != NULL) {
8521 hval = (hval + 1) % DTRACE_ERRHASHSZ;
8522 continue;
8523 }
8524
8525 dtrace_errhash[hval].dter_msg = str;
8526 dtrace_errhash[hval].dter_count = 1;
8527 goto out;
8528 }
8529
8530 panic("dtrace: undersized error hash");
8531 out:
8532 mutex_exit(&dtrace_errlock);
8533 }
8534 #endif
8535
8536 /*
8537 * DTrace Matching Functions
8538 *
8539 * These functions are used to match groups of probes, given some elements of
8540 * a probe tuple, or some globbed expressions for elements of a probe tuple.
8541 */
8542 static int
8543 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
8544 zoneid_t zoneid)
8545 {
8546 if (priv != DTRACE_PRIV_ALL) {
8547 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
8548 uint32_t match = priv & ppriv;
8549
8550 /*
8551 * No PRIV_DTRACE_* privileges...
8552 */
8553 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
8554 DTRACE_PRIV_KERNEL)) == 0)
8555 return (0);
8556
8557 /*
8558 * No matching bits, but there were bits to match...
8559 */
8560 if (match == 0 && ppriv != 0)
8561 return (0);
8562
8563 /*
8564 * Need to have permissions to the process, but don't...
8565 */
8566 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
8567 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
8568 return (0);
8569 }
8570
8571 /*
8572 * Need to be in the same zone unless we possess the
8573 * privilege to examine all zones.
8574 */
8575 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
8576 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
8577 return (0);
8578 }
8579 }
8580
8581 return (1);
8582 }
8583
8584 /*
8585 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
8586 * consists of input pattern strings and an ops-vector to evaluate them.
8587 * This function returns >0 for match, 0 for no match, and <0 for error.
8588 */
8589 static int
8590 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
8591 uint32_t priv, uid_t uid, zoneid_t zoneid)
8592 {
8593 dtrace_provider_t *pvp = prp->dtpr_provider;
8594 int rv;
8595
8596 if (pvp->dtpv_defunct)
8597 return (0);
8598
8599 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
8600 return (rv);
8601
8602 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
8603 return (rv);
8604
8605 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
8606 return (rv);
8607
8608 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
8609 return (rv);
8610
8611 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
8612 return (0);
8613
8614 return (rv);
8615 }
8616
8617 /*
8618 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
8619 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
8620 * libc's version, the kernel version only applies to 8-bit ASCII strings.
8621 * In addition, all of the recursion cases except for '*' matching have been
8622 * unwound. For '*', we still implement recursive evaluation, but a depth
8623 * counter is maintained and matching is aborted if we recurse too deep.
8624 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
8625 */
8626 static int
8627 dtrace_match_glob(const char *s, const char *p, int depth)
8628 {
8629 const char *olds;
8630 char s1, c;
8631 int gs;
8632
8633 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
8634 return (-1);
8635
8636 if (s == NULL)
8637 s = ""; /* treat NULL as empty string */
8638
8639 top:
8640 olds = s;
8641 s1 = *s++;
8642
8643 if (p == NULL)
8644 return (0);
8645
8646 if ((c = *p++) == '\0')
8647 return (s1 == '\0');
8648
8649 switch (c) {
8650 case '[': {
8651 int ok = 0, notflag = 0;
8652 char lc = '\0';
8653
8654 if (s1 == '\0')
8655 return (0);
8656
8657 if (*p == '!') {
8658 notflag = 1;
8659 p++;
8660 }
8661
8662 if ((c = *p++) == '\0')
8663 return (0);
8664
8665 do {
8666 if (c == '-' && lc != '\0' && *p != ']') {
8667 if ((c = *p++) == '\0')
8668 return (0);
8669 if (c == '\\' && (c = *p++) == '\0')
8670 return (0);
8671
8672 if (notflag) {
8673 if (s1 < lc || s1 > c)
8674 ok++;
8675 else
8676 return (0);
8677 } else if (lc <= s1 && s1 <= c)
8678 ok++;
8679
8680 } else if (c == '\\' && (c = *p++) == '\0')
8681 return (0);
8682
8683 lc = c; /* save left-hand 'c' for next iteration */
8684
8685 if (notflag) {
8686 if (s1 != c)
8687 ok++;
8688 else
8689 return (0);
8690 } else if (s1 == c)
8691 ok++;
8692
8693 if ((c = *p++) == '\0')
8694 return (0);
8695
8696 } while (c != ']');
8697
8698 if (ok)
8699 goto top;
8700
8701 return (0);
8702 }
8703
8704 case '\\':
8705 if ((c = *p++) == '\0')
8706 return (0);
8707 /*FALLTHRU*/
8708
8709 default:
8710 if (c != s1)
8711 return (0);
8712 /*FALLTHRU*/
8713
8714 case '?':
8715 if (s1 != '\0')
8716 goto top;
8717 return (0);
8718
8719 case '*':
8720 while (*p == '*')
8721 p++; /* consecutive *'s are identical to a single one */
8722
8723 if (*p == '\0')
8724 return (1);
8725
8726 for (s = olds; *s != '\0'; s++) {
8727 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
8728 return (gs);
8729 }
8730
8731 return (0);
8732 }
8733 }
8734
8735 /*ARGSUSED*/
8736 static int
8737 dtrace_match_string(const char *s, const char *p, int depth)
8738 {
8739 return (s != NULL && strcmp(s, p) == 0);
8740 }
8741
8742 /*ARGSUSED*/
8743 static int
8744 dtrace_match_nul(const char *s, const char *p, int depth)
8745 {
8746 return (1); /* always match the empty pattern */
8747 }
8748
8749 /*ARGSUSED*/
8750 static int
8751 dtrace_match_nonzero(const char *s, const char *p, int depth)
8752 {
8753 return (s != NULL && s[0] != '\0');
8754 }
8755
8756 static int
8757 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
8758 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
8759 {
8760 dtrace_probe_t template, *probe;
8761 dtrace_hash_t *hash = NULL;
8762 int len, rc, best = INT_MAX, nmatched = 0;
8763 dtrace_id_t i;
8764
8765 ASSERT(MUTEX_HELD(&dtrace_lock));
8766
8767 /*
8768 * If the probe ID is specified in the key, just lookup by ID and
8769 * invoke the match callback once if a matching probe is found.
8770 */
8771 if (pkp->dtpk_id != DTRACE_IDNONE) {
8772 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
8773 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
8774 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
8775 return (DTRACE_MATCH_FAIL);
8776 nmatched++;
8777 }
8778 return (nmatched);
8779 }
8780
8781 template.dtpr_mod = (char *)pkp->dtpk_mod;
8782 template.dtpr_func = (char *)pkp->dtpk_func;
8783 template.dtpr_name = (char *)pkp->dtpk_name;
8784
8785 /*
8786 * We want to find the most distinct of the module name, function
8787 * name, and name. So for each one that is not a glob pattern or
8788 * empty string, we perform a lookup in the corresponding hash and
8789 * use the hash table with the fewest collisions to do our search.
8790 */
8791 if (pkp->dtpk_mmatch == &dtrace_match_string &&
8792 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
8793 best = len;
8794 hash = dtrace_bymod;
8795 }
8796
8797 if (pkp->dtpk_fmatch == &dtrace_match_string &&
8798 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
8799 best = len;
8800 hash = dtrace_byfunc;
8801 }
8802
8803 if (pkp->dtpk_nmatch == &dtrace_match_string &&
8804 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
8805 best = len;
8806 hash = dtrace_byname;
8807 }
8808
8809 /*
8810 * If we did not select a hash table, iterate over every probe and
8811 * invoke our callback for each one that matches our input probe key.
8812 */
8813 if (hash == NULL) {
8814 for (i = 0; i < dtrace_nprobes; i++) {
8815 if ((probe = dtrace_probes[i]) == NULL ||
8816 dtrace_match_probe(probe, pkp, priv, uid,
8817 zoneid) <= 0)
8818 continue;
8819
8820 nmatched++;
8821
8822 if ((rc = (*matched)(probe, arg)) !=
8823 DTRACE_MATCH_NEXT) {
8824 if (rc == DTRACE_MATCH_FAIL)
8825 return (DTRACE_MATCH_FAIL);
8826 break;
8827 }
8828 }
8829
8830 return (nmatched);
8831 }
8832
8833 /*
8834 * If we selected a hash table, iterate over each probe of the same key
8835 * name and invoke the callback for every probe that matches the other
8836 * attributes of our input probe key.
8837 */
8838 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
8839 probe = *(DTRACE_HASHNEXT(hash, probe))) {
8840
8841 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
8842 continue;
8843
8844 nmatched++;
8845
8846 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
8847 if (rc == DTRACE_MATCH_FAIL)
8848 return (DTRACE_MATCH_FAIL);
8849 break;
8850 }
8851 }
8852
8853 return (nmatched);
8854 }
8855
8856 /*
8857 * Return the function pointer dtrace_probecmp() should use to compare the
8858 * specified pattern with a string. For NULL or empty patterns, we select
8859 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
8860 * For non-empty non-glob strings, we use dtrace_match_string().
8861 */
8862 static dtrace_probekey_f *
8863 dtrace_probekey_func(const char *p)
8864 {
8865 char c;
8866
8867 if (p == NULL || *p == '\0')
8868 return (&dtrace_match_nul);
8869
8870 while ((c = *p++) != '\0') {
8871 if (c == '[' || c == '?' || c == '*' || c == '\\')
8872 return (&dtrace_match_glob);
8873 }
8874
8875 return (&dtrace_match_string);
8876 }
8877
8878 /*
8879 * Build a probe comparison key for use with dtrace_match_probe() from the
8880 * given probe description. By convention, a null key only matches anchored
8881 * probes: if each field is the empty string, reset dtpk_fmatch to
8882 * dtrace_match_nonzero().
8883 */
8884 static void
8885 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
8886 {
8887 pkp->dtpk_prov = pdp->dtpd_provider;
8888 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
8889
8890 pkp->dtpk_mod = pdp->dtpd_mod;
8891 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
8892
8893 pkp->dtpk_func = pdp->dtpd_func;
8894 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
8895
8896 pkp->dtpk_name = pdp->dtpd_name;
8897 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
8898
8899 pkp->dtpk_id = pdp->dtpd_id;
8900
8901 if (pkp->dtpk_id == DTRACE_IDNONE &&
8902 pkp->dtpk_pmatch == &dtrace_match_nul &&
8903 pkp->dtpk_mmatch == &dtrace_match_nul &&
8904 pkp->dtpk_fmatch == &dtrace_match_nul &&
8905 pkp->dtpk_nmatch == &dtrace_match_nul)
8906 pkp->dtpk_fmatch = &dtrace_match_nonzero;
8907 }
8908
8909 /*
8910 * DTrace Provider-to-Framework API Functions
8911 *
8912 * These functions implement much of the Provider-to-Framework API, as
8913 * described in <sys/dtrace.h>. The parts of the API not in this section are
8914 * the functions in the API for probe management (found below), and
8915 * dtrace_probe() itself (found above).
8916 */
8917
8918 /*
8919 * Register the calling provider with the DTrace framework. This should
8920 * generally be called by DTrace providers in their attach(9E) entry point.
8921 */
8922 int
8923 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
8924 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
8925 {
8926 dtrace_provider_t *provider;
8927
8928 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
8929 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8930 "arguments", name ? name : "<NULL>");
8931 return (EINVAL);
8932 }
8933
8934 if (name[0] == '\0' || dtrace_badname(name)) {
8935 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8936 "provider name", name);
8937 return (EINVAL);
8938 }
8939
8940 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
8941 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
8942 pops->dtps_destroy == NULL ||
8943 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
8944 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8945 "provider ops", name);
8946 return (EINVAL);
8947 }
8948
8949 if (dtrace_badattr(&pap->dtpa_provider) ||
8950 dtrace_badattr(&pap->dtpa_mod) ||
8951 dtrace_badattr(&pap->dtpa_func) ||
8952 dtrace_badattr(&pap->dtpa_name) ||
8953 dtrace_badattr(&pap->dtpa_args)) {
8954 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8955 "provider attributes", name);
8956 return (EINVAL);
8957 }
8958
8959 if (priv & ~DTRACE_PRIV_ALL) {
8960 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
8961 "privilege attributes", name);
8962 return (EINVAL);
8963 }
8964
8965 if ((priv & DTRACE_PRIV_KERNEL) &&
8966 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
8967 pops->dtps_usermode == NULL) {
8968 cmn_err(CE_WARN, "failed to register provider '%s': need "
8969 "dtps_usermode() op for given privilege attributes", name);
8970 return (EINVAL);
8971 }
8972
8973 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
8974 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
8975 (void) strcpy(provider->dtpv_name, name);
8976
8977 provider->dtpv_attr = *pap;
8978 provider->dtpv_priv.dtpp_flags = priv;
8979 if (cr != NULL) {
8980 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
8981 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
8982 }
8983 provider->dtpv_pops = *pops;
8984
8985 if (pops->dtps_provide == NULL) {
8986 ASSERT(pops->dtps_provide_module != NULL);
8987 provider->dtpv_pops.dtps_provide =
8988 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop;
8989 }
8990
8991 if (pops->dtps_provide_module == NULL) {
8992 ASSERT(pops->dtps_provide != NULL);
8993 provider->dtpv_pops.dtps_provide_module =
8994 (void (*)(void *, modctl_t *))dtrace_nullop;
8995 }
8996
8997 if (pops->dtps_suspend == NULL) {
8998 ASSERT(pops->dtps_resume == NULL);
8999 provider->dtpv_pops.dtps_suspend =
9000 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
9001 provider->dtpv_pops.dtps_resume =
9002 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
9003 }
9004
9005 provider->dtpv_arg = arg;
9006 *idp = (dtrace_provider_id_t)provider;
9007
9008 if (pops == &dtrace_provider_ops) {
9009 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9010 ASSERT(MUTEX_HELD(&dtrace_lock));
9011 ASSERT(dtrace_anon.dta_enabling == NULL);
9012
9013 /*
9014 * We make sure that the DTrace provider is at the head of
9015 * the provider chain.
9016 */
9017 provider->dtpv_next = dtrace_provider;
9018 dtrace_provider = provider;
9019 return (0);
9020 }
9021
9022 mutex_enter(&dtrace_provider_lock);
9023 mutex_enter(&dtrace_lock);
9024
9025 /*
9026 * If there is at least one provider registered, we'll add this
9027 * provider after the first provider.
9028 */
9029 if (dtrace_provider != NULL) {
9030 provider->dtpv_next = dtrace_provider->dtpv_next;
9031 dtrace_provider->dtpv_next = provider;
9032 } else {
9033 dtrace_provider = provider;
9034 }
9035
9036 if (dtrace_retained != NULL) {
9037 dtrace_enabling_provide(provider);
9038
9039 /*
9040 * Now we need to call dtrace_enabling_matchall() -- which
9041 * will acquire cpu_lock and dtrace_lock. We therefore need
9042 * to drop all of our locks before calling into it...
9043 */
9044 mutex_exit(&dtrace_lock);
9045 mutex_exit(&dtrace_provider_lock);
9046 dtrace_enabling_matchall();
9047
9048 return (0);
9049 }
9050
9051 mutex_exit(&dtrace_lock);
9052 mutex_exit(&dtrace_provider_lock);
9053
9054 return (0);
9055 }
9056
9057 /*
9058 * Unregister the specified provider from the DTrace framework. This should
9059 * generally be called by DTrace providers in their detach(9E) entry point.
9060 */
9061 int
9062 dtrace_unregister(dtrace_provider_id_t id)
9063 {
9064 dtrace_provider_t *old = (dtrace_provider_t *)id;
9065 dtrace_provider_t *prev = NULL;
9066 int i, self = 0, noreap = 0;
9067 dtrace_probe_t *probe, *first = NULL;
9068
9069 if (old->dtpv_pops.dtps_enable ==
9070 (int (*)(void *, dtrace_id_t, void *))dtrace_nullop) {
9071 /*
9072 * If DTrace itself is the provider, we're called with locks
9073 * already held.
9074 */
9075 ASSERT(old == dtrace_provider);
9076 #ifdef illumos
9077 ASSERT(dtrace_devi != NULL);
9078 #endif
9079 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9080 ASSERT(MUTEX_HELD(&dtrace_lock));
9081 self = 1;
9082
9083 if (dtrace_provider->dtpv_next != NULL) {
9084 /*
9085 * There's another provider here; return failure.
9086 */
9087 return (EBUSY);
9088 }
9089 } else {
9090 mutex_enter(&dtrace_provider_lock);
9091 #ifdef illumos
9092 mutex_enter(&mod_lock);
9093 #endif
9094 mutex_enter(&dtrace_lock);
9095 }
9096
9097 /*
9098 * If anyone has /dev/dtrace open, or if there are anonymous enabled
9099 * probes, we refuse to let providers slither away, unless this
9100 * provider has already been explicitly invalidated.
9101 */
9102 if (!old->dtpv_defunct &&
9103 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
9104 dtrace_anon.dta_state->dts_necbs > 0))) {
9105 if (!self) {
9106 mutex_exit(&dtrace_lock);
9107 #ifdef illumos
9108 mutex_exit(&mod_lock);
9109 #endif
9110 mutex_exit(&dtrace_provider_lock);
9111 }
9112 return (EBUSY);
9113 }
9114
9115 /*
9116 * Attempt to destroy the probes associated with this provider.
9117 */
9118 for (i = 0; i < dtrace_nprobes; i++) {
9119 if ((probe = dtrace_probes[i]) == NULL)
9120 continue;
9121
9122 if (probe->dtpr_provider != old)
9123 continue;
9124
9125 if (probe->dtpr_ecb == NULL)
9126 continue;
9127
9128 /*
9129 * If we are trying to unregister a defunct provider, and the
9130 * provider was made defunct within the interval dictated by
9131 * dtrace_unregister_defunct_reap, we'll (asynchronously)
9132 * attempt to reap our enablings. To denote that the provider
9133 * should reattempt to unregister itself at some point in the
9134 * future, we will return a differentiable error code (EAGAIN
9135 * instead of EBUSY) in this case.
9136 */
9137 if (dtrace_gethrtime() - old->dtpv_defunct >
9138 dtrace_unregister_defunct_reap)
9139 noreap = 1;
9140
9141 /*
9142 * We have at least one ECB; we can't remove this provider.
9143 */
9144 if (!self) {
9145 mutex_exit(&dtrace_lock);
9146 #ifdef illumos
9147 mutex_exit(&mod_lock);
9148 #endif
9149 mutex_exit(&dtrace_provider_lock);
9150 }
9151
9152 if (noreap)
9153 return (EBUSY);
9154
9155 (void) taskq_dispatch(dtrace_taskq,
9156 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP);
9157
9158 return (EAGAIN);
9159 }
9160
9161 /*
9162 * All of the probes for this provider are disabled; we can safely
9163 * remove all of them from their hash chains and from the probe array.
9164 */
9165 for (i = 0; i < dtrace_nprobes; i++) {
9166 if ((probe = dtrace_probes[i]) == NULL)
9167 continue;
9168
9169 if (probe->dtpr_provider != old)
9170 continue;
9171
9172 dtrace_probes[i] = NULL;
9173
9174 dtrace_hash_remove(dtrace_bymod, probe);
9175 dtrace_hash_remove(dtrace_byfunc, probe);
9176 dtrace_hash_remove(dtrace_byname, probe);
9177
9178 if (first == NULL) {
9179 first = probe;
9180 probe->dtpr_nextmod = NULL;
9181 } else {
9182 probe->dtpr_nextmod = first;
9183 first = probe;
9184 }
9185 }
9186
9187 /*
9188 * The provider's probes have been removed from the hash chains and
9189 * from the probe array. Now issue a dtrace_sync() to be sure that
9190 * everyone has cleared out from any probe array processing.
9191 */
9192 dtrace_sync();
9193
9194 for (probe = first; probe != NULL; probe = first) {
9195 first = probe->dtpr_nextmod;
9196
9197 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
9198 probe->dtpr_arg);
9199 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
9200 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
9201 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
9202 #ifdef illumos
9203 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
9204 #endif
9205 #ifdef __FreeBSD__
9206 free_unr(dtrace_arena, probe->dtpr_id);
9207 #endif
9208 #ifdef __NetBSD__
9209 vmem_free(dtrace_arena, (uintptr_t)(probe->dtpr_id), 1);
9210 #endif
9211 kmem_free(probe, sizeof (dtrace_probe_t));
9212 }
9213
9214 if ((prev = dtrace_provider) == old) {
9215 #ifdef illumos
9216 ASSERT(self || dtrace_devi == NULL);
9217 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
9218 #endif
9219 dtrace_provider = old->dtpv_next;
9220 } else {
9221 while (prev != NULL && prev->dtpv_next != old)
9222 prev = prev->dtpv_next;
9223
9224 if (prev == NULL) {
9225 panic("attempt to unregister non-existent "
9226 "dtrace provider %p\n", (void *)id);
9227 }
9228
9229 prev->dtpv_next = old->dtpv_next;
9230 }
9231
9232 if (!self) {
9233 mutex_exit(&dtrace_lock);
9234 #ifdef illumos
9235 mutex_exit(&mod_lock);
9236 #endif
9237 mutex_exit(&dtrace_provider_lock);
9238 }
9239
9240 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
9241 kmem_free(old, sizeof (dtrace_provider_t));
9242
9243 return (0);
9244 }
9245
9246 /*
9247 * Invalidate the specified provider. All subsequent probe lookups for the
9248 * specified provider will fail, but its probes will not be removed.
9249 */
9250 void
9251 dtrace_invalidate(dtrace_provider_id_t id)
9252 {
9253 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
9254
9255 ASSERT(pvp->dtpv_pops.dtps_enable !=
9256 (int (*)(void *, dtrace_id_t, void *))dtrace_nullop);
9257
9258 mutex_enter(&dtrace_provider_lock);
9259 mutex_enter(&dtrace_lock);
9260
9261 pvp->dtpv_defunct = dtrace_gethrtime();
9262
9263 mutex_exit(&dtrace_lock);
9264 mutex_exit(&dtrace_provider_lock);
9265 }
9266
9267 /*
9268 * Indicate whether or not DTrace has attached.
9269 */
9270 int
9271 dtrace_attached(void)
9272 {
9273 /*
9274 * dtrace_provider will be non-NULL iff the DTrace driver has
9275 * attached. (It's non-NULL because DTrace is always itself a
9276 * provider.)
9277 */
9278 return (dtrace_provider != NULL);
9279 }
9280
9281 /*
9282 * Remove all the unenabled probes for the given provider. This function is
9283 * not unlike dtrace_unregister(), except that it doesn't remove the provider
9284 * -- just as many of its associated probes as it can.
9285 */
9286 int
9287 dtrace_condense(dtrace_provider_id_t id)
9288 {
9289 dtrace_provider_t *prov = (dtrace_provider_t *)id;
9290 int i;
9291 dtrace_probe_t *probe;
9292
9293 /*
9294 * Make sure this isn't the dtrace provider itself.
9295 */
9296 ASSERT(prov->dtpv_pops.dtps_enable !=
9297 (int (*)(void *, dtrace_id_t, void *))dtrace_nullop);
9298
9299 mutex_enter(&dtrace_provider_lock);
9300 mutex_enter(&dtrace_lock);
9301
9302 /*
9303 * Attempt to destroy the probes associated with this provider.
9304 */
9305 for (i = 0; i < dtrace_nprobes; i++) {
9306 if ((probe = dtrace_probes[i]) == NULL)
9307 continue;
9308
9309 if (probe->dtpr_provider != prov)
9310 continue;
9311
9312 if (probe->dtpr_ecb != NULL)
9313 continue;
9314
9315 dtrace_probes[i] = NULL;
9316
9317 dtrace_hash_remove(dtrace_bymod, probe);
9318 dtrace_hash_remove(dtrace_byfunc, probe);
9319 dtrace_hash_remove(dtrace_byname, probe);
9320
9321 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
9322 probe->dtpr_arg);
9323 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
9324 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
9325 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
9326 kmem_free(probe, sizeof (dtrace_probe_t));
9327 #ifdef illumos
9328 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
9329 #endif
9330 #ifdef __FreeBSD__
9331 free_unr(dtrace_arena, i + 1);
9332 #endif
9333 #ifdef __NetBSD__
9334 vmem_free(dtrace_arena, ((uintptr_t)i + 1), 1);
9335 #endif
9336 }
9337
9338 mutex_exit(&dtrace_lock);
9339 mutex_exit(&dtrace_provider_lock);
9340
9341 return (0);
9342 }
9343
9344 /*
9345 * DTrace Probe Management Functions
9346 *
9347 * The functions in this section perform the DTrace probe management,
9348 * including functions to create probes, look-up probes, and call into the
9349 * providers to request that probes be provided. Some of these functions are
9350 * in the Provider-to-Framework API; these functions can be identified by the
9351 * fact that they are not declared "static".
9352 */
9353
9354 /*
9355 * Create a probe with the specified module name, function name, and name.
9356 */
9357 dtrace_id_t
9358 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
9359 const char *func, const char *name, int aframes, void *arg)
9360 {
9361 dtrace_probe_t *probe, **probes;
9362 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
9363 dtrace_id_t id;
9364
9365 if (provider == dtrace_provider) {
9366 ASSERT(MUTEX_HELD(&dtrace_lock));
9367 } else {
9368 mutex_enter(&dtrace_lock);
9369 }
9370
9371 #ifdef illumos
9372 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
9373 VM_BESTFIT | VM_SLEEP);
9374 #endif
9375 #ifdef __FreeBSD__
9376 id = alloc_unr(dtrace_arena);
9377 #endif
9378 #ifdef __NetBSD__
9379 vmem_addr_t offset;
9380 if (vmem_alloc(dtrace_arena, 1, VM_BESTFIT | VM_SLEEP, &offset) != 0)
9381 ASSERT(0);
9382 id = (dtrace_id_t)(uintptr_t)offset;
9383 #endif
9384 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
9385
9386 probe->dtpr_id = id;
9387 probe->dtpr_gen = dtrace_probegen++;
9388 probe->dtpr_mod = dtrace_strdup(mod);
9389 probe->dtpr_func = dtrace_strdup(func);
9390 probe->dtpr_name = dtrace_strdup(name);
9391 probe->dtpr_arg = arg;
9392 probe->dtpr_aframes = aframes;
9393 probe->dtpr_provider = provider;
9394
9395 dtrace_hash_add(dtrace_bymod, probe);
9396 dtrace_hash_add(dtrace_byfunc, probe);
9397 dtrace_hash_add(dtrace_byname, probe);
9398
9399 if (id - 1 >= dtrace_nprobes) {
9400 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
9401 size_t nsize = osize << 1;
9402
9403 if (nsize == 0) {
9404 ASSERT(osize == 0);
9405 ASSERT(dtrace_probes == NULL);
9406 nsize = sizeof (dtrace_probe_t *);
9407 }
9408
9409 probes = kmem_zalloc(nsize, KM_SLEEP);
9410
9411 if (dtrace_probes == NULL) {
9412 ASSERT(osize == 0);
9413 dtrace_probes = probes;
9414 dtrace_nprobes = 1;
9415 } else {
9416 dtrace_probe_t **oprobes = dtrace_probes;
9417
9418 bcopy(oprobes, probes, osize);
9419 dtrace_membar_producer();
9420 dtrace_probes = probes;
9421
9422 dtrace_sync();
9423
9424 /*
9425 * All CPUs are now seeing the new probes array; we can
9426 * safely free the old array.
9427 */
9428 kmem_free(oprobes, osize);
9429 dtrace_nprobes <<= 1;
9430 }
9431
9432 ASSERT(id - 1 < dtrace_nprobes);
9433 }
9434
9435 ASSERT(dtrace_probes[id - 1] == NULL);
9436 dtrace_probes[id - 1] = probe;
9437
9438 if (provider != dtrace_provider)
9439 mutex_exit(&dtrace_lock);
9440
9441 return (id);
9442 }
9443
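/*
 * Return the probe with the specified probe ID, or NULL if the ID is out
 * of range. The caller must hold dtrace_lock.
 */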
9444 static dtrace_probe_t *
9445 dtrace_probe_lookup_id(dtrace_id_t id)
9446 {
9447 ASSERT(MUTEX_HELD(&dtrace_lock));
9448
9449 if (id == 0 || id > dtrace_nprobes)
9450 return (NULL);
9451
9452 return (dtrace_probes[id - 1]);
9453 }
9454
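/*
 * dtrace_match() callback used by dtrace_probe_lookup(): record the
 * matched probe's ID and terminate the match.
 */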
9455 static int
9456 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
9457 {
9458 *((dtrace_id_t *)arg) = probe->dtpr_id;
9459
9460 return (DTRACE_MATCH_DONE);
9461 }
9462
9463 /*
9464 * Look up a probe based on provider and one or more of module name, function
9465 * name and probe name.
9466 */
9467 dtrace_id_t
9468 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod,
9469 char *func, char *name)
9470 {
9471 dtrace_probekey_t pkey;
9472 dtrace_id_t id;
9473 int match;
9474
9475 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
9476 pkey.dtpk_pmatch = &dtrace_match_string;
9477 pkey.dtpk_mod = mod;
9478 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
9479 pkey.dtpk_func = func;
9480 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
9481 pkey.dtpk_name = name;
9482 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
9483 pkey.dtpk_id = DTRACE_IDNONE;
9484
9485 mutex_enter(&dtrace_lock);
9486 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
9487 dtrace_probe_lookup_match, &id);
9488 mutex_exit(&dtrace_lock);
9489
9490 ASSERT(match == 1 || match == 0);
9491 return (match ? id : 0);
9492 }
9493
9494 /*
9495 * Returns the probe argument associated with the specified probe.
9496 */
9497 void *
9498 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
9499 {
9500 dtrace_probe_t *probe;
9501 void *rval = NULL;
9502
9503 mutex_enter(&dtrace_lock);
9504
9505 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
9506 probe->dtpr_provider == (dtrace_provider_t *)id)
9507 rval = probe->dtpr_arg;
9508
9509 mutex_exit(&dtrace_lock);
9510
9511 return (rval);
9512 }
9513
9514 /*
9515 * Copy a probe into a probe description.
9516 */
9517 static void
9518 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
9519 {
9520 bzero(pdp, sizeof (dtrace_probedesc_t));
9521 pdp->dtpd_id = prp->dtpr_id;
9522
9523 (void) strncpy(pdp->dtpd_provider,
9524 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
9525
9526 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
9527 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
9528 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
9529 }
9530
9531 /*
9532 * Called to indicate that a probe -- or probes -- should be provided by a
9533 * specified provider. If the specified description is NULL, the provider will
9534 * be told to provide all of its probes. (This is done whenever a new
9535 * consumer comes along, or whenever a retained enabling is to be matched.) If
9536 * the specified description is non-NULL, the provider is given the
9537 * opportunity to dynamically provide the specified probe, allowing providers
9538 * to support the creation of probes on-the-fly. (So-called _autocreated_
9539 * probes.) If the provider is NULL, the operations will be applied to all
9540 * providers; if the provider is non-NULL the operations will only be applied
9541 * to the specified provider. The dtrace_provider_lock must be held, and the
9542 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
9543 * will need to grab the dtrace_lock when it reenters the framework through
9544 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
9545 */
9546 static void
9547 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
9548 {
9549 #ifdef illumos
9550 modctl_t *ctl;
9551 #endif
9552 #ifdef __NetBSD__
9553 module_t *mod;
9554 #endif
9555 int all = 0;
9556
9557 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
9558
9559 if (prv == NULL) {
9560 all = 1;
9561 prv = dtrace_provider;
9562 }
9563
9564 do {
9565 /*
9566 * First, call the blanket provide operation.
9567 */
9568 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
9569
9570 #ifdef illumos
9571 /*
9572 * Now call the per-module provide operation. We will grab
9573 * mod_lock to prevent the list from being modified. Note
9574 * that this also prevents the mod_busy bits from changing.
9575 * (mod_busy can only be changed with mod_lock held.)
9576 */
9577 mutex_enter(&mod_lock);
9578
9579 ctl = &modules;
9580 do {
9581 if (ctl->mod_busy || ctl->mod_mp == NULL)
9582 continue;
9583
9584 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
9585
9586 } while ((ctl = ctl->mod_next) != &modules);
9587
9588 mutex_exit(&mod_lock);
9589 #endif
9590 #ifdef __NetBSD__
9591 kernconfig_lock();
9592
9593 /* Fake netbsd module first */
9594 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, module_kernel());
9595
9596 TAILQ_FOREACH(mod, &module_list, mod_chain) {
9597 if (module_source(mod) != MODULE_SOURCE_KERNEL)
9598 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, mod);
9599 }
9600 kernconfig_unlock();
9601 #endif
9602 } while (all && (prv = prv->dtpv_next) != NULL);
9603 }
9604
9605 #ifdef illumos
9606 /*
9607 * Iterate over each probe, and call the Framework-to-Provider API function
9608 * denoted by offs.
9609 */
9610 static void
9611 dtrace_probe_foreach(uintptr_t offs)
9612 {
9613 dtrace_provider_t *prov;
9614 void (*func)(void *, dtrace_id_t, void *);
9615 dtrace_probe_t *probe;
9616 dtrace_icookie_t cookie;
9617 int i;
9618
9619 /*
9620 * We disable interrupts to walk through the probe array. This is
9621 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
9622 * won't see stale data.
9623 */
9624 cookie = dtrace_interrupt_disable();
9625
9626 for (i = 0; i < dtrace_nprobes; i++) {
9627 if ((probe = dtrace_probes[i]) == NULL)
9628 continue;
9629
9630 if (probe->dtpr_ecb == NULL) {
9631 /*
9632 * This probe isn't enabled -- don't call the function.
9633 */
9634 continue;
9635 }
9636
9637 prov = probe->dtpr_provider;
9638 func = *((void(**)(void *, dtrace_id_t, void *))
9639 ((uintptr_t)&prov->dtpv_pops + offs));
9640
9641 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
9642 }
9643
9644 dtrace_interrupt_enable(cookie);
9645 }
9646 #endif
9647
9648 static int
9649 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
9650 {
9651 dtrace_probekey_t pkey;
9652 uint32_t priv;
9653 uid_t uid;
9654 zoneid_t zoneid;
9655
9656 ASSERT(MUTEX_HELD(&dtrace_lock));
9657 dtrace_ecb_create_cache = NULL;
9658
9659 if (desc == NULL) {
9660 /*
9661 * If we're passed a NULL description, we're being asked to
9662 * create an ECB with a NULL probe.
9663 */
9664 (void) dtrace_ecb_create_enable(NULL, enab);
9665 return (0);
9666 }
9667
9668 dtrace_probekey(desc, &pkey);
9669 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
9670 &priv, &uid, &zoneid);
9671
9672 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
9673 enab));
9674 }
9675
9676 /*
9677 * DTrace Helper Provider Functions
9678 */
9679 static void
9680 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
9681 {
9682 attr->dtat_name = DOF_ATTR_NAME(dofattr);
9683 attr->dtat_data = DOF_ATTR_DATA(dofattr);
9684 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
9685 }
9686
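/*
 * Fill in a helper provider description from the given DOF provider
 * section, resolving the provider name from the string table and
 * converting each of its packed attributes.
 */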
9687 static void
9688 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
9689 const dof_provider_t *dofprov, char *strtab)
9690 {
9691 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
9692 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
9693 dofprov->dofpv_provattr);
9694 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
9695 dofprov->dofpv_modattr);
9696 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
9697 dofprov->dofpv_funcattr);
9698 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
9699 dofprov->dofpv_nameattr);
9700 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
9701 dofprov->dofpv_argsattr);
9702 }
9703
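/*
 * Provide all of the probes described by a single DOF provider section:
 * create the provider via the meta-provider's dtms_provide_pid() entry
 * point, then create each of its probes via dtms_create_probe().
 */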
9704 static void
9705 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9706 {
9707 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9708 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9709 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
9710 dof_provider_t *provider;
9711 dof_probe_t *probe;
9712 uint32_t *off, *enoff;
9713 uint8_t *arg;
9714 char *strtab;
9715 uint_t i, nprobes;
9716 dtrace_helper_provdesc_t dhpv;
9717 dtrace_helper_probedesc_t dhpb;
9718 dtrace_meta_t *meta = dtrace_meta_pid;
9719 dtrace_mops_t *mops = &meta->dtm_mops;
9720 void *parg;
9721
9722 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9723 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9724 provider->dofpv_strtab * dof->dofh_secsize);
9725 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9726 provider->dofpv_probes * dof->dofh_secsize);
9727 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9728 provider->dofpv_prargs * dof->dofh_secsize);
9729 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9730 provider->dofpv_proffs * dof->dofh_secsize);
9731
9732 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9733 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
9734 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
9735 enoff = NULL;
9736
9737 /*
9738 * See dtrace_helper_provider_validate().
9739 */
9740 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
9741 provider->dofpv_prenoffs != DOF_SECT_NONE) {
9742 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9743 provider->dofpv_prenoffs * dof->dofh_secsize);
9744 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
9745 }
9746
9747 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
9748
9749 /*
9750 * Create the provider.
9751 */
9752 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9753
9754 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
9755 return;
9756
9757 meta->dtm_count++;
9758
9759 /*
9760 * Create the probes.
9761 */
9762 for (i = 0; i < nprobes; i++) {
9763 probe = (dof_probe_t *)(uintptr_t)(daddr +
9764 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
9765
9766 /* See the check in dtrace_helper_provider_validate(). */
9767 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN)
9768 continue;
9769
9770 dhpb.dthpb_mod = dhp->dofhp_mod;
9771 dhpb.dthpb_func = strtab + probe->dofpr_func;
9772 dhpb.dthpb_name = strtab + probe->dofpr_name;
9773 dhpb.dthpb_base = probe->dofpr_addr;
9774 dhpb.dthpb_offs = off + probe->dofpr_offidx;
9775 dhpb.dthpb_noffs = probe->dofpr_noffs;
9776 if (enoff != NULL) {
9777 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
9778 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
9779 } else {
9780 dhpb.dthpb_enoffs = NULL;
9781 dhpb.dthpb_nenoffs = 0;
9782 }
9783 dhpb.dthpb_args = arg + probe->dofpr_argidx;
9784 dhpb.dthpb_nargc = probe->dofpr_nargc;
9785 dhpb.dthpb_xargc = probe->dofpr_xargc;
9786 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
9787 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
9788
9789 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
9790 }
9791 }
9792
9793 static void
9794 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
9795 {
9796 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9797 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9798 int i;
9799
9800 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9801
9802 for (i = 0; i < dof->dofh_secnum; i++) {
9803 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9804 dof->dofh_secoff + i * dof->dofh_secsize);
9805
9806 if (sec->dofs_type != DOF_SECT_PROVIDER)
9807 continue;
9808
9809 dtrace_helper_provide_one(dhp, sec, pid);
9810 }
9811
9812 /*
9813 * We may have just created probes, so we must now rematch against
9814 * any retained enablings. Note that this call will acquire both
9815 * cpu_lock and dtrace_lock; the fact that we are holding
9816 * dtrace_meta_lock now is what defines the ordering with respect to
9817 * these three locks.
9818 */
9819 dtrace_enabling_matchall();
9820 }
9821
9822 static void
9823 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
9824 {
9825 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9826 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9827 dof_sec_t *str_sec;
9828 dof_provider_t *provider;
9829 char *strtab;
9830 dtrace_helper_provdesc_t dhpv;
9831 dtrace_meta_t *meta = dtrace_meta_pid;
9832 dtrace_mops_t *mops = &meta->dtm_mops;
9833
9834 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
9835 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
9836 provider->dofpv_strtab * dof->dofh_secsize);
9837
9838 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
9839
9840 /*
	 * Build the provider description so that the meta-provider can
	 * remove it.
9842 */
9843 dtrace_dofprov2hprov(&dhpv, provider, strtab);
9844
9845 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
9846
9847 meta->dtm_count--;
9848 }
9849
9850 static void
9851 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
9852 {
9853 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
9854 dof_hdr_t *dof = (dof_hdr_t *)daddr;
9855 int i;
9856
9857 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
9858
9859 for (i = 0; i < dof->dofh_secnum; i++) {
9860 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
9861 dof->dofh_secoff + i * dof->dofh_secsize);
9862
9863 if (sec->dofs_type != DOF_SECT_PROVIDER)
9864 continue;
9865
9866 dtrace_helper_provider_remove_one(dhp, sec, pid);
9867 }
9868 }
9869
9870 /*
9871 * DTrace Meta Provider-to-Framework API Functions
9872 *
9873 * These functions implement the Meta Provider-to-Framework API, as described
9874 * in <sys/dtrace.h>.
9875 */
9876 int
9877 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
9878 dtrace_meta_provider_id_t *idp)
9879 {
9880 dtrace_meta_t *meta;
9881 dtrace_helpers_t *help, *next;
9882 int i;
9883
9884 *idp = DTRACE_METAPROVNONE;
9885
9886 /*
9887 * We strictly don't need the name, but we hold onto it for
9888 * debuggability. All hail error queues!
9889 */
9890 if (name == NULL) {
9891 cmn_err(CE_WARN, "failed to register meta-provider: "
9892 "invalid name");
9893 return (EINVAL);
9894 }
9895
9896 if (mops == NULL ||
9897 mops->dtms_create_probe == NULL ||
9898 mops->dtms_provide_pid == NULL ||
9899 mops->dtms_remove_pid == NULL) {
		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9901 "invalid ops", name);
9902 return (EINVAL);
9903 }
9904
9905 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
9906 meta->dtm_mops = *mops;
9907 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
9908 (void) strcpy(meta->dtm_name, name);
9909 meta->dtm_arg = arg;
9910
9911 mutex_enter(&dtrace_meta_lock);
9912 mutex_enter(&dtrace_lock);
9913
9914 if (dtrace_meta_pid != NULL) {
9915 mutex_exit(&dtrace_lock);
9916 mutex_exit(&dtrace_meta_lock);
		cmn_err(CE_WARN, "failed to register meta-provider %s: "
9918 "user-land meta-provider exists", name);
9919 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
9920 kmem_free(meta, sizeof (dtrace_meta_t));
9921 return (EINVAL);
9922 }
9923
9924 dtrace_meta_pid = meta;
9925 *idp = (dtrace_meta_provider_id_t)meta;
9926
9927 /*
9928 * If there are providers and probes ready to go, pass them
9929 * off to the new meta provider now.
9930 */
9931
9932 help = dtrace_deferred_pid;
9933 dtrace_deferred_pid = NULL;
9934
9935 mutex_exit(&dtrace_lock);
9936
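	/*
	 * Note that dtrace_helper_provide() will rematch any retained
	 * enablings, which acquires both cpu_lock and dtrace_lock; we
	 * dropped dtrace_lock above (while continuing to hold
	 * dtrace_meta_lock) to permit that.
	 */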
9937 while (help != NULL) {
9938 for (i = 0; i < help->dthps_nprovs; i++) {
9939 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
9940 help->dthps_pid);
9941 }
9942
9943 next = help->dthps_next;
9944 help->dthps_next = NULL;
9945 help->dthps_prev = NULL;
9946 help->dthps_deferred = 0;
9947 help = next;
9948 }
9949
9950 mutex_exit(&dtrace_meta_lock);
9951
9952 return (0);
9953 }
9954
9955 int
9956 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
9957 {
9958 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
9959
9960 mutex_enter(&dtrace_meta_lock);
9961 mutex_enter(&dtrace_lock);
9962
9963 if (old == dtrace_meta_pid) {
9964 pp = &dtrace_meta_pid;
9965 } else {
9966 panic("attempt to unregister non-existent "
9967 "dtrace meta-provider %p\n", (void *)old);
9968 }
9969
9970 if (old->dtm_count != 0) {
9971 mutex_exit(&dtrace_lock);
9972 mutex_exit(&dtrace_meta_lock);
9973 return (EBUSY);
9974 }
9975
9976 *pp = NULL;
9977
9978 mutex_exit(&dtrace_lock);
9979 mutex_exit(&dtrace_meta_lock);
9980
9981 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
9982 kmem_free(old, sizeof (dtrace_meta_t));
9983
9984 return (0);
9985 }
9986
9987
9988 /*
9989 * DTrace DIF Object Functions
9990 */
9991 static int
9992 dtrace_difo_err(uint_t pc, const char *format, ...)
9993 {
9994 if (dtrace_err_verbose) {
9995 va_list alist;
9996
9997 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
9998 va_start(alist, format);
9999 (void) vuprintf(format, alist);
10000 va_end(alist);
10001 }
10002
10003 #ifdef DTRACE_ERRDEBUG
10004 dtrace_errdebug(format);
10005 #endif
10006 return (1);
10007 }
10008
10009 /*
10010 * Validate a DTrace DIF object by checking the IR instructions. The following
10011 * rules are currently enforced by dtrace_difo_validate():
10012 *
10013 * 1. Each instruction must have a valid opcode
10014 * 2. Each register, string, variable, or subroutine reference must be valid
10015 * 3. No instruction can modify register %r0 (must be zero)
10016 * 4. All instruction reserved bits must be set to zero
10017 * 5. The last instruction must be a "ret" instruction
10018 * 6. All branch targets must reference a valid instruction _after_ the branch
10019 */
10020 static int
10021 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
10022 cred_t *cr)
10023 {
10024 int err = 0, i;
10025 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
10026 int kcheckload;
10027 uint_t pc;
10028 int maxglobal = -1, maxlocal = -1, maxtlocal = -1;
10029
10030 kcheckload = cr == NULL ||
10031 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
10032
10033 dp->dtdo_destructive = 0;
10034
10035 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
10036 dif_instr_t instr = dp->dtdo_buf[pc];
10037
10038 uint_t r1 = DIF_INSTR_R1(instr);
10039 uint_t r2 = DIF_INSTR_R2(instr);
10040 uint_t rd = DIF_INSTR_RD(instr);
10041 uint_t rs = DIF_INSTR_RS(instr);
10042 uint_t label = DIF_INSTR_LABEL(instr);
10043 uint_t v = DIF_INSTR_VAR(instr);
10044 uint_t subr = DIF_INSTR_SUBR(instr);
10045 uint_t type = DIF_INSTR_TYPE(instr);
10046 uint_t op = DIF_INSTR_OP(instr);
10047
10048 switch (op) {
10049 case DIF_OP_OR:
10050 case DIF_OP_XOR:
10051 case DIF_OP_AND:
10052 case DIF_OP_SLL:
10053 case DIF_OP_SRL:
10054 case DIF_OP_SRA:
10055 case DIF_OP_SUB:
10056 case DIF_OP_ADD:
10057 case DIF_OP_MUL:
10058 case DIF_OP_SDIV:
10059 case DIF_OP_UDIV:
10060 case DIF_OP_SREM:
10061 case DIF_OP_UREM:
10062 case DIF_OP_COPYS:
10063 if (r1 >= nregs)
10064 err += efunc(pc, "invalid register %u\n", r1);
10065 if (r2 >= nregs)
10066 err += efunc(pc, "invalid register %u\n", r2);
10067 if (rd >= nregs)
10068 err += efunc(pc, "invalid register %u\n", rd);
10069 if (rd == 0)
10070 err += efunc(pc, "cannot write to %r0\n");
10071 break;
10072 case DIF_OP_NOT:
10073 case DIF_OP_MOV:
10074 case DIF_OP_ALLOCS:
10075 if (r1 >= nregs)
10076 err += efunc(pc, "invalid register %u\n", r1);
10077 if (r2 != 0)
10078 err += efunc(pc, "non-zero reserved bits\n");
10079 if (rd >= nregs)
10080 err += efunc(pc, "invalid register %u\n", rd);
10081 if (rd == 0)
10082 err += efunc(pc, "cannot write to %r0\n");
10083 break;
10084 case DIF_OP_LDSB:
10085 case DIF_OP_LDSH:
10086 case DIF_OP_LDSW:
10087 case DIF_OP_LDUB:
10088 case DIF_OP_LDUH:
10089 case DIF_OP_LDUW:
10090 case DIF_OP_LDX:
10091 if (r1 >= nregs)
10092 err += efunc(pc, "invalid register %u\n", r1);
10093 if (r2 != 0)
10094 err += efunc(pc, "non-zero reserved bits\n");
10095 if (rd >= nregs)
10096 err += efunc(pc, "invalid register %u\n", rd);
10097 if (rd == 0)
10098 err += efunc(pc, "cannot write to %r0\n");
10099 if (kcheckload)
10100 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
10101 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
10102 break;
10103 case DIF_OP_RLDSB:
10104 case DIF_OP_RLDSH:
10105 case DIF_OP_RLDSW:
10106 case DIF_OP_RLDUB:
10107 case DIF_OP_RLDUH:
10108 case DIF_OP_RLDUW:
10109 case DIF_OP_RLDX:
10110 if (r1 >= nregs)
10111 err += efunc(pc, "invalid register %u\n", r1);
10112 if (r2 != 0)
10113 err += efunc(pc, "non-zero reserved bits\n");
10114 if (rd >= nregs)
10115 err += efunc(pc, "invalid register %u\n", rd);
10116 if (rd == 0)
10117 err += efunc(pc, "cannot write to %r0\n");
10118 break;
10119 case DIF_OP_ULDSB:
10120 case DIF_OP_ULDSH:
10121 case DIF_OP_ULDSW:
10122 case DIF_OP_ULDUB:
10123 case DIF_OP_ULDUH:
10124 case DIF_OP_ULDUW:
10125 case DIF_OP_ULDX:
10126 if (r1 >= nregs)
10127 err += efunc(pc, "invalid register %u\n", r1);
10128 if (r2 != 0)
10129 err += efunc(pc, "non-zero reserved bits\n");
10130 if (rd >= nregs)
10131 err += efunc(pc, "invalid register %u\n", rd);
10132 if (rd == 0)
10133 err += efunc(pc, "cannot write to %r0\n");
10134 break;
10135 case DIF_OP_STB:
10136 case DIF_OP_STH:
10137 case DIF_OP_STW:
10138 case DIF_OP_STX:
10139 if (r1 >= nregs)
10140 err += efunc(pc, "invalid register %u\n", r1);
10141 if (r2 != 0)
10142 err += efunc(pc, "non-zero reserved bits\n");
10143 if (rd >= nregs)
10144 err += efunc(pc, "invalid register %u\n", rd);
10145 if (rd == 0)
10146 err += efunc(pc, "cannot write to 0 address\n");
10147 break;
10148 case DIF_OP_CMP:
10149 case DIF_OP_SCMP:
10150 if (r1 >= nregs)
10151 err += efunc(pc, "invalid register %u\n", r1);
10152 if (r2 >= nregs)
10153 err += efunc(pc, "invalid register %u\n", r2);
10154 if (rd != 0)
10155 err += efunc(pc, "non-zero reserved bits\n");
10156 break;
10157 case DIF_OP_TST:
10158 if (r1 >= nregs)
10159 err += efunc(pc, "invalid register %u\n", r1);
10160 if (r2 != 0 || rd != 0)
10161 err += efunc(pc, "non-zero reserved bits\n");
10162 break;
10163 case DIF_OP_BA:
10164 case DIF_OP_BE:
10165 case DIF_OP_BNE:
10166 case DIF_OP_BG:
10167 case DIF_OP_BGU:
10168 case DIF_OP_BGE:
10169 case DIF_OP_BGEU:
10170 case DIF_OP_BL:
10171 case DIF_OP_BLU:
10172 case DIF_OP_BLE:
10173 case DIF_OP_BLEU:
10174 if (label >= dp->dtdo_len) {
10175 err += efunc(pc, "invalid branch target %u\n",
10176 label);
10177 }
10178 if (label <= pc) {
10179 err += efunc(pc, "backward branch to %u\n",
10180 label);
10181 }
10182 break;
10183 case DIF_OP_RET:
10184 if (r1 != 0 || r2 != 0)
10185 err += efunc(pc, "non-zero reserved bits\n");
10186 if (rd >= nregs)
10187 err += efunc(pc, "invalid register %u\n", rd);
10188 break;
10189 case DIF_OP_NOP:
10190 case DIF_OP_POPTS:
10191 case DIF_OP_FLUSHTS:
10192 if (r1 != 0 || r2 != 0 || rd != 0)
10193 err += efunc(pc, "non-zero reserved bits\n");
10194 break;
10195 case DIF_OP_SETX:
10196 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
10197 err += efunc(pc, "invalid integer ref %u\n",
10198 DIF_INSTR_INTEGER(instr));
10199 }
10200 if (rd >= nregs)
10201 err += efunc(pc, "invalid register %u\n", rd);
10202 if (rd == 0)
10203 err += efunc(pc, "cannot write to %r0\n");
10204 break;
10205 case DIF_OP_SETS:
10206 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
10207 err += efunc(pc, "invalid string ref %u\n",
10208 DIF_INSTR_STRING(instr));
10209 }
10210 if (rd >= nregs)
10211 err += efunc(pc, "invalid register %u\n", rd);
10212 if (rd == 0)
10213 err += efunc(pc, "cannot write to %r0\n");
10214 break;
10215 case DIF_OP_LDGA:
10216 case DIF_OP_LDTA:
10217 if (r1 > DIF_VAR_ARRAY_MAX)
10218 err += efunc(pc, "invalid array %u\n", r1);
10219 if (r2 >= nregs)
10220 err += efunc(pc, "invalid register %u\n", r2);
10221 if (rd >= nregs)
10222 err += efunc(pc, "invalid register %u\n", rd);
10223 if (rd == 0)
10224 err += efunc(pc, "cannot write to %r0\n");
10225 break;
10226 case DIF_OP_LDGS:
10227 case DIF_OP_LDTS:
10228 case DIF_OP_LDLS:
10229 case DIF_OP_LDGAA:
10230 case DIF_OP_LDTAA:
10231 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
10232 err += efunc(pc, "invalid variable %u\n", v);
10233 if (rd >= nregs)
10234 err += efunc(pc, "invalid register %u\n", rd);
10235 if (rd == 0)
10236 err += efunc(pc, "cannot write to %r0\n");
10237 break;
10238 case DIF_OP_STGS:
10239 case DIF_OP_STTS:
10240 case DIF_OP_STLS:
10241 case DIF_OP_STGAA:
10242 case DIF_OP_STTAA:
10243 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
10244 err += efunc(pc, "invalid variable %u\n", v);
10245 if (rs >= nregs)
				err += efunc(pc, "invalid register %u\n", rs);
10247 break;
10248 case DIF_OP_CALL:
10249 if (subr > DIF_SUBR_MAX)
10250 err += efunc(pc, "invalid subr %u\n", subr);
10251 if (rd >= nregs)
10252 err += efunc(pc, "invalid register %u\n", rd);
10253 if (rd == 0)
10254 err += efunc(pc, "cannot write to %r0\n");
10255
10256 if (subr == DIF_SUBR_COPYOUT ||
10257 subr == DIF_SUBR_COPYOUTSTR) {
10258 dp->dtdo_destructive = 1;
10259 }
10260 if (subr == DIF_SUBR_GETF) {
10261 /*
10262 * If we have a getf() we need to record that
10263 * in our state. Note that our state can be
10264 * NULL if this is a helper -- but in that
10265 * case, the call to getf() is itself illegal,
10266 * and will be caught (slightly later) when
10267 * the helper is validated.
10268 */
10269 if (vstate->dtvs_state != NULL)
10270 vstate->dtvs_state->dts_getf++;
10271 }
10272
10273 break;
10274 case DIF_OP_PUSHTR:
10275 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
10276 err += efunc(pc, "invalid ref type %u\n", type);
10277 if (r2 >= nregs)
10278 err += efunc(pc, "invalid register %u\n", r2);
10279 if (rs >= nregs)
10280 err += efunc(pc, "invalid register %u\n", rs);
10281 break;
10282 case DIF_OP_PUSHTV:
10283 if (type != DIF_TYPE_CTF)
10284 err += efunc(pc, "invalid val type %u\n", type);
10285 if (r2 >= nregs)
10286 err += efunc(pc, "invalid register %u\n", r2);
10287 if (rs >= nregs)
10288 err += efunc(pc, "invalid register %u\n", rs);
10289 break;
10290 default:
10291 err += efunc(pc, "invalid opcode %u\n",
10292 DIF_INSTR_OP(instr));
10293 }
10294 }
10295
10296 if (dp->dtdo_len != 0 &&
10297 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
10298 err += efunc(dp->dtdo_len - 1,
10299 "expected 'ret' as last DIF instruction\n");
10300 }
10301
10302 if (!(dp->dtdo_rtype.dtdt_flags & (DIF_TF_BYREF | DIF_TF_BYUREF))) {
10303 /*
10304 * If we're not returning by reference, the size must be either
10305 * 0 or the size of one of the base types.
10306 */
10307 switch (dp->dtdo_rtype.dtdt_size) {
10308 case 0:
10309 case sizeof (uint8_t):
10310 case sizeof (uint16_t):
10311 case sizeof (uint32_t):
10312 case sizeof (uint64_t):
10313 break;
10314
10315 default:
10316 err += efunc(dp->dtdo_len - 1, "bad return size\n");
10317 }
10318 }
10319
10320 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
10321 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
10322 dtrace_diftype_t *vt, *et;
10323 uint_t id, ndx;
10324
10325 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
10326 v->dtdv_scope != DIFV_SCOPE_THREAD &&
10327 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
10328 err += efunc(i, "unrecognized variable scope %d\n",
10329 v->dtdv_scope);
10330 break;
10331 }
10332
10333 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
10334 v->dtdv_kind != DIFV_KIND_SCALAR) {
10335 err += efunc(i, "unrecognized variable type %d\n",
10336 v->dtdv_kind);
10337 break;
10338 }
10339
10340 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
10341 err += efunc(i, "%d exceeds variable id limit\n", id);
10342 break;
10343 }
10344
10345 if (id < DIF_VAR_OTHER_UBASE)
10346 continue;
10347
10348 /*
10349 * For user-defined variables, we need to check that this
10350 * definition is identical to any previous definition that we
10351 * encountered.
10352 */
10353 ndx = id - DIF_VAR_OTHER_UBASE;
10354
10355 switch (v->dtdv_scope) {
10356 case DIFV_SCOPE_GLOBAL:
10357 if (maxglobal == -1 || ndx > maxglobal)
10358 maxglobal = ndx;
10359
10360 if (ndx < vstate->dtvs_nglobals) {
10361 dtrace_statvar_t *svar;
10362
10363 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
10364 existing = &svar->dtsv_var;
10365 }
10366
10367 break;
10368
10369 case DIFV_SCOPE_THREAD:
10370 if (maxtlocal == -1 || ndx > maxtlocal)
10371 maxtlocal = ndx;
10372
10373 if (ndx < vstate->dtvs_ntlocals)
10374 existing = &vstate->dtvs_tlocals[ndx];
10375 break;
10376
10377 case DIFV_SCOPE_LOCAL:
10378 if (maxlocal == -1 || ndx > maxlocal)
10379 maxlocal = ndx;
10380
10381 if (ndx < vstate->dtvs_nlocals) {
10382 dtrace_statvar_t *svar;
10383
10384 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
10385 existing = &svar->dtsv_var;
10386 }
10387
10388 break;
10389 }
10390
10391 vt = &v->dtdv_type;
10392
10393 if (vt->dtdt_flags & DIF_TF_BYREF) {
10394 if (vt->dtdt_size == 0) {
10395 err += efunc(i, "zero-sized variable\n");
10396 break;
10397 }
10398
10399 if ((v->dtdv_scope == DIFV_SCOPE_GLOBAL ||
10400 v->dtdv_scope == DIFV_SCOPE_LOCAL) &&
10401 vt->dtdt_size > dtrace_statvar_maxsize) {
10402 err += efunc(i, "oversized by-ref static\n");
10403 break;
10404 }
10405 }
10406
10407 if (existing == NULL || existing->dtdv_id == 0)
10408 continue;
10409
10410 ASSERT(existing->dtdv_id == v->dtdv_id);
10411 ASSERT(existing->dtdv_scope == v->dtdv_scope);
10412
10413 if (existing->dtdv_kind != v->dtdv_kind)
10414 err += efunc(i, "%d changed variable kind\n", id);
10415
10416 et = &existing->dtdv_type;
10417
10418 if (vt->dtdt_flags != et->dtdt_flags) {
10419 err += efunc(i, "%d changed variable type flags\n", id);
10420 break;
10421 }
10422
10423 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
10424 err += efunc(i, "%d changed variable type size\n", id);
10425 break;
10426 }
10427 }
10428
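	/*
	 * Second pass over the instructions: now that the maximum global,
	 * thread-local and clause-local user variable indices are known,
	 * verify that every variable reference is within those bounds.
	 */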
10429 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
10430 dif_instr_t instr = dp->dtdo_buf[pc];
10431
10432 uint_t v = DIF_INSTR_VAR(instr);
10433 uint_t op = DIF_INSTR_OP(instr);
10434
10435 switch (op) {
10436 case DIF_OP_LDGS:
10437 case DIF_OP_LDGAA:
10438 case DIF_OP_STGS:
10439 case DIF_OP_STGAA:
10440 if (v > DIF_VAR_OTHER_UBASE + maxglobal)
10441 err += efunc(pc, "invalid variable %u\n", v);
10442 break;
10443 case DIF_OP_LDTS:
10444 case DIF_OP_LDTAA:
10445 case DIF_OP_STTS:
10446 case DIF_OP_STTAA:
10447 if (v > DIF_VAR_OTHER_UBASE + maxtlocal)
10448 err += efunc(pc, "invalid variable %u\n", v);
10449 break;
10450 case DIF_OP_LDLS:
10451 case DIF_OP_STLS:
10452 if (v > DIF_VAR_OTHER_UBASE + maxlocal)
10453 err += efunc(pc, "invalid variable %u\n", v);
10454 break;
10455 default:
10456 break;
10457 }
10458 }
10459
10460 return (err);
10461 }
10462
10463 /*
 * Validate a DTrace DIF object that is to be used as a helper.  Helpers
10465 * are much more constrained than normal DIFOs. Specifically, they may
10466 * not:
10467 *
10468 * 1. Make calls to subroutines other than copyin(), copyinstr() or
10469 * miscellaneous string routines
10470 * 2. Access DTrace variables other than the args[] array, and the
10471 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
10472 * 3. Have thread-local variables.
10473 * 4. Have dynamic variables.
10474 */
10475 static int
10476 dtrace_difo_validate_helper(dtrace_difo_t *dp)
10477 {
10478 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
10479 int err = 0;
10480 uint_t pc;
10481
10482 for (pc = 0; pc < dp->dtdo_len; pc++) {
10483 dif_instr_t instr = dp->dtdo_buf[pc];
10484
10485 uint_t v = DIF_INSTR_VAR(instr);
10486 uint_t subr = DIF_INSTR_SUBR(instr);
10487 uint_t op = DIF_INSTR_OP(instr);
10488
10489 switch (op) {
10490 case DIF_OP_OR:
10491 case DIF_OP_XOR:
10492 case DIF_OP_AND:
10493 case DIF_OP_SLL:
10494 case DIF_OP_SRL:
10495 case DIF_OP_SRA:
10496 case DIF_OP_SUB:
10497 case DIF_OP_ADD:
10498 case DIF_OP_MUL:
10499 case DIF_OP_SDIV:
10500 case DIF_OP_UDIV:
10501 case DIF_OP_SREM:
10502 case DIF_OP_UREM:
10503 case DIF_OP_COPYS:
10504 case DIF_OP_NOT:
10505 case DIF_OP_MOV:
10506 case DIF_OP_RLDSB:
10507 case DIF_OP_RLDSH:
10508 case DIF_OP_RLDSW:
10509 case DIF_OP_RLDUB:
10510 case DIF_OP_RLDUH:
10511 case DIF_OP_RLDUW:
10512 case DIF_OP_RLDX:
10513 case DIF_OP_ULDSB:
10514 case DIF_OP_ULDSH:
10515 case DIF_OP_ULDSW:
10516 case DIF_OP_ULDUB:
10517 case DIF_OP_ULDUH:
10518 case DIF_OP_ULDUW:
10519 case DIF_OP_ULDX:
10520 case DIF_OP_STB:
10521 case DIF_OP_STH:
10522 case DIF_OP_STW:
10523 case DIF_OP_STX:
10524 case DIF_OP_ALLOCS:
10525 case DIF_OP_CMP:
10526 case DIF_OP_SCMP:
10527 case DIF_OP_TST:
10528 case DIF_OP_BA:
10529 case DIF_OP_BE:
10530 case DIF_OP_BNE:
10531 case DIF_OP_BG:
10532 case DIF_OP_BGU:
10533 case DIF_OP_BGE:
10534 case DIF_OP_BGEU:
10535 case DIF_OP_BL:
10536 case DIF_OP_BLU:
10537 case DIF_OP_BLE:
10538 case DIF_OP_BLEU:
10539 case DIF_OP_RET:
10540 case DIF_OP_NOP:
10541 case DIF_OP_POPTS:
10542 case DIF_OP_FLUSHTS:
10543 case DIF_OP_SETX:
10544 case DIF_OP_SETS:
10545 case DIF_OP_LDGA:
10546 case DIF_OP_LDLS:
10547 case DIF_OP_STGS:
10548 case DIF_OP_STLS:
10549 case DIF_OP_PUSHTR:
10550 case DIF_OP_PUSHTV:
10551 break;
10552
10553 case DIF_OP_LDGS:
10554 if (v >= DIF_VAR_OTHER_UBASE)
10555 break;
10556
10557 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
10558 break;
10559
10560 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
10561 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
10562 v == DIF_VAR_EXECARGS ||
10563 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
10564 v == DIF_VAR_UID || v == DIF_VAR_GID)
10565 break;
10566
10567 err += efunc(pc, "illegal variable %u\n", v);
10568 break;
10569
10570 case DIF_OP_LDTA:
10571 case DIF_OP_LDTS:
10572 case DIF_OP_LDGAA:
10573 case DIF_OP_LDTAA:
10574 err += efunc(pc, "illegal dynamic variable load\n");
10575 break;
10576
10577 case DIF_OP_STTS:
10578 case DIF_OP_STGAA:
10579 case DIF_OP_STTAA:
10580 err += efunc(pc, "illegal dynamic variable store\n");
10581 break;
10582
10583 case DIF_OP_CALL:
10584 if (subr == DIF_SUBR_ALLOCA ||
10585 subr == DIF_SUBR_BCOPY ||
10586 subr == DIF_SUBR_COPYIN ||
10587 subr == DIF_SUBR_COPYINTO ||
10588 subr == DIF_SUBR_COPYINSTR ||
10589 subr == DIF_SUBR_INDEX ||
10590 subr == DIF_SUBR_INET_NTOA ||
10591 subr == DIF_SUBR_INET_NTOA6 ||
10592 subr == DIF_SUBR_INET_NTOP ||
10593 subr == DIF_SUBR_JSON ||
10594 subr == DIF_SUBR_LLTOSTR ||
10595 subr == DIF_SUBR_STRTOLL ||
10596 subr == DIF_SUBR_RINDEX ||
10597 subr == DIF_SUBR_STRCHR ||
10598 subr == DIF_SUBR_STRJOIN ||
10599 subr == DIF_SUBR_STRRCHR ||
10600 subr == DIF_SUBR_STRSTR ||
10601 subr == DIF_SUBR_HTONS ||
10602 subr == DIF_SUBR_HTONL ||
10603 subr == DIF_SUBR_HTONLL ||
10604 subr == DIF_SUBR_NTOHS ||
10605 subr == DIF_SUBR_NTOHL ||
10606 subr == DIF_SUBR_NTOHLL ||
10607 subr == DIF_SUBR_MEMREF)
10608 break;
10609
10610 #if defined(__FreeBSD__) || defined(__NetBSD__)
10611 if (subr == DIF_SUBR_MEMSTR)
10612 break;
10613 #endif
10614
10615 err += efunc(pc, "invalid subr %u\n", subr);
10616 break;
10617
10618 default:
10619 err += efunc(pc, "invalid opcode %u\n",
10620 DIF_INSTR_OP(instr));
10621 }
10622 }
10623
10624 return (err);
10625 }
10626
10627 /*
10628 * Returns 1 if the expression in the DIF object can be cached on a per-thread
10629 * basis; 0 if not.
10630 */
10631 static int
10632 dtrace_difo_cacheable(dtrace_difo_t *dp)
10633 {
10634 int i;
10635
10636 if (dp == NULL)
10637 return (0);
10638
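	/*
	 * First check the variable table: only a small set of variables
	 * is compatible with caching a predicate result on a per-thread
	 * basis; anything else disqualifies the DIFO.
	 */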
10639 for (i = 0; i < dp->dtdo_varlen; i++) {
10640 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10641
10642 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
10643 continue;
10644
10645 switch (v->dtdv_id) {
10646 case DIF_VAR_CURTHREAD:
10647 case DIF_VAR_PID:
10648 case DIF_VAR_TID:
10649 case DIF_VAR_EXECARGS:
10650 case DIF_VAR_EXECNAME:
10651 case DIF_VAR_ZONENAME:
10652 break;
10653
10654 default:
10655 return (0);
10656 }
10657 }
10658
10659 /*
10660 * This DIF object may be cacheable. Now we need to look for any
10661 * array loading instructions, any memory loading instructions, or
10662 * any stores to thread-local variables.
10663 */
10664 for (i = 0; i < dp->dtdo_len; i++) {
10665 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
10666
10667 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
10668 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
10669 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
10670 op == DIF_OP_LDGA || op == DIF_OP_STTS)
10671 return (0);
10672 }
10673
10674 return (1);
10675 }
10676
10677 static void
10678 dtrace_difo_hold(dtrace_difo_t *dp)
10679 {
10680 int i;
10681
10682 ASSERT(MUTEX_HELD(&dtrace_lock));
10683
10684 dp->dtdo_refcnt++;
10685 ASSERT(dp->dtdo_refcnt != 0);
10686
10687 /*
10688 * We need to check this DIF object for references to the variable
10689 * DIF_VAR_VTIMESTAMP.
10690 */
10691 for (i = 0; i < dp->dtdo_varlen; i++) {
10692 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10693
10694 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
10695 continue;
10696
10697 if (dtrace_vtime_references++ == 0)
10698 dtrace_vtime_enable();
10699 }
10700 }
10701
10702 /*
10703 * This routine calculates the dynamic variable chunksize for a given DIF
10704 * object. The calculation is not fool-proof, and can probably be tricked by
10705 * malicious DIF -- but it works for all compiler-generated DIF. Because this
10706 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
10707 * if a dynamic variable size exceeds the chunksize.
10708 */
10709 static void
10710 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10711 {
10712 uint64_t sval = 0;
10713 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
10714 const dif_instr_t *text = dp->dtdo_buf;
10715 uint_t pc, srd = 0;
10716 uint_t ttop = 0;
10717 size_t size, ksize;
10718 uint_t id, i;
10719
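	/*
	 * Walk the DIF text, tracking the most recent "setx" value and its
	 * destination register (sval/srd) along with any pushed tuple keys
	 * (tupregs/ttop), so that the key and data sizes of each dynamic
	 * variable store can be estimated.
	 */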
10720 for (pc = 0; pc < dp->dtdo_len; pc++) {
10721 dif_instr_t instr = text[pc];
10722 uint_t op = DIF_INSTR_OP(instr);
10723 uint_t rd = DIF_INSTR_RD(instr);
10724 uint_t r1 = DIF_INSTR_R1(instr);
10725 uint_t nkeys = 0;
10726 uchar_t scope = 0;
10727
10728 dtrace_key_t *key = tupregs;
10729
10730 switch (op) {
10731 case DIF_OP_SETX:
10732 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
10733 srd = rd;
10734 continue;
10735
10736 case DIF_OP_STTS:
10737 key = &tupregs[DIF_DTR_NREGS];
10738 key[0].dttk_size = 0;
10739 key[1].dttk_size = 0;
10740 nkeys = 2;
10741 scope = DIFV_SCOPE_THREAD;
10742 break;
10743
10744 case DIF_OP_STGAA:
10745 case DIF_OP_STTAA:
10746 nkeys = ttop;
10747
10748 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
10749 key[nkeys++].dttk_size = 0;
10750
10751 key[nkeys++].dttk_size = 0;
10752
10753 if (op == DIF_OP_STTAA) {
10754 scope = DIFV_SCOPE_THREAD;
10755 } else {
10756 scope = DIFV_SCOPE_GLOBAL;
10757 }
10758
10759 break;
10760
10761 case DIF_OP_PUSHTR:
10762 if (ttop == DIF_DTR_NREGS)
10763 return;
10764
10765 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
10766 /*
10767 * If the register for the size of the "pushtr"
10768 * is %r0 (or the value is 0) and the type is
10769 * a string, we'll use the system-wide default
10770 * string size.
10771 */
10772 tupregs[ttop++].dttk_size =
10773 dtrace_strsize_default;
10774 } else {
10775 if (srd == 0)
10776 return;
10777
10778 if (sval > LONG_MAX)
10779 return;
10780
10781 tupregs[ttop++].dttk_size = sval;
10782 }
10783
10784 break;
10785
10786 case DIF_OP_PUSHTV:
10787 if (ttop == DIF_DTR_NREGS)
10788 return;
10789
10790 tupregs[ttop++].dttk_size = 0;
10791 break;
10792
10793 case DIF_OP_FLUSHTS:
10794 ttop = 0;
10795 break;
10796
10797 case DIF_OP_POPTS:
10798 if (ttop != 0)
10799 ttop--;
10800 break;
10801 }
10802
10803 sval = 0;
10804 srd = 0;
10805
10806 if (nkeys == 0)
10807 continue;
10808
10809 /*
10810 * We have a dynamic variable allocation; calculate its size.
10811 */
10812 for (ksize = 0, i = 0; i < nkeys; i++)
10813 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
10814
10815 size = sizeof (dtrace_dynvar_t);
10816 size += sizeof (dtrace_key_t) * (nkeys - 1);
10817 size += ksize;
10818
10819 /*
10820 * Now we need to determine the size of the stored data.
10821 */
10822 id = DIF_INSTR_VAR(instr);
10823
10824 for (i = 0; i < dp->dtdo_varlen; i++) {
10825 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10826
10827 if (v->dtdv_id == id && v->dtdv_scope == scope) {
10828 size += v->dtdv_type.dtdt_size;
10829 break;
10830 }
10831 }
10832
10833 if (i == dp->dtdo_varlen)
10834 return;
10835
10836 /*
10837 * We have the size. If this is larger than the chunk size
10838 * for our dynamic variable state, reset the chunk size.
10839 */
10840 size = P2ROUNDUP(size, sizeof (uint64_t));
10841
10842 /*
10843 * Before setting the chunk size, check that we're not going
10844 * to set it to a negative value...
10845 */
10846 if (size > LONG_MAX)
10847 return;
10848
10849 /*
10850 * ...and make certain that we didn't badly overflow.
10851 */
10852 if (size < ksize || size < sizeof (dtrace_dynvar_t))
10853 return;
10854
10855 if (size > vstate->dtvs_dynvars.dtds_chunksize)
10856 vstate->dtvs_dynvars.dtds_chunksize = size;
10857 }
10858 }
10859
10860 static void
10861 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10862 {
10863 int i, oldsvars, osz, nsz, otlocals, ntlocals;
10864 uint_t id;
10865
10866 ASSERT(MUTEX_HELD(&dtrace_lock));
10867 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
10868
10869 for (i = 0; i < dp->dtdo_varlen; i++) {
10870 dtrace_difv_t *v = &dp->dtdo_vartab[i];
10871 dtrace_statvar_t *svar, ***svarp = NULL;
10872 size_t dsize = 0;
10873 uint8_t scope = v->dtdv_scope;
10874 int *np = NULL;
10875
10876 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
10877 continue;
10878
10879 id -= DIF_VAR_OTHER_UBASE;
10880
10881 switch (scope) {
10882 case DIFV_SCOPE_THREAD:
10883 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
10884 dtrace_difv_t *tlocals;
10885
10886 if ((ntlocals = (otlocals << 1)) == 0)
10887 ntlocals = 1;
10888
10889 osz = otlocals * sizeof (dtrace_difv_t);
10890 nsz = ntlocals * sizeof (dtrace_difv_t);
10891
10892 tlocals = kmem_zalloc(nsz, KM_SLEEP);
10893
10894 if (osz != 0) {
10895 bcopy(vstate->dtvs_tlocals,
10896 tlocals, osz);
10897 kmem_free(vstate->dtvs_tlocals, osz);
10898 }
10899
10900 vstate->dtvs_tlocals = tlocals;
10901 vstate->dtvs_ntlocals = ntlocals;
10902 }
10903
10904 vstate->dtvs_tlocals[id] = *v;
10905 continue;
10906
10907 case DIFV_SCOPE_LOCAL:
10908 np = &vstate->dtvs_nlocals;
10909 svarp = &vstate->dtvs_locals;
10910
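			/*
			 * Clause-local variables are given one slot (or, for
			 * by-ref variables, one buffer) per CPU, since probes
			 * may fire concurrently on different CPUs.
			 */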
10911 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10912 dsize = NCPU * (v->dtdv_type.dtdt_size +
10913 sizeof (uint64_t));
10914 else
10915 dsize = NCPU * sizeof (uint64_t);
10916
10917 break;
10918
10919 case DIFV_SCOPE_GLOBAL:
10920 np = &vstate->dtvs_nglobals;
10921 svarp = &vstate->dtvs_globals;
10922
10923 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
10924 dsize = v->dtdv_type.dtdt_size +
10925 sizeof (uint64_t);
10926
10927 break;
10928
10929 default:
10930 ASSERT(0);
10931 }
10932
10933 while (id >= (oldsvars = *np)) {
10934 dtrace_statvar_t **statics;
10935 int newsvars, oldsize, newsize;
10936
10937 if ((newsvars = (oldsvars << 1)) == 0)
10938 newsvars = 1;
10939
10940 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
10941 newsize = newsvars * sizeof (dtrace_statvar_t *);
10942
10943 statics = kmem_zalloc(newsize, KM_SLEEP);
10944
10945 if (oldsize != 0) {
10946 bcopy(*svarp, statics, oldsize);
10947 kmem_free(*svarp, oldsize);
10948 }
10949
10950 *svarp = statics;
10951 *np = newsvars;
10952 }
10953
10954 if ((svar = (*svarp)[id]) == NULL) {
10955 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
10956 svar->dtsv_var = *v;
10957
10958 if ((svar->dtsv_size = dsize) != 0) {
10959 svar->dtsv_data = (uint64_t)(uintptr_t)
10960 kmem_zalloc(dsize, KM_SLEEP);
10961 }
10962
10963 (*svarp)[id] = svar;
10964 }
10965
10966 svar->dtsv_refcnt++;
10967 }
10968
10969 dtrace_difo_chunksize(dp, vstate);
10970 dtrace_difo_hold(dp);
10971 }
10972
10973 static dtrace_difo_t *
10974 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
10975 {
10976 dtrace_difo_t *new;
10977 size_t sz;
10978
10979 ASSERT(dp->dtdo_buf != NULL);
10980 ASSERT(dp->dtdo_refcnt != 0);
10981
10982 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
10983
10984 ASSERT(dp->dtdo_buf != NULL);
10985 sz = dp->dtdo_len * sizeof (dif_instr_t);
10986 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
10987 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
10988 new->dtdo_len = dp->dtdo_len;
10989
10990 if (dp->dtdo_strtab != NULL) {
10991 ASSERT(dp->dtdo_strlen != 0);
10992 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
10993 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
10994 new->dtdo_strlen = dp->dtdo_strlen;
10995 }
10996
10997 if (dp->dtdo_inttab != NULL) {
10998 ASSERT(dp->dtdo_intlen != 0);
10999 sz = dp->dtdo_intlen * sizeof (uint64_t);
11000 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
11001 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
11002 new->dtdo_intlen = dp->dtdo_intlen;
11003 }
11004
11005 if (dp->dtdo_vartab != NULL) {
11006 ASSERT(dp->dtdo_varlen != 0);
11007 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
11008 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
11009 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
11010 new->dtdo_varlen = dp->dtdo_varlen;
11011 }
11012
11013 dtrace_difo_init(new, vstate);
11014 return (new);
11015 }
11016
11017 static void
11018 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
11019 {
11020 int i;
11021
11022 ASSERT(dp->dtdo_refcnt == 0);
11023
11024 for (i = 0; i < dp->dtdo_varlen; i++) {
11025 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11026 dtrace_statvar_t *svar, **svarp = NULL;
11027 uint_t id;
11028 uint8_t scope = v->dtdv_scope;
11029 int *np = NULL;
11030
11031 switch (scope) {
11032 case DIFV_SCOPE_THREAD:
11033 continue;
11034
11035 case DIFV_SCOPE_LOCAL:
11036 np = &vstate->dtvs_nlocals;
11037 svarp = vstate->dtvs_locals;
11038 break;
11039
11040 case DIFV_SCOPE_GLOBAL:
11041 np = &vstate->dtvs_nglobals;
11042 svarp = vstate->dtvs_globals;
11043 break;
11044
11045 default:
11046 ASSERT(0);
11047 }
11048
11049 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
11050 continue;
11051
11052 id -= DIF_VAR_OTHER_UBASE;
11053 ASSERT(id < *np);
11054
11055 svar = svarp[id];
11056 ASSERT(svar != NULL);
11057 ASSERT(svar->dtsv_refcnt > 0);
11058
11059 if (--svar->dtsv_refcnt > 0)
11060 continue;
11061
11062 if (svar->dtsv_size != 0) {
11063 ASSERT(svar->dtsv_data != 0);
11064 kmem_free((void *)(uintptr_t)svar->dtsv_data,
11065 svar->dtsv_size);
11066 }
11067
11068 kmem_free(svar, sizeof (dtrace_statvar_t));
11069 svarp[id] = NULL;
11070 }
11071
11072 if (dp->dtdo_buf != NULL)
11073 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11074 if (dp->dtdo_inttab != NULL)
11075 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11076 if (dp->dtdo_strtab != NULL)
11077 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11078 if (dp->dtdo_vartab != NULL)
11079 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11080
11081 kmem_free(dp, sizeof (dtrace_difo_t));
11082 }
11083
11084 static void
11085 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
11086 {
11087 int i;
11088
11089 ASSERT(MUTEX_HELD(&dtrace_lock));
11090 ASSERT(dp->dtdo_refcnt != 0);
11091
11092 for (i = 0; i < dp->dtdo_varlen; i++) {
11093 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11094
11095 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
11096 continue;
11097
11098 ASSERT(dtrace_vtime_references > 0);
11099 if (--dtrace_vtime_references == 0)
11100 dtrace_vtime_disable();
11101 }
11102
11103 if (--dp->dtdo_refcnt == 0)
11104 dtrace_difo_destroy(dp, vstate);
11105 }
11106
11107 /*
11108 * DTrace Format Functions
11109 */
11110 static uint16_t
11111 dtrace_format_add(dtrace_state_t *state, char *str)
11112 {
11113 char *fmt, **new;
11114 uint16_t ndx, len = strlen(str) + 1;
11115
11116 fmt = kmem_zalloc(len, KM_SLEEP);
11117 bcopy(str, fmt, len);
11118
11119 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
11120 if (state->dts_formats[ndx] == NULL) {
11121 state->dts_formats[ndx] = fmt;
11122 return (ndx + 1);
11123 }
11124 }
11125
11126 if (state->dts_nformats == USHRT_MAX) {
11127 /*
11128 * This is only likely if a denial-of-service attack is being
11129 * attempted. As such, it's okay to fail silently here.
11130 */
11131 kmem_free(fmt, len);
11132 return (0);
11133 }
11134
11135 /*
11136 * For simplicity, we always resize the formats array to be exactly the
11137 * number of formats.
11138 */
11139 ndx = state->dts_nformats++;
11140 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
11141
11142 if (state->dts_formats != NULL) {
11143 ASSERT(ndx != 0);
11144 bcopy(state->dts_formats, new, ndx * sizeof (char *));
11145 kmem_free(state->dts_formats, ndx * sizeof (char *));
11146 }
11147
11148 state->dts_formats = new;
11149 state->dts_formats[ndx] = fmt;
11150
11151 return (ndx + 1);
11152 }
11153
11154 static void
11155 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
11156 {
11157 char *fmt;
11158
11159 ASSERT(state->dts_formats != NULL);
11160 ASSERT(format <= state->dts_nformats);
11161 ASSERT(state->dts_formats[format - 1] != NULL);
11162
11163 fmt = state->dts_formats[format - 1];
11164 kmem_free(fmt, strlen(fmt) + 1);
11165 state->dts_formats[format - 1] = NULL;
11166 }
11167
11168 static void
11169 dtrace_format_destroy(dtrace_state_t *state)
11170 {
11171 int i;
11172
11173 if (state->dts_nformats == 0) {
11174 ASSERT(state->dts_formats == NULL);
11175 return;
11176 }
11177
11178 ASSERT(state->dts_formats != NULL);
11179
11180 for (i = 0; i < state->dts_nformats; i++) {
11181 char *fmt = state->dts_formats[i];
11182
11183 if (fmt == NULL)
11184 continue;
11185
11186 kmem_free(fmt, strlen(fmt) + 1);
11187 }
11188
11189 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
11190 state->dts_nformats = 0;
11191 state->dts_formats = NULL;
11192 }
11193
11194 /*
11195 * DTrace Predicate Functions
11196 */
11197 static dtrace_predicate_t *
11198 dtrace_predicate_create(dtrace_difo_t *dp)
11199 {
11200 dtrace_predicate_t *pred;
11201
11202 ASSERT(MUTEX_HELD(&dtrace_lock));
11203 ASSERT(dp->dtdo_refcnt != 0);
11204
11205 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
11206 pred->dtp_difo = dp;
11207 pred->dtp_refcnt = 1;
11208
11209 if (!dtrace_difo_cacheable(dp))
11210 return (pred);
11211
11212 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
11213 /*
11214 * This is only theoretically possible -- we have had 2^32
11215 * cacheable predicates on this machine. We cannot allow any
11216 * more predicates to become cacheable: as unlikely as it is,
11217 * there may be a thread caching a (now stale) predicate cache
11218 * ID. (N.B.: the temptation is being successfully resisted to
11219 * have this cmn_err() "Holy shit -- we executed this code!")
11220 */
11221 return (pred);
11222 }
11223
11224 pred->dtp_cacheid = dtrace_predcache_id++;
11225
11226 return (pred);
11227 }
11228
11229 static void
11230 dtrace_predicate_hold(dtrace_predicate_t *pred)
11231 {
11232 ASSERT(MUTEX_HELD(&dtrace_lock));
11233 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
11234 ASSERT(pred->dtp_refcnt > 0);
11235
11236 pred->dtp_refcnt++;
11237 }
11238
11239 static void
11240 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
11241 {
11242 dtrace_difo_t *dp = pred->dtp_difo;
11243
11244 ASSERT(MUTEX_HELD(&dtrace_lock));
11245 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
11246 ASSERT(pred->dtp_refcnt > 0);
11247
11248 if (--pred->dtp_refcnt == 0) {
11249 dtrace_difo_release(pred->dtp_difo, vstate);
11250 kmem_free(pred, sizeof (dtrace_predicate_t));
11251 }
11252 }
11253
11254 /*
11255 * DTrace Action Description Functions
11256 */
11257 static dtrace_actdesc_t *
11258 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
11259 uint64_t uarg, uint64_t arg)
11260 {
11261 dtrace_actdesc_t *act;
11262
11263 #ifdef illumos
11264 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
11265 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
11266 #endif
11267
11268 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
11269 act->dtad_kind = kind;
11270 act->dtad_ntuple = ntuple;
11271 act->dtad_uarg = uarg;
11272 act->dtad_arg = arg;
11273 act->dtad_refcnt = 1;
11274
11275 return (act);
11276 }
11277
11278 static void
11279 dtrace_actdesc_hold(dtrace_actdesc_t *act)
11280 {
11281 ASSERT(act->dtad_refcnt >= 1);
11282 act->dtad_refcnt++;
11283 }
11284
11285 static void
11286 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
11287 {
11288 dtrace_actkind_t kind = act->dtad_kind;
11289 dtrace_difo_t *dp;
11290
11291 ASSERT(act->dtad_refcnt >= 1);
11292
11293 if (--act->dtad_refcnt != 0)
11294 return;
11295
11296 if ((dp = act->dtad_difo) != NULL)
11297 dtrace_difo_release(dp, vstate);
11298
11299 if (DTRACEACT_ISPRINTFLIKE(kind)) {
11300 char *str = (char *)(uintptr_t)act->dtad_arg;
11301
11302 #ifdef illumos
11303 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
11304 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
11305 #endif
11306
11307 if (str != NULL)
11308 kmem_free(str, strlen(str) + 1);
11309 }
11310
11311 kmem_free(act, sizeof (dtrace_actdesc_t));
11312 }
11313
11314 /*
11315 * DTrace ECB Functions
11316 */
11317 static dtrace_ecb_t *
11318 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
11319 {
11320 dtrace_ecb_t *ecb;
11321 dtrace_epid_t epid;
11322
11323 ASSERT(MUTEX_HELD(&dtrace_lock));
11324
11325 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
11326 ecb->dte_predicate = NULL;
11327 ecb->dte_probe = probe;
11328
11329 /*
11330 * The default size is the size of the default action: recording
11331 * the header.
11332 */
11333 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t);
11334 ecb->dte_alignment = sizeof (dtrace_epid_t);
11335
11336 epid = state->dts_epid++;
11337
11338 if (epid - 1 >= state->dts_necbs) {
11339 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
11340 int necbs = state->dts_necbs << 1;
11341
11342 ASSERT(epid == state->dts_necbs + 1);
11343
11344 if (necbs == 0) {
11345 ASSERT(oecbs == NULL);
11346 necbs = 1;
11347 }
11348
11349 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
11350
11351 if (oecbs != NULL)
11352 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
11353
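		/*
		 * Probe context reads dts_ecbs and dts_necbs without locks,
		 * so the producer barriers below order the stores: the
		 * populated array is published before the new (larger)
		 * count.
		 */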
11354 dtrace_membar_producer();
11355 state->dts_ecbs = ecbs;
11356
11357 if (oecbs != NULL) {
11358 /*
11359 * If this state is active, we must dtrace_sync()
11360 * before we can free the old dts_ecbs array: we're
11361 * coming in hot, and there may be active ring
11362 * buffer processing (which indexes into the dts_ecbs
11363 * array) on another CPU.
11364 */
11365 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
11366 dtrace_sync();
11367
11368 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
11369 }
11370
11371 dtrace_membar_producer();
11372 state->dts_necbs = necbs;
11373 }
11374
11375 ecb->dte_state = state;
11376
11377 ASSERT(state->dts_ecbs[epid - 1] == NULL);
11378 dtrace_membar_producer();
11379 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
11380
11381 return (ecb);
11382 }
11383
11384 static int
11385 dtrace_ecb_enable(dtrace_ecb_t *ecb)
11386 {
11387 dtrace_probe_t *probe = ecb->dte_probe;
11388
11389 ASSERT(MUTEX_HELD(&cpu_lock));
11390 ASSERT(MUTEX_HELD(&dtrace_lock));
11391 ASSERT(ecb->dte_next == NULL);
11392
11393 if (probe == NULL) {
11394 /*
11395 * This is the NULL probe -- there's nothing to do.
11396 */
11397 return (0);
11398 }
11399
11400 if (probe->dtpr_ecb == NULL) {
11401 dtrace_provider_t *prov = probe->dtpr_provider;
11402
11403 /*
11404 * We're the first ECB on this probe.
11405 */
11406 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
11407
11408 if (ecb->dte_predicate != NULL)
11409 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
11410
11411 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
11412 probe->dtpr_id, probe->dtpr_arg));
11413 } else {
11414 /*
11415 * This probe is already active. Swing the last pointer to
11416 * point to the new ECB, and issue a dtrace_sync() to assure
11417 * that all CPUs have seen the change.
11418 */
11419 ASSERT(probe->dtpr_ecb_last != NULL);
11420 probe->dtpr_ecb_last->dte_next = ecb;
11421 probe->dtpr_ecb_last = ecb;
11422 probe->dtpr_predcache = 0;
11423
11424 dtrace_sync();
11425 return (0);
11426 }
11427 }
11428
11429 static int
11430 dtrace_ecb_resize(dtrace_ecb_t *ecb)
11431 {
11432 dtrace_action_t *act;
11433 uint32_t curneeded = UINT32_MAX;
11434 uint32_t aggbase = UINT32_MAX;
11435
11436 /*
11437 * If we record anything, we always record the dtrace_rechdr_t. (And
11438 * we always record it first.)
11439 */
11440 ecb->dte_size = sizeof (dtrace_rechdr_t);
11441 ecb->dte_alignment = sizeof (dtrace_epid_t);
11442
11443 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11444 dtrace_recdesc_t *rec = &act->dta_rec;
11445 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1);
11446
11447 ecb->dte_alignment = MAX(ecb->dte_alignment,
11448 rec->dtrd_alignment);
11449
11450 if (DTRACEACT_ISAGG(act->dta_kind)) {
11451 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11452
11453 ASSERT(rec->dtrd_size != 0);
11454 ASSERT(agg->dtag_first != NULL);
11455 ASSERT(act->dta_prev->dta_intuple);
11456 ASSERT(aggbase != UINT32_MAX);
11457 ASSERT(curneeded != UINT32_MAX);
11458
11459 agg->dtag_base = aggbase;
11460
11461 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11462 rec->dtrd_offset = curneeded;
11463 if (curneeded + rec->dtrd_size < curneeded)
11464 return (EINVAL);
11465 curneeded += rec->dtrd_size;
11466 ecb->dte_needed = MAX(ecb->dte_needed, curneeded);
11467
11468 aggbase = UINT32_MAX;
11469 curneeded = UINT32_MAX;
11470 } else if (act->dta_intuple) {
11471 if (curneeded == UINT32_MAX) {
11472 /*
11473 * This is the first record in a tuple. Align
11474 * curneeded to be at offset 4 in an 8-byte
11475 * aligned block.
11476 */
11477 ASSERT(act->dta_prev == NULL ||
11478 !act->dta_prev->dta_intuple);
11479 ASSERT3U(aggbase, ==, UINT32_MAX);
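				/*
				 * Leave room for the aggregation ID in the
				 * sizeof (dtrace_aggid_t) bytes immediately
				 * preceding the tuple data; aggbase (set
				 * below) is therefore 8-byte aligned.
				 */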
11480 curneeded = P2PHASEUP(ecb->dte_size,
11481 sizeof (uint64_t), sizeof (dtrace_aggid_t));
11482
11483 aggbase = curneeded - sizeof (dtrace_aggid_t);
11484 ASSERT(IS_P2ALIGNED(aggbase,
11485 sizeof (uint64_t)));
11486 }
11487 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment);
11488 rec->dtrd_offset = curneeded;
11489 if (curneeded + rec->dtrd_size < curneeded)
11490 return (EINVAL);
11491 curneeded += rec->dtrd_size;
11492 } else {
11493 /* tuples must be followed by an aggregation */
11494 ASSERT(act->dta_prev == NULL ||
11495 !act->dta_prev->dta_intuple);
11496
11497 ecb->dte_size = P2ROUNDUP(ecb->dte_size,
11498 rec->dtrd_alignment);
11499 rec->dtrd_offset = ecb->dte_size;
11500 if (ecb->dte_size + rec->dtrd_size < ecb->dte_size)
11501 return (EINVAL);
11502 ecb->dte_size += rec->dtrd_size;
11503 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size);
11504 }
11505 }
11506
11507 if ((act = ecb->dte_action) != NULL &&
11508 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
11509 ecb->dte_size == sizeof (dtrace_rechdr_t)) {
11510 /*
11511 * If the size is still sizeof (dtrace_rechdr_t), then all
11512 * actions store no data; set the size to 0.
11513 */
11514 ecb->dte_size = 0;
11515 }
11516
11517 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t));
11518 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t)));
11519 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed,
11520 ecb->dte_needed);
11521 return (0);
11522 }
11523
11524 static dtrace_action_t *
11525 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11526 {
11527 dtrace_aggregation_t *agg;
11528 size_t size = sizeof (uint64_t);
11529 int ntuple = desc->dtad_ntuple;
11530 dtrace_action_t *act;
11531 dtrace_recdesc_t *frec;
11532 dtrace_aggid_t aggid;
11533 dtrace_state_t *state = ecb->dte_state;
11534
11535 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
11536 agg->dtag_ecb = ecb;
11537
11538 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
11539
11540 switch (desc->dtad_kind) {
11541 case DTRACEAGG_MIN:
11542 agg->dtag_initial = INT64_MAX;
11543 agg->dtag_aggregate = dtrace_aggregate_min;
11544 break;
11545
11546 case DTRACEAGG_MAX:
11547 agg->dtag_initial = INT64_MIN;
11548 agg->dtag_aggregate = dtrace_aggregate_max;
11549 break;
11550
11551 case DTRACEAGG_COUNT:
11552 agg->dtag_aggregate = dtrace_aggregate_count;
11553 break;
11554
11555 case DTRACEAGG_QUANTIZE:
11556 agg->dtag_aggregate = dtrace_aggregate_quantize;
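		/*
		 * With 64-bit values this works out to
		 * ((64 - 1) * 2 + 1) == 127 power-of-two buckets (negative,
		 * zero and positive), each stored as a uint64_t.
		 */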
11557 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
11558 sizeof (uint64_t);
11559 break;
11560
11561 case DTRACEAGG_LQUANTIZE: {
11562 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
11563 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
11564
11565 agg->dtag_initial = desc->dtad_arg;
11566 agg->dtag_aggregate = dtrace_aggregate_lquantize;
11567
11568 if (step == 0 || levels == 0)
11569 goto err;
11570
11571 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
11572 break;
11573 }
11574
11575 case DTRACEAGG_LLQUANTIZE: {
11576 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg);
11577 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg);
11578 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg);
11579 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg);
11580 int64_t v;
11581
11582 agg->dtag_initial = desc->dtad_arg;
11583 agg->dtag_aggregate = dtrace_aggregate_llquantize;
11584
11585 if (factor < 2 || low >= high || nsteps < factor)
11586 goto err;
11587
11588 /*
11589 * Now check that the number of steps evenly divides a power
11590 * of the factor. (This assures both integer bucket size and
11591 * linearity within each magnitude.)
11592 */
11593 for (v = factor; v < nsteps; v *= factor)
11594 continue;
11595
11596 if ((v % nsteps) || (nsteps % factor))
11597 goto err;
11598
11599 size = (dtrace_aggregate_llquantize_bucket(factor,
11600 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t);
11601 break;
11602 }
11603
11604 case DTRACEAGG_AVG:
11605 agg->dtag_aggregate = dtrace_aggregate_avg;
11606 size = sizeof (uint64_t) * 2;
11607 break;
11608
11609 case DTRACEAGG_STDDEV:
11610 agg->dtag_aggregate = dtrace_aggregate_stddev;
11611 size = sizeof (uint64_t) * 4;
11612 break;
11613
11614 case DTRACEAGG_SUM:
11615 agg->dtag_aggregate = dtrace_aggregate_sum;
11616 break;
11617
11618 default:
11619 goto err;
11620 }
11621
11622 agg->dtag_action.dta_rec.dtrd_size = size;
11623
11624 if (ntuple == 0)
11625 goto err;
11626
11627 /*
11628 * We must make sure that we have enough actions for the n-tuple.
11629 */
11630 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
11631 if (DTRACEACT_ISAGG(act->dta_kind))
11632 break;
11633
11634 if (--ntuple == 0) {
11635 /*
11636 * This is the action with which our n-tuple begins.
11637 */
11638 agg->dtag_first = act;
11639 goto success;
11640 }
11641 }
11642
11643 /*
11644 * This n-tuple is short by ntuple elements. Return failure.
11645 */
11646 ASSERT(ntuple != 0);
11647 err:
11648 kmem_free(agg, sizeof (dtrace_aggregation_t));
11649 return (NULL);
11650
11651 success:
11652 /*
11653 * If the last action in the tuple has a size of zero, it's actually
11654 * an expression argument for the aggregating action.
11655 */
11656 ASSERT(ecb->dte_action_last != NULL);
11657 act = ecb->dte_action_last;
11658
11659 if (act->dta_kind == DTRACEACT_DIFEXPR) {
11660 ASSERT(act->dta_difo != NULL);
11661
11662 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
11663 agg->dtag_hasarg = 1;
11664 }
11665
11666 /*
11667 * We need to allocate an id for this aggregation.
11668 */
11669
11670 #ifdef illumos
11671 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
11672 VM_BESTFIT | VM_SLEEP);
11673 #endif
11674 #ifdef __FreeBSD__
11675 aggid = alloc_unr(state->dts_aggid_arena);
11676 #endif
11677 #ifdef __NetBSD__
11678 vmem_addr_t offset;
11679
11680 if (vmem_alloc(state->dts_aggid_arena, 1, VM_BESTFIT | VM_SLEEP,
11681 &offset) != 0)
11682 ASSERT(0);
11683 aggid = (dtrace_aggid_t)(uintptr_t)offset;
11684 #endif
11685
11686 if (aggid - 1 >= state->dts_naggregations) {
11687 dtrace_aggregation_t **oaggs = state->dts_aggregations;
11688 dtrace_aggregation_t **aggs;
11689 int naggs = state->dts_naggregations << 1;
11690 int onaggs = state->dts_naggregations;
11691
11692 ASSERT(aggid == state->dts_naggregations + 1);
11693
11694 if (naggs == 0) {
11695 ASSERT(oaggs == NULL);
11696 naggs = 1;
11697 }
11698
11699 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
11700
11701 if (oaggs != NULL) {
11702 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
11703 kmem_free(oaggs, onaggs * sizeof (*aggs));
11704 }
11705
11706 state->dts_aggregations = aggs;
11707 state->dts_naggregations = naggs;
11708 }
11709
11710 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
11711 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
11712
11713 frec = &agg->dtag_first->dta_rec;
11714 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
11715 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
11716
11717 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
11718 ASSERT(!act->dta_intuple);
11719 act->dta_intuple = 1;
11720 }
11721
11722 return (&agg->dtag_action);
11723 }
11724
11725 static void
11726 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
11727 {
11728 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
11729 dtrace_state_t *state = ecb->dte_state;
11730 dtrace_aggid_t aggid = agg->dtag_id;
11731
11732 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
11733 #ifdef illumos
11734 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
11735 #endif
11736 #ifdef __FreeBSD__
11737 free_unr(state->dts_aggid_arena, aggid);
11738 #endif
11739 #ifdef __NetBSD__
11740 vmem_free(state->dts_aggid_arena, (uintptr_t)aggid, 1);
11741 #endif
11742
11743 ASSERT(state->dts_aggregations[aggid - 1] == agg);
11744 state->dts_aggregations[aggid - 1] = NULL;
11745
11746 kmem_free(agg, sizeof (dtrace_aggregation_t));
11747 }
11748
11749 static int
11750 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
11751 {
11752 dtrace_action_t *action, *last;
11753 dtrace_difo_t *dp = desc->dtad_difo;
11754 uint32_t size = 0, align = sizeof (uint8_t), mask;
11755 uint16_t format = 0;
11756 dtrace_recdesc_t *rec;
11757 dtrace_state_t *state = ecb->dte_state;
11758 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize;
11759 uint64_t arg = desc->dtad_arg;
11760
11761 ASSERT(MUTEX_HELD(&dtrace_lock));
11762 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
11763
11764 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
11765 /*
11766 * If this is an aggregating action, there must be neither
11767 * a speculate nor a commit on the action chain.
11768 */
11769 dtrace_action_t *act;
11770
11771 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
11772 if (act->dta_kind == DTRACEACT_COMMIT)
11773 return (EINVAL);
11774
11775 if (act->dta_kind == DTRACEACT_SPECULATE)
11776 return (EINVAL);
11777 }
11778
11779 action = dtrace_ecb_aggregation_create(ecb, desc);
11780
11781 if (action == NULL)
11782 return (EINVAL);
11783 } else {
11784 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
11785 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
11786 dp != NULL && dp->dtdo_destructive)) {
11787 state->dts_destructive = 1;
11788 }
11789
11790 switch (desc->dtad_kind) {
11791 case DTRACEACT_PRINTF:
11792 case DTRACEACT_PRINTA:
11793 case DTRACEACT_SYSTEM:
11794 case DTRACEACT_FREOPEN:
11795 case DTRACEACT_DIFEXPR:
11796 /*
11797 * We know that our arg is a string -- turn it into a
11798 * format.
11799 */
11800 if (arg == 0) {
11801 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA ||
11802 desc->dtad_kind == DTRACEACT_DIFEXPR);
11803 format = 0;
11804 } else {
11805 ASSERT(arg != 0);
11806 #ifdef illumos
11807 ASSERT(arg > KERNELBASE);
11808 #endif
11809 format = dtrace_format_add(state,
11810 (char *)(uintptr_t)arg);
11811 }
11812
11813 /*FALLTHROUGH*/
11814 case DTRACEACT_LIBACT:
11815 case DTRACEACT_TRACEMEM:
11816 case DTRACEACT_TRACEMEM_DYNSIZE:
11817 if (dp == NULL)
11818 return (EINVAL);
11819
11820 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
11821 break;
11822
11823 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
11824 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11825 return (EINVAL);
11826
11827 size = opt[DTRACEOPT_STRSIZE];
11828 }
11829
11830 break;
11831
11832 case DTRACEACT_STACK:
11833 if ((nframes = arg) == 0) {
11834 nframes = opt[DTRACEOPT_STACKFRAMES];
11835 ASSERT(nframes > 0);
11836 arg = nframes;
11837 }
11838
11839 size = nframes * sizeof (pc_t);
11840 break;
11841
11842 case DTRACEACT_JSTACK:
11843 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
11844 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
11845
11846 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
11847 nframes = opt[DTRACEOPT_JSTACKFRAMES];
11848
11849 arg = DTRACE_USTACK_ARG(nframes, strsize);
11850
11851 /*FALLTHROUGH*/
11852 case DTRACEACT_USTACK:
11853 if (desc->dtad_kind != DTRACEACT_JSTACK &&
11854 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
11855 strsize = DTRACE_USTACK_STRSIZE(arg);
11856 nframes = opt[DTRACEOPT_USTACKFRAMES];
11857 ASSERT(nframes > 0);
11858 arg = DTRACE_USTACK_ARG(nframes, strsize);
11859 }
11860
11861 /*
11862 * Save a slot for the pid.
11863 */
11864 size = (nframes + 1) * sizeof (uint64_t);
11865 size += DTRACE_USTACK_STRSIZE(arg);
11866 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
11867
11868 break;
11869
11870 case DTRACEACT_SYM:
11871 case DTRACEACT_MOD:
11872 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
11873 sizeof (uint64_t)) ||
11874 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11875 return (EINVAL);
11876 break;
11877
11878 case DTRACEACT_USYM:
11879 case DTRACEACT_UMOD:
11880 case DTRACEACT_UADDR:
11881 if (dp == NULL ||
11882 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
11883 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11884 return (EINVAL);
11885
11886 /*
11887 * We have a slot for the pid, plus a slot for the
11888 * argument. To keep things simple (aligned with
11889 * bitness-neutral sizing), we store each as a 64-bit
11890 * quantity.
11891 */
11892 size = 2 * sizeof (uint64_t);
11893 break;
11894
11895 case DTRACEACT_STOP:
11896 case DTRACEACT_BREAKPOINT:
11897 case DTRACEACT_PANIC:
11898 break;
11899
11900 case DTRACEACT_CHILL:
11901 case DTRACEACT_DISCARD:
11902 case DTRACEACT_RAISE:
11903 if (dp == NULL)
11904 return (EINVAL);
11905 break;
11906
11907 case DTRACEACT_EXIT:
11908 if (dp == NULL ||
11909 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
11910 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
11911 return (EINVAL);
11912 break;
11913
11914 case DTRACEACT_SPECULATE:
11915 if (ecb->dte_size > sizeof (dtrace_rechdr_t))
11916 return (EINVAL);
11917
11918 if (dp == NULL)
11919 return (EINVAL);
11920
11921 state->dts_speculates = 1;
11922 break;
11923
11924 case DTRACEACT_PRINTM:
11925 size = dp->dtdo_rtype.dtdt_size;
11926 break;
11927
11928 case DTRACEACT_COMMIT: {
11929 dtrace_action_t *act = ecb->dte_action;
11930
11931 for (; act != NULL; act = act->dta_next) {
11932 if (act->dta_kind == DTRACEACT_COMMIT)
11933 return (EINVAL);
11934 }
11935
11936 if (dp == NULL)
11937 return (EINVAL);
11938 break;
11939 }
11940
11941 default:
11942 return (EINVAL);
11943 }
11944
11945 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
11946 /*
11947 * If this is a data-storing action or a speculate,
11948 * we must be sure that there isn't a commit on the
11949 * action chain.
11950 */
11951 dtrace_action_t *act = ecb->dte_action;
11952
11953 for (; act != NULL; act = act->dta_next) {
11954 if (act->dta_kind == DTRACEACT_COMMIT)
11955 return (EINVAL);
11956 }
11957 }
11958
11959 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
11960 action->dta_rec.dtrd_size = size;
11961 }
11962
11963 action->dta_refcnt = 1;
11964 rec = &action->dta_rec;
11965 size = rec->dtrd_size;
11966
11967 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
11968 if (!(size & mask)) {
11969 align = mask + 1;
11970 break;
11971 }
11972 }
11973
11974 action->dta_kind = desc->dtad_kind;
11975
11976 if ((action->dta_difo = dp) != NULL)
11977 dtrace_difo_hold(dp);
11978
11979 rec->dtrd_action = action->dta_kind;
11980 rec->dtrd_arg = arg;
11981 rec->dtrd_uarg = desc->dtad_uarg;
11982 rec->dtrd_alignment = (uint16_t)align;
11983 rec->dtrd_format = format;
11984
11985 if ((last = ecb->dte_action_last) != NULL) {
11986 ASSERT(ecb->dte_action != NULL);
11987 action->dta_prev = last;
11988 last->dta_next = action;
11989 } else {
11990 ASSERT(ecb->dte_action == NULL);
11991 ecb->dte_action = action;
11992 }
11993
11994 ecb->dte_action_last = action;
11995
11996 return (0);
11997 }
11998
11999 static void
12000 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
12001 {
12002 dtrace_action_t *act = ecb->dte_action, *next;
12003 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
12004 dtrace_difo_t *dp;
12005 uint16_t format;
12006
12007 if (act != NULL && act->dta_refcnt > 1) {
12008 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
12009 act->dta_refcnt--;
12010 } else {
12011 for (; act != NULL; act = next) {
12012 next = act->dta_next;
12013 ASSERT(next != NULL || act == ecb->dte_action_last);
12014 ASSERT(act->dta_refcnt == 1);
12015
12016 if ((format = act->dta_rec.dtrd_format) != 0)
12017 dtrace_format_remove(ecb->dte_state, format);
12018
12019 if ((dp = act->dta_difo) != NULL)
12020 dtrace_difo_release(dp, vstate);
12021
12022 if (DTRACEACT_ISAGG(act->dta_kind)) {
12023 dtrace_ecb_aggregation_destroy(ecb, act);
12024 } else {
12025 kmem_free(act, sizeof (dtrace_action_t));
12026 }
12027 }
12028 }
12029
12030 ecb->dte_action = NULL;
12031 ecb->dte_action_last = NULL;
12032 ecb->dte_size = 0;
12033 }
12034
12035 static void
12036 dtrace_ecb_disable(dtrace_ecb_t *ecb)
12037 {
12038 /*
12039 * We disable the ECB by removing it from its probe.
12040 */
12041 dtrace_ecb_t *pecb, *prev = NULL;
12042 dtrace_probe_t *probe = ecb->dte_probe;
12043
12044 ASSERT(MUTEX_HELD(&dtrace_lock));
12045
12046 if (probe == NULL) {
12047 /*
12048 * This is the NULL probe; there is nothing to disable.
12049 */
12050 return;
12051 }
12052
12053 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
12054 if (pecb == ecb)
12055 break;
12056 prev = pecb;
12057 }
12058
12059 ASSERT(pecb != NULL);
12060
12061 if (prev == NULL) {
12062 probe->dtpr_ecb = ecb->dte_next;
12063 } else {
12064 prev->dte_next = ecb->dte_next;
12065 }
12066
12067 if (ecb == probe->dtpr_ecb_last) {
12068 ASSERT(ecb->dte_next == NULL);
12069 probe->dtpr_ecb_last = prev;
12070 }
12071
12072 /*
12073 * The ECB has been disconnected from the probe; now sync to assure
12074 * that all CPUs have seen the change before returning.
12075 */
12076 dtrace_sync();
12077
12078 if (probe->dtpr_ecb == NULL) {
12079 /*
12080 * That was the last ECB on the probe; clear the predicate
12081 * cache ID for the probe, disable it and sync one more time
12082 * to assure that we'll never hit it again.
12083 */
12084 dtrace_provider_t *prov = probe->dtpr_provider;
12085
12086 ASSERT(ecb->dte_next == NULL);
12087 ASSERT(probe->dtpr_ecb_last == NULL);
12088 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
12089 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
12090 probe->dtpr_id, probe->dtpr_arg);
12091 dtrace_sync();
12092 } else {
12093 /*
12094 * There is at least one ECB remaining on the probe. If there
12095 * is _exactly_ one, set the probe's predicate cache ID to be
12096 * the predicate cache ID of the remaining ECB.
12097 */
12098 ASSERT(probe->dtpr_ecb_last != NULL);
12099 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
12100
12101 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
12102 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
12103
12104 ASSERT(probe->dtpr_ecb->dte_next == NULL);
12105
12106 if (p != NULL)
12107 probe->dtpr_predcache = p->dtp_cacheid;
12108 }
12109
12110 ecb->dte_next = NULL;
12111 }
12112 }
12113
12114 static void
12115 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
12116 {
12117 dtrace_state_t *state = ecb->dte_state;
12118 dtrace_vstate_t *vstate = &state->dts_vstate;
12119 dtrace_predicate_t *pred;
12120 dtrace_epid_t epid = ecb->dte_epid;
12121
12122 ASSERT(MUTEX_HELD(&dtrace_lock));
12123 ASSERT(ecb->dte_next == NULL);
12124 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
12125
12126 if ((pred = ecb->dte_predicate) != NULL)
12127 dtrace_predicate_release(pred, vstate);
12128
12129 dtrace_ecb_action_remove(ecb);
12130
12131 ASSERT(state->dts_ecbs[epid - 1] == ecb);
12132 state->dts_ecbs[epid - 1] = NULL;
12133
12134 kmem_free(ecb, sizeof (dtrace_ecb_t));
12135 }
12136
12137 static dtrace_ecb_t *
12138 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
12139 dtrace_enabling_t *enab)
12140 {
12141 dtrace_ecb_t *ecb;
12142 dtrace_predicate_t *pred;
12143 dtrace_actdesc_t *act;
12144 dtrace_provider_t *prov;
12145 dtrace_ecbdesc_t *desc = enab->dten_current;
12146
12147 ASSERT(MUTEX_HELD(&dtrace_lock));
12148 ASSERT(state != NULL);
12149
12150 ecb = dtrace_ecb_add(state, probe);
12151 ecb->dte_uarg = desc->dted_uarg;
12152
12153 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
12154 dtrace_predicate_hold(pred);
12155 ecb->dte_predicate = pred;
12156 }
12157
12158 if (probe != NULL) {
12159 /*
12160 * If the provider shows more leg than the consumer is old
12161 * enough to see, we need to enable the appropriate implicit
12162 * predicate bits to prevent the ecb from activating at
12163 * revealing times.
12164 *
12165 * Providers specifying DTRACE_PRIV_USER at register time
12166 * are stating that they need the /proc-style privilege
12167 * model to be enforced, and this is what DTRACE_COND_OWNER
12168 * and DTRACE_COND_ZONEOWNER will then do at probe time.
12169 */
12170 prov = probe->dtpr_provider;
12171 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
12172 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
12173 ecb->dte_cond |= DTRACE_COND_OWNER;
12174
12175 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
12176 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
12177 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
12178
12179 /*
12180 * If the provider shows us kernel innards and the user
12181 * is lacking sufficient privilege, enable the
12182 * DTRACE_COND_USERMODE implicit predicate.
12183 */
12184 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
12185 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
12186 ecb->dte_cond |= DTRACE_COND_USERMODE;
12187 }
12188
12189 if (dtrace_ecb_create_cache != NULL) {
12190 /*
12191 * If we have a cached ecb, we'll use its action list instead
12192 * of creating our own (saving both time and space).
12193 */
12194 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
12195 dtrace_action_t *act = cached->dte_action;
12196
12197 if (act != NULL) {
12198 ASSERT(act->dta_refcnt > 0);
12199 act->dta_refcnt++;
12200 ecb->dte_action = act;
12201 ecb->dte_action_last = cached->dte_action_last;
12202 ecb->dte_needed = cached->dte_needed;
12203 ecb->dte_size = cached->dte_size;
12204 ecb->dte_alignment = cached->dte_alignment;
12205 }
12206
12207 return (ecb);
12208 }
12209
12210 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
12211 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
12212 dtrace_ecb_destroy(ecb);
12213 return (NULL);
12214 }
12215 }
12216
12217 if ((enab->dten_error = dtrace_ecb_resize(ecb)) != 0) {
12218 dtrace_ecb_destroy(ecb);
12219 return (NULL);
12220 }
12221
12222 return (dtrace_ecb_create_cache = ecb);
12223 }
12224
12225 static int
12226 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
12227 {
12228 dtrace_ecb_t *ecb;
12229 dtrace_enabling_t *enab = arg;
12230 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
12231
12232 ASSERT(state != NULL);
12233
12234 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
12235 /*
12236 * This probe was created in a generation for which this
12237 * enabling has previously created ECBs; we don't want to
12238 * enable it again, so just kick out.
12239 */
12240 return (DTRACE_MATCH_NEXT);
12241 }
12242
12243 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
12244 return (DTRACE_MATCH_DONE);
12245
12246 if (dtrace_ecb_enable(ecb) < 0)
12247 return (DTRACE_MATCH_FAIL);
12248
12249 return (DTRACE_MATCH_NEXT);
12250 }
12251
12252 static dtrace_ecb_t *
12253 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
12254 {
12255 dtrace_ecb_t *ecb;
12256
12257 ASSERT(MUTEX_HELD(&dtrace_lock));
12258
12259 if (id == 0 || id > state->dts_necbs)
12260 return (NULL);
12261
12262 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
12263 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
12264
12265 return (state->dts_ecbs[id - 1]);
12266 }
12267
12268 static dtrace_aggregation_t *
12269 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
12270 {
12271 dtrace_aggregation_t *agg;
12272
12273 ASSERT(MUTEX_HELD(&dtrace_lock));
12274
12275 if (id == 0 || id > state->dts_naggregations)
12276 return (NULL);
12277
12278 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
12279 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
12280 agg->dtag_id == id);
12281
12282 return (state->dts_aggregations[id - 1]);
12283 }
12284
12285 /*
12286 * DTrace Buffer Functions
12287 *
12288 * The following functions manipulate DTrace buffers. Most of these functions
12289 * are called in the context of establishing or processing consumer state;
12290 * exceptions are explicitly noted.
12291 */
12292
12293 /*
12294 * Note: called from cross call context. This function switches the two
12295 * buffers on a given CPU. The atomicity of this operation is assured by
12296 * disabling interrupts while the actual switch takes place; the disabling of
12297 * interrupts serializes the execution with any execution of dtrace_probe() on
12298 * the same CPU.
12299 */
12300 static void
12301 dtrace_buffer_switch(dtrace_buffer_t *buf)
12302 {
12303 caddr_t tomax = buf->dtb_tomax;
12304 caddr_t xamot = buf->dtb_xamot;
12305 dtrace_icookie_t cookie;
12306 hrtime_t now;
12307
12308 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12309 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
12310
12311 cookie = dtrace_interrupt_disable();
12312 now = dtrace_gethrtime();
12313 buf->dtb_tomax = xamot;
12314 buf->dtb_xamot = tomax;
12315 buf->dtb_xamot_drops = buf->dtb_drops;
12316 buf->dtb_xamot_offset = buf->dtb_offset;
12317 buf->dtb_xamot_errors = buf->dtb_errors;
12318 buf->dtb_xamot_flags = buf->dtb_flags;
12319 buf->dtb_offset = 0;
12320 buf->dtb_drops = 0;
12321 buf->dtb_errors = 0;
12322 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
12323 buf->dtb_interval = now - buf->dtb_switched;
12324 buf->dtb_switched = now;
12325 dtrace_interrupt_enable(cookie);
12326 }
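
/*
 * A compiled-out sketch of one way a buffer switch can be driven from
 * non-probe context: because the switch must execute on the CPU that owns
 * the buffer, it is dispatched there via dtrace_xcall().  The helper name
 * below is purely illustrative and is not part of the framework.
 */
#if 0
static void
dtrace_buffer_switch_cpu(dtrace_state_t *state, processorid_t cpu)
{
	/*
	 * dtrace_buffer_switch() disables interrupts on the target CPU for
	 * the duration of the switch, which is what serializes it against
	 * dtrace_probe() on that CPU.
	 */
	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch,
	    &state->dts_buffer[cpu]);
}
#endif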
12327
12328 /*
12329 * Note: called from cross call context. This function activates a buffer
12330 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
12331 * is guaranteed by the disabling of interrupts.
12332 */
12333 static void
12334 dtrace_buffer_activate(dtrace_state_t *state)
12335 {
12336 dtrace_buffer_t *buf;
12337 dtrace_icookie_t cookie = dtrace_interrupt_disable();
12338
12339 buf = &state->dts_buffer[curcpu_id];
12340
12341 if (buf->dtb_tomax != NULL) {
12342 /*
12343 * We might like to assert that the buffer is marked inactive,
12344 * but this isn't necessarily true: the buffer for the CPU
12345 * that processes the BEGIN probe has its buffer activated
		 * manually. In this case, we take the (harmless) action of
		 * re-clearing the INACTIVE bit.
12348 */
12349 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
12350 }
12351
12352 dtrace_interrupt_enable(cookie);
12353 }
12354
12355 #ifdef __FreeBSD__
12356 /*
12357 * Activate the specified per-CPU buffer. This is used instead of
12358 * dtrace_buffer_activate() when APs have not yet started, i.e. when
12359 * activating anonymous state.
12360 */
12361 static void
12362 dtrace_buffer_activate_cpu(dtrace_state_t *state, int cpu)
12363 {
12364
12365 if (state->dts_buffer[cpu].dtb_tomax != NULL)
12366 state->dts_buffer[cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
12367 }
12368 #endif
12369
12370 static int
12371 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
12372 processorid_t cpu, int *factor)
12373 {
12374 #ifdef illumos
12375 cpu_t *cp;
12376 #endif
12377 #ifdef __NetBSD__
12378 CPU_INFO_ITERATOR cpuind;
12379 struct cpu_info *cinfo;
#endif
#ifdef __FreeBSD__
	int i;
#endif
12381 dtrace_buffer_t *buf;
12382 int allocated = 0, desired = 0;
12383
12384 #ifdef illumos
12385 ASSERT(MUTEX_HELD(&cpu_lock));
12386 ASSERT(MUTEX_HELD(&dtrace_lock));
12387
12388 *factor = 1;
12389
12390 if (size > dtrace_nonroot_maxsize &&
12391 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
12392 return (EFBIG);
12393
12394 cp = cpu_list;
12395
12396 do {
12397 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12398 continue;
12399
12400 buf = &bufs[cp->cpu_id];
12401
12402 /*
12403 * If there is already a buffer allocated for this CPU, it
		 * is only possible that this is a DR event. In this case,
		 * the buffer size must match our specified size.
		 */
12406 if (buf->dtb_tomax != NULL) {
12407 ASSERT(buf->dtb_size == size);
12408 continue;
12409 }
12410
12411 ASSERT(buf->dtb_xamot == NULL);
12412
12413 if ((buf->dtb_tomax = kmem_zalloc(size,
12414 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12415 goto err;
12416
12417 buf->dtb_size = size;
12418 buf->dtb_flags = flags;
12419 buf->dtb_offset = 0;
12420 buf->dtb_drops = 0;
12421
12422 if (flags & DTRACEBUF_NOSWITCH)
12423 continue;
12424
12425 if ((buf->dtb_xamot = kmem_zalloc(size,
12426 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12427 goto err;
12428 } while ((cp = cp->cpu_next) != cpu_list);
12429
12430 return (0);
12431
12432 err:
12433 cp = cpu_list;
12434
12435 do {
12436 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
12437 continue;
12438
12439 buf = &bufs[cp->cpu_id];
12440 desired += 2;
12441
12442 if (buf->dtb_xamot != NULL) {
12443 ASSERT(buf->dtb_tomax != NULL);
12444 ASSERT(buf->dtb_size == size);
12445 kmem_free(buf->dtb_xamot, size);
12446 allocated++;
12447 }
12448
12449 if (buf->dtb_tomax != NULL) {
12450 ASSERT(buf->dtb_size == size);
12451 kmem_free(buf->dtb_tomax, size);
12452 allocated++;
12453 }
12454
12455 buf->dtb_tomax = NULL;
12456 buf->dtb_xamot = NULL;
12457 buf->dtb_size = 0;
12458 } while ((cp = cp->cpu_next) != cpu_list);
12459 #else
12460
12461 *factor = 1;
12462 #if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
12463 defined(__mips__) || defined(__powerpc__) || defined(__riscv__)
12464 /*
12465 * FreeBSD isn't good at limiting the amount of memory we
12466 * ask to malloc, so let's place a limit here before trying
12467 * to do something that might well end in tears at bedtime.
12468 */
12469 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1)))
12470 return (ENOMEM);
12471 #endif
12472
12473 ASSERT(MUTEX_HELD(&dtrace_lock));
12474 #ifdef __NetBSD__
12475 for (CPU_INFO_FOREACH(cpuind, cinfo))
12476 #else
12477 CPU_FOREACH(i)
12478 #endif
12479 {
12480 #ifdef __NetBSD__
12481 int i = cpu_index(cinfo);
12482 #endif
12483 if (cpu != DTRACE_CPUALL && cpu != i)
12484 continue;
12485
12486 buf = &bufs[i];
12487
12488 /*
12489 * If there is already a buffer allocated for this CPU, it
12490 * is only possible that this is a DR event. In this case,
12491 * the buffer size must match our specified size.
12492 */
12493 if (buf->dtb_tomax != NULL) {
12494 ASSERT(buf->dtb_size == size);
12495 continue;
12496 }
12497
12498 ASSERT(buf->dtb_xamot == NULL);
12499
12500 if ((buf->dtb_tomax = kmem_zalloc(size,
12501 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12502 goto err;
12503
12504 buf->dtb_size = size;
12505 buf->dtb_flags = flags;
12506 buf->dtb_offset = 0;
12507 buf->dtb_drops = 0;
12508
12509 if (flags & DTRACEBUF_NOSWITCH)
12510 continue;
12511
12512 if ((buf->dtb_xamot = kmem_zalloc(size,
12513 KM_NOSLEEP | KM_NORMALPRI)) == NULL)
12514 goto err;
12515 }
12516
12517 return (0);
12518
12519 err:
12520 /*
12521 * Error allocating memory, so free the buffers that were
12522 * allocated before the failed allocation.
12523 */
12524 #ifdef __NetBSD__
12525 for (CPU_INFO_FOREACH(cpuind, cinfo))
12526 #else
12527 CPU_FOREACH(i)
12528 #endif
12529 {
12530 #ifdef __NetBSD__
12531 int i = cpu_index(cinfo);
12532 #endif
		if (cpu != DTRACE_CPUALL && cpu != i)
12534 continue;
12535
12536 buf = &bufs[i];
12537 desired += 2;
12538
12539 if (buf->dtb_xamot != NULL) {
12540 ASSERT(buf->dtb_tomax != NULL);
12541 ASSERT(buf->dtb_size == size);
12542 kmem_free(buf->dtb_xamot, size);
12543 allocated++;
12544 }
12545
12546 if (buf->dtb_tomax != NULL) {
12547 ASSERT(buf->dtb_size == size);
12548 kmem_free(buf->dtb_tomax, size);
12549 allocated++;
12550 }
12551
12552 buf->dtb_tomax = NULL;
12553 buf->dtb_xamot = NULL;
12554 buf->dtb_size = 0;
12555
12556 }
12557 #endif
12558 *factor = desired / (allocated > 0 ? allocated : 1);
12559
12560 return (ENOMEM);
12561 }
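
/*
 * A compiled-out sketch of how a caller might use the factor returned on
 * ENOMEM above: scale the requested size down by the shortfall factor and
 * retry.  The helper and its "minsize" floor are purely illustrative.
 */
#if 0
static int
dtrace_buffer_alloc_scaled(dtrace_buffer_t *bufs, size_t size, int flags,
    processorid_t cpu, size_t minsize)
{
	int factor, rval;

	for (;;) {
		rval = dtrace_buffer_alloc(bufs, size, flags, cpu, &factor);

		if (rval != ENOMEM || factor <= 1)
			return (rval);

		/*
		 * Scale the request down by the reported shortfall and keep
		 * trying until we succeed or fall below our floor.
		 */
		size /= factor;

		if (size < minsize)
			return (ENOMEM);
	}
}
#endif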
12562
12563 /*
12564 * Note: called from probe context. This function just increments the drop
12565 * count on a buffer. It has been made a function to allow for the
12566 * possibility of understanding the source of mysterious drop counts. (A
12567 * problem for which one may be particularly disappointed that DTrace cannot
12568 * be used to understand DTrace.)
12569 */
12570 static void
12571 dtrace_buffer_drop(dtrace_buffer_t *buf)
12572 {
12573 buf->dtb_drops++;
12574 }
12575
12576 /*
12577 * Note: called from probe context. This function is called to reserve space
12578 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
12579 * mstate. Returns the new offset in the buffer, or a negative value if an
12580 * error has occurred.
12581 */
12582 static intptr_t
12583 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
12584 dtrace_state_t *state, dtrace_mstate_t *mstate)
12585 {
12586 intptr_t offs = buf->dtb_offset, soffs;
12587 intptr_t woffs;
12588 caddr_t tomax;
12589 size_t total;
12590
12591 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
12592 return (-1);
12593
12594 if ((tomax = buf->dtb_tomax) == NULL) {
12595 dtrace_buffer_drop(buf);
12596 return (-1);
12597 }
12598
12599 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
12600 while (offs & (align - 1)) {
12601 /*
12602 * Assert that our alignment is off by a number which
12603 * is itself sizeof (uint32_t) aligned.
12604 */
12605 ASSERT(!((align - (offs & (align - 1))) &
12606 (sizeof (uint32_t) - 1)));
12607 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12608 offs += sizeof (uint32_t);
12609 }
12610
12611 if ((soffs = offs + needed) > buf->dtb_size) {
12612 dtrace_buffer_drop(buf);
12613 return (-1);
12614 }
12615
12616 if (mstate == NULL)
12617 return (offs);
12618
12619 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
12620 mstate->dtms_scratch_size = buf->dtb_size - soffs;
12621 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12622
12623 return (offs);
12624 }
12625
12626 if (buf->dtb_flags & DTRACEBUF_FILL) {
12627 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
12628 (buf->dtb_flags & DTRACEBUF_FULL))
12629 return (-1);
12630 goto out;
12631 }
12632
12633 total = needed + (offs & (align - 1));
12634
12635 /*
12636 * For a ring buffer, life is quite a bit more complicated. Before
12637 * we can store any padding, we need to adjust our wrapping offset.
12638 * (If we've never before wrapped or we're not about to, no adjustment
12639 * is required.)
12640 */
12641 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
12642 offs + total > buf->dtb_size) {
12643 woffs = buf->dtb_xamot_offset;
12644
12645 if (offs + total > buf->dtb_size) {
12646 /*
12647 * We can't fit in the end of the buffer. First, a
12648 * sanity check that we can fit in the buffer at all.
12649 */
12650 if (total > buf->dtb_size) {
12651 dtrace_buffer_drop(buf);
12652 return (-1);
12653 }
12654
12655 /*
12656 * We're going to be storing at the top of the buffer,
12657 * so now we need to deal with the wrapped offset. We
12658 * only reset our wrapped offset to 0 if it is
12659 * currently greater than the current offset. If it
12660 * is less than the current offset, it is because a
12661 * previous allocation induced a wrap -- but the
12662 * allocation didn't subsequently take the space due
12663 * to an error or false predicate evaluation. In this
12664 * case, we'll just leave the wrapped offset alone: if
12665 * the wrapped offset hasn't been advanced far enough
12666 * for this allocation, it will be adjusted in the
12667 * lower loop.
12668 */
12669 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
12670 if (woffs >= offs)
12671 woffs = 0;
12672 } else {
12673 woffs = 0;
12674 }
12675
12676 /*
12677 * Now we know that we're going to be storing to the
12678 * top of the buffer and that there is room for us
12679 * there. We need to clear the buffer from the current
12680 * offset to the end (there may be old gunk there).
12681 */
12682 while (offs < buf->dtb_size)
12683 tomax[offs++] = 0;
12684
12685 /*
12686 * We need to set our offset to zero. And because we
12687 * are wrapping, we need to set the bit indicating as
12688 * much. We can also adjust our needed space back
12689 * down to the space required by the ECB -- we know
12690 * that the top of the buffer is aligned.
12691 */
12692 offs = 0;
12693 total = needed;
12694 buf->dtb_flags |= DTRACEBUF_WRAPPED;
12695 } else {
12696 /*
12697 * There is room for us in the buffer, so we simply
12698 * need to check the wrapped offset.
12699 */
12700 if (woffs < offs) {
12701 /*
12702 * The wrapped offset is less than the offset.
12703 * This can happen if we allocated buffer space
12704 * that induced a wrap, but then we didn't
12705 * subsequently take the space due to an error
12706 * or false predicate evaluation. This is
12707 * okay; we know that _this_ allocation isn't
12708 * going to induce a wrap. We still can't
12709 * reset the wrapped offset to be zero,
12710 * however: the space may have been trashed in
12711 * the previous failed probe attempt. But at
12712 * least the wrapped offset doesn't need to
12713 * be adjusted at all...
12714 */
12715 goto out;
12716 }
12717 }
12718
12719 while (offs + total > woffs) {
12720 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
12721 size_t size;
12722
12723 if (epid == DTRACE_EPIDNONE) {
12724 size = sizeof (uint32_t);
12725 } else {
12726 ASSERT3U(epid, <=, state->dts_necbs);
12727 ASSERT(state->dts_ecbs[epid - 1] != NULL);
12728
12729 size = state->dts_ecbs[epid - 1]->dte_size;
12730 }
12731
12732 ASSERT(woffs + size <= buf->dtb_size);
12733 ASSERT(size != 0);
12734
12735 if (woffs + size == buf->dtb_size) {
12736 /*
12737 * We've reached the end of the buffer; we want
12738 * to set the wrapped offset to 0 and break
12739 * out. However, if the offs is 0, then we're
12740 * in a strange edge-condition: the amount of
12741 * space that we want to reserve plus the size
12742 * of the record that we're overwriting is
12743 * greater than the size of the buffer. This
12744 * is problematic because if we reserve the
12745 * space but subsequently don't consume it (due
12746 * to a failed predicate or error) the wrapped
12747 * offset will be 0 -- yet the EPID at offset 0
12748 * will not be committed. This situation is
12749 * relatively easy to deal with: if we're in
12750 * this case, the buffer is indistinguishable
12751 * from one that hasn't wrapped; we need only
12752 * finish the job by clearing the wrapped bit,
12753 * explicitly setting the offset to be 0, and
12754 * zero'ing out the old data in the buffer.
12755 */
12756 if (offs == 0) {
12757 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
12758 buf->dtb_offset = 0;
12759 woffs = total;
12760
12761 while (woffs < buf->dtb_size)
12762 tomax[woffs++] = 0;
12763 }
12764
12765 woffs = 0;
12766 break;
12767 }
12768
12769 woffs += size;
12770 }
12771
12772 /*
12773 * We have a wrapped offset. It may be that the wrapped offset
12774 * has become zero -- that's okay.
12775 */
12776 buf->dtb_xamot_offset = woffs;
12777 }
12778
12779 out:
12780 /*
12781 * Now we can plow the buffer with any necessary padding.
12782 */
12783 while (offs & (align - 1)) {
12784 /*
12785 * Assert that our alignment is off by a number which
12786 * is itself sizeof (uint32_t) aligned.
12787 */
12788 ASSERT(!((align - (offs & (align - 1))) &
12789 (sizeof (uint32_t) - 1)));
12790 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
12791 offs += sizeof (uint32_t);
12792 }
12793
12794 if (buf->dtb_flags & DTRACEBUF_FILL) {
12795 if (offs + needed > buf->dtb_size - state->dts_reserve) {
12796 buf->dtb_flags |= DTRACEBUF_FULL;
12797 return (-1);
12798 }
12799 }
12800
12801 if (mstate == NULL)
12802 return (offs);
12803
12804 /*
12805 * For ring buffers and fill buffers, the scratch space is always
12806 * the inactive buffer.
12807 */
12808 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
12809 mstate->dtms_scratch_size = buf->dtb_size;
12810 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
12811
12812 return (offs);
12813 }
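
/*
 * A compiled-out, purely illustrative sketch of the reserve/consume pattern
 * that probe context follows when it uses the routine above: reserve space
 * at the ECB's size and alignment, lay down the record header and payload
 * at the returned offset, and only then advance the buffer offset.  The
 * payload and the bare-bones record header handling here are deliberately
 * simplified.
 */
#if 0
static void
dtrace_buffer_store_example(dtrace_buffer_t *buf, dtrace_ecb_t *ecb,
    dtrace_state_t *state, uint64_t payload)
{
	intptr_t offs;

	offs = dtrace_buffer_reserve(buf, ecb->dte_size, ecb->dte_alignment,
	    state, NULL);

	if (offs < 0)
		return;		/* inactive, full, or dropped */

	/*
	 * The record begins with its header (which identifies the EPID),
	 * followed by the record data itself.
	 */
	((dtrace_rechdr_t *)(buf->dtb_tomax + offs))->dtrh_epid =
	    ecb->dte_epid;
	DTRACE_STORE(uint64_t, buf->dtb_tomax,
	    offs + sizeof (dtrace_rechdr_t), payload);

	buf->dtb_offset = offs + ecb->dte_size;
}
#endif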
12814
12815 static void
12816 dtrace_buffer_polish(dtrace_buffer_t *buf)
12817 {
12818 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
12819 ASSERT(MUTEX_HELD(&dtrace_lock));
12820
12821 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
12822 return;
12823
12824 /*
12825 * We need to polish the ring buffer. There are three cases:
12826 *
12827 * - The first (and presumably most common) is that there is no gap
12828 * between the buffer offset and the wrapped offset. In this case,
12829 * there is nothing in the buffer that isn't valid data; we can
12830 * mark the buffer as polished and return.
12831 *
12832 * - The second (less common than the first but still more common
12833 * than the third) is that there is a gap between the buffer offset
12834 * and the wrapped offset, and the wrapped offset is larger than the
12835 * buffer offset. This can happen because of an alignment issue, or
12836 * can happen because of a call to dtrace_buffer_reserve() that
12837 * didn't subsequently consume the buffer space. In this case,
12838 * we need to zero the data from the buffer offset to the wrapped
12839 * offset.
12840 *
12841 * - The third (and least common) is that there is a gap between the
12842 * buffer offset and the wrapped offset, but the wrapped offset is
12843 * _less_ than the buffer offset. This can only happen because a
12844 * call to dtrace_buffer_reserve() induced a wrap, but the space
12845 * was not subsequently consumed. In this case, we need to zero the
12846 * space from the offset to the end of the buffer _and_ from the
12847 * top of the buffer to the wrapped offset.
	 * top of the buffer to the wrapped offset.
	 */
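
	/*
	 * A purely illustrative example with a 64-byte ring buffer: if
	 * dtb_offset is 16 and dtb_xamot_offset is 48, this is the second
	 * case and bytes 16..47 are zeroed; if dtb_offset is 48 and
	 * dtb_xamot_offset is 16, this is the third case and bytes 48..63
	 * and 0..15 are zeroed.
	 */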
12849 if (buf->dtb_offset < buf->dtb_xamot_offset) {
12850 bzero(buf->dtb_tomax + buf->dtb_offset,
12851 buf->dtb_xamot_offset - buf->dtb_offset);
12852 }
12853
12854 if (buf->dtb_offset > buf->dtb_xamot_offset) {
12855 bzero(buf->dtb_tomax + buf->dtb_offset,
12856 buf->dtb_size - buf->dtb_offset);
12857 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
12858 }
12859 }
12860
12861 /*
12862 * This routine determines if data generated at the specified time has likely
12863 * been entirely consumed at user-level. This routine is called to determine
12864 * if an ECB on a defunct probe (but for an active enabling) can be safely
12865 * disabled and destroyed.
12866 */
12867 static int
12868 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when)
12869 {
12870 int i;
12871
12872 for (i = 0; i < NCPU; i++) {
12873 dtrace_buffer_t *buf = &bufs[i];
12874
12875 if (buf->dtb_size == 0)
12876 continue;
12877
12878 if (buf->dtb_flags & DTRACEBUF_RING)
12879 return (0);
12880
12881 if (!buf->dtb_switched && buf->dtb_offset != 0)
12882 return (0);
12883
12884 if (buf->dtb_switched - buf->dtb_interval < when)
12885 return (0);
12886 }
12887
12888 return (1);
12889 }
12890
12891 static void
12892 dtrace_buffer_free(dtrace_buffer_t *bufs)
12893 {
12894 int i;
12895
12896 for (i = 0; i < NCPU; i++) {
12897 dtrace_buffer_t *buf = &bufs[i];
12898
12899 if (buf->dtb_tomax == NULL) {
12900 ASSERT(buf->dtb_xamot == NULL);
12901 ASSERT(buf->dtb_size == 0);
12902 continue;
12903 }
12904
12905 if (buf->dtb_xamot != NULL) {
12906 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
12907 kmem_free(buf->dtb_xamot, buf->dtb_size);
12908 }
12909
12910 kmem_free(buf->dtb_tomax, buf->dtb_size);
12911 buf->dtb_size = 0;
12912 buf->dtb_tomax = NULL;
12913 buf->dtb_xamot = NULL;
12914 }
12915 }
12916
12917 /*
12918 * DTrace Enabling Functions
12919 */
12920 static dtrace_enabling_t *
12921 dtrace_enabling_create(dtrace_vstate_t *vstate)
12922 {
12923 dtrace_enabling_t *enab;
12924
12925 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
12926 enab->dten_vstate = vstate;
12927
12928 return (enab);
12929 }
12930
12931 static void
12932 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
12933 {
12934 dtrace_ecbdesc_t **ndesc;
12935 size_t osize, nsize;
12936
12937 /*
12938 * We can't add to enablings after we've enabled them, or after we've
12939 * retained them.
12940 */
12941 ASSERT(enab->dten_probegen == 0);
12942 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
12943
12944 if (enab->dten_ndesc < enab->dten_maxdesc) {
12945 enab->dten_desc[enab->dten_ndesc++] = ecb;
12946 return;
12947 }
12948
12949 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12950
12951 if (enab->dten_maxdesc == 0) {
12952 enab->dten_maxdesc = 1;
12953 } else {
12954 enab->dten_maxdesc <<= 1;
12955 }
12956
12957 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
12958
12959 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
12960 ndesc = kmem_zalloc(nsize, KM_SLEEP);
12961 bcopy(enab->dten_desc, ndesc, osize);
12962 if (enab->dten_desc != NULL)
12963 kmem_free(enab->dten_desc, osize);
12964
12965 enab->dten_desc = ndesc;
12966 enab->dten_desc[enab->dten_ndesc++] = ecb;
12967 }
12968
12969 static void
12970 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
12971 dtrace_probedesc_t *pd)
12972 {
12973 dtrace_ecbdesc_t *new;
12974 dtrace_predicate_t *pred;
12975 dtrace_actdesc_t *act;
12976
12977 /*
12978 * We're going to create a new ECB description that matches the
12979 * specified ECB in every way, but has the specified probe description.
12980 */
12981 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
12982
12983 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
12984 dtrace_predicate_hold(pred);
12985
12986 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
12987 dtrace_actdesc_hold(act);
12988
12989 new->dted_action = ecb->dted_action;
12990 new->dted_pred = ecb->dted_pred;
12991 new->dted_probe = *pd;
12992 new->dted_uarg = ecb->dted_uarg;
12993
12994 dtrace_enabling_add(enab, new);
12995 }
12996
12997 static void
12998 dtrace_enabling_dump(dtrace_enabling_t *enab)
12999 {
13000 int i;
13001
13002 for (i = 0; i < enab->dten_ndesc; i++) {
13003 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
13004
13005 #ifdef __FreeBSD__
13006 printf("dtrace: enabling probe %d (%s:%s:%s:%s)\n", i,
13007 desc->dtpd_provider, desc->dtpd_mod,
13008 desc->dtpd_func, desc->dtpd_name);
13009 #else
13010 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
13011 desc->dtpd_provider, desc->dtpd_mod,
13012 desc->dtpd_func, desc->dtpd_name);
13013 #endif
13014 }
13015 }
13016
13017 static void
13018 dtrace_enabling_destroy(dtrace_enabling_t *enab)
13019 {
13020 int i;
13021 dtrace_ecbdesc_t *ep;
13022 dtrace_vstate_t *vstate = enab->dten_vstate;
13023
13024 ASSERT(MUTEX_HELD(&dtrace_lock));
13025
13026 for (i = 0; i < enab->dten_ndesc; i++) {
13027 dtrace_actdesc_t *act, *next;
13028 dtrace_predicate_t *pred;
13029
13030 ep = enab->dten_desc[i];
13031
13032 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
13033 dtrace_predicate_release(pred, vstate);
13034
13035 for (act = ep->dted_action; act != NULL; act = next) {
13036 next = act->dtad_next;
13037 dtrace_actdesc_release(act, vstate);
13038 }
13039
13040 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
13041 }
13042
13043 if (enab->dten_desc != NULL)
13044 kmem_free(enab->dten_desc,
13045 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
13046
13047 /*
13048 * If this was a retained enabling, decrement the dts_nretained count
13049 * and take it off of the dtrace_retained list.
13050 */
13051 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
13052 dtrace_retained == enab) {
13053 ASSERT(enab->dten_vstate->dtvs_state != NULL);
13054 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
13055 enab->dten_vstate->dtvs_state->dts_nretained--;
13056 dtrace_retained_gen++;
13057 }
13058
13059 if (enab->dten_prev == NULL) {
13060 if (dtrace_retained == enab) {
13061 dtrace_retained = enab->dten_next;
13062
13063 if (dtrace_retained != NULL)
13064 dtrace_retained->dten_prev = NULL;
13065 }
13066 } else {
13067 ASSERT(enab != dtrace_retained);
13068 ASSERT(dtrace_retained != NULL);
13069 enab->dten_prev->dten_next = enab->dten_next;
13070 }
13071
13072 if (enab->dten_next != NULL) {
13073 ASSERT(dtrace_retained != NULL);
13074 enab->dten_next->dten_prev = enab->dten_prev;
13075 }
13076
13077 kmem_free(enab, sizeof (dtrace_enabling_t));
13078 }
13079
13080 static int
13081 dtrace_enabling_retain(dtrace_enabling_t *enab)
13082 {
13083 dtrace_state_t *state;
13084
13085 ASSERT(MUTEX_HELD(&dtrace_lock));
13086 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
13087 ASSERT(enab->dten_vstate != NULL);
13088
13089 state = enab->dten_vstate->dtvs_state;
13090 ASSERT(state != NULL);
13091
13092 /*
13093 * We only allow each state to retain dtrace_retain_max enablings.
13094 */
13095 if (state->dts_nretained >= dtrace_retain_max)
13096 return (ENOSPC);
13097
13098 state->dts_nretained++;
13099 dtrace_retained_gen++;
13100
13101 if (dtrace_retained == NULL) {
13102 dtrace_retained = enab;
13103 return (0);
13104 }
13105
13106 enab->dten_next = dtrace_retained;
13107 dtrace_retained->dten_prev = enab;
13108 dtrace_retained = enab;
13109
13110 return (0);
13111 }
13112
13113 static int
13114 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
13115 dtrace_probedesc_t *create)
13116 {
13117 dtrace_enabling_t *new, *enab;
13118 int found = 0, err = ENOENT;
13119
13120 ASSERT(MUTEX_HELD(&dtrace_lock));
13121 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
13122 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
13123 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
13124 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
13125
13126 new = dtrace_enabling_create(&state->dts_vstate);
13127
13128 /*
13129 * Iterate over all retained enablings, looking for enablings that
13130 * match the specified state.
13131 */
13132 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
13133 int i;
13134
13135 /*
13136 * dtvs_state can only be NULL for helper enablings -- and
13137 * helper enablings can't be retained.
13138 */
13139 ASSERT(enab->dten_vstate->dtvs_state != NULL);
13140
13141 if (enab->dten_vstate->dtvs_state != state)
13142 continue;
13143
13144 /*
13145 * Now iterate over each probe description; we're looking for
13146 * an exact match to the specified probe description.
13147 */
13148 for (i = 0; i < enab->dten_ndesc; i++) {
13149 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
13150 dtrace_probedesc_t *pd = &ep->dted_probe;
13151
13152 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
13153 continue;
13154
13155 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
13156 continue;
13157
13158 if (strcmp(pd->dtpd_func, match->dtpd_func))
13159 continue;
13160
13161 if (strcmp(pd->dtpd_name, match->dtpd_name))
13162 continue;
13163
13164 /*
13165 * We have a winning probe! Add it to our growing
13166 * enabling.
13167 */
13168 found = 1;
13169 dtrace_enabling_addlike(new, ep, create);
13170 }
13171 }
13172
13173 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
13174 dtrace_enabling_destroy(new);
13175 return (err);
13176 }
13177
13178 return (0);
13179 }
13180
13181 static void
13182 dtrace_enabling_retract(dtrace_state_t *state)
13183 {
13184 dtrace_enabling_t *enab, *next;
13185
13186 ASSERT(MUTEX_HELD(&dtrace_lock));
13187
13188 /*
13189 * Iterate over all retained enablings, destroy the enablings retained
13190 * for the specified state.
13191 */
13192 for (enab = dtrace_retained; enab != NULL; enab = next) {
13193 next = enab->dten_next;
13194
13195 /*
13196 * dtvs_state can only be NULL for helper enablings -- and
13197 * helper enablings can't be retained.
13198 */
13199 ASSERT(enab->dten_vstate->dtvs_state != NULL);
13200
13201 if (enab->dten_vstate->dtvs_state == state) {
13202 ASSERT(state->dts_nretained > 0);
13203 dtrace_enabling_destroy(enab);
13204 }
13205 }
13206
13207 ASSERT(state->dts_nretained == 0);
13208 }
13209
13210 static int
13211 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
13212 {
13213 int i = 0;
13214 int total_matched = 0, matched = 0;
13215
13216 ASSERT(MUTEX_HELD(&cpu_lock));
13217 ASSERT(MUTEX_HELD(&dtrace_lock));
13218
13219 for (i = 0; i < enab->dten_ndesc; i++) {
13220 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
13221
13222 enab->dten_current = ep;
13223 enab->dten_error = 0;
13224
13225 /*
13226 * If a provider failed to enable a probe then get out and
13227 * let the consumer know we failed.
13228 */
13229 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
13230 return (EBUSY);
13231
13232 total_matched += matched;
13233
13234 if (enab->dten_error != 0) {
13235 /*
13236 * If we get an error half-way through enabling the
13237 * probes, we kick out -- perhaps with some number of
13238 * them enabled. Leaving enabled probes enabled may
13239 * be slightly confusing for user-level, but we expect
13240 * that no one will attempt to actually drive on in
13241 * the face of such errors. If this is an anonymous
13242 * enabling (indicated with a NULL nmatched pointer),
13243 * we cmn_err() a message. We aren't expecting to
			 * get such an error -- insofar as it can exist at all,
13245 * it would be a result of corrupted DOF in the driver
13246 * properties.
13247 */
13248 if (nmatched == NULL) {
13249 cmn_err(CE_WARN, "dtrace_enabling_match() "
13250 "error on %p: %d", (void *)ep,
13251 enab->dten_error);
13252 }
13253
13254 return (enab->dten_error);
13255 }
13256 }
13257
13258 enab->dten_probegen = dtrace_probegen;
13259 if (nmatched != NULL)
13260 *nmatched = total_matched;
13261
13262 return (0);
13263 }
13264
13265 static void
13266 dtrace_enabling_matchall(void)
13267 {
13268 dtrace_enabling_t *enab;
13269
13270 mutex_enter(&cpu_lock);
13271 mutex_enter(&dtrace_lock);
13272
13273 /*
13274 * Iterate over all retained enablings to see if any probes match
13275 * against them. We only perform this operation on enablings for which
13276 * we have sufficient permissions by virtue of being in the global zone
13277 * or in the same zone as the DTrace client. Because we can be called
13278 * after dtrace_detach() has been called, we cannot assert that there
13279 * are retained enablings. We can safely load from dtrace_retained,
13280 * however: the taskq_destroy() at the end of dtrace_detach() will
13281 * block pending our completion.
13282 */
13283 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
13284 #ifdef illumos
13285 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
13286
13287 if (INGLOBALZONE(curproc) ||
13288 cr != NULL && getzoneid() == crgetzoneid(cr))
13289 #endif
13290 (void) dtrace_enabling_match(enab, NULL);
13291 }
13292
13293 mutex_exit(&dtrace_lock);
13294 mutex_exit(&cpu_lock);
13295 }
13296
13297 /*
13298 * If an enabling is to be enabled without having matched probes (that is, if
13299 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
13300 * enabling must be _primed_ by creating an ECB for every ECB description.
13301 * This must be done to assure that we know the number of speculations, the
13302 * number of aggregations, the minimum buffer size needed, etc. before we
13303 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
 * enabling any probes, we create ECBs for every ECB description, but with a
13305 * NULL probe -- which is exactly what this function does.
13306 */
13307 static void
13308 dtrace_enabling_prime(dtrace_state_t *state)
13309 {
13310 dtrace_enabling_t *enab;
13311 int i;
13312
13313 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
13314 ASSERT(enab->dten_vstate->dtvs_state != NULL);
13315
13316 if (enab->dten_vstate->dtvs_state != state)
13317 continue;
13318
13319 /*
13320 * We don't want to prime an enabling more than once, lest
13321 * we allow a malicious user to induce resource exhaustion.
13322 * (The ECBs that result from priming an enabling aren't
13323 * leaked -- but they also aren't deallocated until the
13324 * consumer state is destroyed.)
13325 */
13326 if (enab->dten_primed)
13327 continue;
13328
13329 for (i = 0; i < enab->dten_ndesc; i++) {
13330 enab->dten_current = enab->dten_desc[i];
13331 (void) dtrace_probe_enable(NULL, enab);
13332 }
13333
13334 enab->dten_primed = 1;
13335 }
13336 }
13337
13338 /*
13339 * Called to indicate that probes should be provided due to retained
13340 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
13341 * must take an initial lap through the enabling calling the dtps_provide()
13342 * entry point explicitly to allow for autocreated probes.
13343 */
13344 static void
13345 dtrace_enabling_provide(dtrace_provider_t *prv)
13346 {
13347 int i, all = 0;
13348 dtrace_probedesc_t desc;
13349 dtrace_genid_t gen;
13350
13351 ASSERT(MUTEX_HELD(&dtrace_lock));
13352 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
13353
13354 if (prv == NULL) {
13355 all = 1;
13356 prv = dtrace_provider;
13357 }
13358
13359 do {
13360 dtrace_enabling_t *enab;
13361 void *parg = prv->dtpv_arg;
13362
13363 retry:
13364 gen = dtrace_retained_gen;
13365 for (enab = dtrace_retained; enab != NULL;
13366 enab = enab->dten_next) {
13367 for (i = 0; i < enab->dten_ndesc; i++) {
13368 desc = enab->dten_desc[i]->dted_probe;
13369 mutex_exit(&dtrace_lock);
13370 prv->dtpv_pops.dtps_provide(parg, &desc);
13371 mutex_enter(&dtrace_lock);
13372 /*
13373 * Process the retained enablings again if
13374 * they have changed while we weren't holding
13375 * dtrace_lock.
13376 */
13377 if (gen != dtrace_retained_gen)
13378 goto retry;
13379 }
13380 }
13381 } while (all && (prv = prv->dtpv_next) != NULL);
13382
13383 mutex_exit(&dtrace_lock);
13384 dtrace_probe_provide(NULL, all ? NULL : prv);
13385 mutex_enter(&dtrace_lock);
13386 }
13387
13388 /*
13389 * Called to reap ECBs that are attached to probes from defunct providers.
13390 */
13391 static void
13392 dtrace_enabling_reap(void)
13393 {
13394 dtrace_provider_t *prov;
13395 dtrace_probe_t *probe;
13396 dtrace_ecb_t *ecb;
13397 hrtime_t when;
13398 int i;
13399
13400 mutex_enter(&cpu_lock);
13401 mutex_enter(&dtrace_lock);
13402
13403 for (i = 0; i < dtrace_nprobes; i++) {
13404 if ((probe = dtrace_probes[i]) == NULL)
13405 continue;
13406
13407 if (probe->dtpr_ecb == NULL)
13408 continue;
13409
13410 prov = probe->dtpr_provider;
13411
13412 if ((when = prov->dtpv_defunct) == 0)
13413 continue;
13414
13415 /*
13416 * We have ECBs on a defunct provider: we want to reap these
13417 * ECBs to allow the provider to unregister. The destruction
13418 * of these ECBs must be done carefully: if we destroy the ECB
13419 * and the consumer later wishes to consume an EPID that
13420 * corresponds to the destroyed ECB (and if the EPID metadata
13421 * has not been previously consumed), the consumer will abort
13422 * processing on the unknown EPID. To reduce (but not, sadly,
13423 * eliminate) the possibility of this, we will only destroy an
13424 * ECB for a defunct provider if, for the state that
13425 * corresponds to the ECB:
13426 *
13427 * (a) There is no speculative tracing (which can effectively
13428 * cache an EPID for an arbitrary amount of time).
13429 *
13430 * (b) The principal buffers have been switched twice since the
13431 * provider became defunct.
13432 *
13433 * (c) The aggregation buffers are of zero size or have been
13434 * switched twice since the provider became defunct.
13435 *
13436 * We use dts_speculates to determine (a) and call a function
13437 * (dtrace_buffer_consumed()) to determine (b) and (c). Note
13438 * that as soon as we've been unable to destroy one of the ECBs
13439 * associated with the probe, we quit trying -- reaping is only
13440 * fruitful in as much as we can destroy all ECBs associated
13441 * with the defunct provider's probes.
13442 */
13443 while ((ecb = probe->dtpr_ecb) != NULL) {
13444 dtrace_state_t *state = ecb->dte_state;
13445 dtrace_buffer_t *buf = state->dts_buffer;
13446 dtrace_buffer_t *aggbuf = state->dts_aggbuffer;
13447
13448 if (state->dts_speculates)
13449 break;
13450
13451 if (!dtrace_buffer_consumed(buf, when))
13452 break;
13453
13454 if (!dtrace_buffer_consumed(aggbuf, when))
13455 break;
13456
13457 dtrace_ecb_disable(ecb);
13458 ASSERT(probe->dtpr_ecb != ecb);
13459 dtrace_ecb_destroy(ecb);
13460 }
13461 }
13462
13463 mutex_exit(&dtrace_lock);
13464 mutex_exit(&cpu_lock);
13465 }

/*
13467 * DTrace DOF Functions
13468 */
13469 /*ARGSUSED*/
13470 static void
13471 dtrace_dof_error(dof_hdr_t *dof, const char *str)
13472 {
13473 if (dtrace_err_verbose)
13474 cmn_err(CE_WARN, "failed to process DOF: %s", str);
13475
13476 #ifdef DTRACE_ERRDEBUG
13477 dtrace_errdebug(str);
13478 #endif
13479 }
13480
13481 /*
13482 * Create DOF out of a currently enabled state. Right now, we only create
13483 * DOF containing the run-time options -- but this could be expanded to create
13484 * complete DOF representing the enabled state.
13485 */
13486 static dof_hdr_t *
13487 dtrace_dof_create(dtrace_state_t *state)
13488 {
13489 dof_hdr_t *dof;
13490 dof_sec_t *sec;
13491 dof_optdesc_t *opt;
13492 int i, len = sizeof (dof_hdr_t) +
13493 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
13494 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
13495
13496 ASSERT(MUTEX_HELD(&dtrace_lock));
13497
13498 dof = kmem_zalloc(len, KM_SLEEP);
13499 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
13500 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
13501 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
13502 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
13503
13504 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
13505 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
13506 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
13507 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
13508 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
13509 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
13510
13511 dof->dofh_flags = 0;
13512 dof->dofh_hdrsize = sizeof (dof_hdr_t);
13513 dof->dofh_secsize = sizeof (dof_sec_t);
13514 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
13515 dof->dofh_secoff = sizeof (dof_hdr_t);
13516 dof->dofh_loadsz = len;
13517 dof->dofh_filesz = len;
13518 dof->dofh_pad = 0;
13519
13520 /*
13521 * Fill in the option section header...
13522 */
13523 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
13524 sec->dofs_type = DOF_SECT_OPTDESC;
13525 sec->dofs_align = sizeof (uint64_t);
13526 sec->dofs_flags = DOF_SECF_LOAD;
13527 sec->dofs_entsize = sizeof (dof_optdesc_t);
13528
13529 opt = (dof_optdesc_t *)((uintptr_t)sec +
13530 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
13531
13532 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
13533 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
13534
13535 for (i = 0; i < DTRACEOPT_MAX; i++) {
13536 opt[i].dofo_option = i;
13537 opt[i].dofo_strtab = DOF_SECIDX_NONE;
13538 opt[i].dofo_value = state->dts_options[i];
13539 }
13540
13541 return (dof);
13542 }
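
/*
 * A compiled-out, purely illustrative sketch of walking the single-section
 * DOF generated above to recover the run-time option values.
 */
#if 0
static void
dtrace_dof_walk_options(dof_hdr_t *dof)
{
	dof_sec_t *sec;
	dof_optdesc_t *opt;
	uint64_t i;

	/*
	 * The option section is the first (and only) section in this DOF.
	 */
	sec = (dof_sec_t *)((uintptr_t)dof + dof->dofh_secoff);
	ASSERT(sec->dofs_type == DOF_SECT_OPTDESC);

	opt = (dof_optdesc_t *)((uintptr_t)dof + sec->dofs_offset);

	for (i = 0; i < sec->dofs_size / sec->dofs_entsize; i++) {
		cmn_err(CE_CONT, "option %u = %lld\n", opt[i].dofo_option,
		    (long long)opt[i].dofo_value);
	}
}
#endif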
13543
13544 static dof_hdr_t *
13545 dtrace_dof_copyin(uintptr_t uarg, int *errp)
13546 {
13547 dof_hdr_t hdr, *dof;
13548
13549 ASSERT(!MUTEX_HELD(&dtrace_lock));
13550
13551 /*
13552 * First, we're going to copyin() the sizeof (dof_hdr_t).
13553 */
13554 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
13555 dtrace_dof_error(NULL, "failed to copyin DOF header");
13556 *errp = EFAULT;
13557 return (NULL);
13558 }
13559
13560 /*
13561 * Now we'll allocate the entire DOF and copy it in -- provided
13562 * that the length isn't outrageous.
13563 */
13564 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
13565 dtrace_dof_error(&hdr, "load size exceeds maximum");
13566 *errp = E2BIG;
13567 return (NULL);
13568 }
13569
13570 if (hdr.dofh_loadsz < sizeof (hdr)) {
13571 dtrace_dof_error(&hdr, "invalid load size");
13572 *errp = EINVAL;
13573 return (NULL);
13574 }
13575
13576 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
13577
13578 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
13579 dof->dofh_loadsz != hdr.dofh_loadsz) {
13580 kmem_free(dof, hdr.dofh_loadsz);
13581 *errp = EFAULT;
13582 return (NULL);
13583 }
13584
13585 return (dof);
13586 }
13587
13588 #ifdef __FreeBSD__
13589 static dof_hdr_t *
13590 dtrace_dof_copyin_proc(struct proc *p, uintptr_t uarg, int *errp)
13591 {
13592 dof_hdr_t hdr, *dof;
13593 struct thread *td;
13594 size_t loadsz;
13595
13596 ASSERT(!MUTEX_HELD(&dtrace_lock));
13597
13598 td = curthread;
13599
13600 /*
13601 * First, we're going to copyin() the sizeof (dof_hdr_t).
13602 */
13603 if (proc_readmem(td, p, uarg, &hdr, sizeof(hdr)) != sizeof(hdr)) {
13604 dtrace_dof_error(NULL, "failed to copyin DOF header");
13605 *errp = EFAULT;
13606 return (NULL);
13607 }
13608
13609 /*
13610 * Now we'll allocate the entire DOF and copy it in -- provided
13611 * that the length isn't outrageous.
13612 */
13613 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
13614 dtrace_dof_error(&hdr, "load size exceeds maximum");
13615 *errp = E2BIG;
13616 return (NULL);
13617 }
13618 loadsz = (size_t)hdr.dofh_loadsz;
13619
13620 if (loadsz < sizeof (hdr)) {
13621 dtrace_dof_error(&hdr, "invalid load size");
13622 *errp = EINVAL;
13623 return (NULL);
13624 }
13625
13626 dof = kmem_alloc(loadsz, KM_SLEEP);
13627
13628 if (proc_readmem(td, p, uarg, dof, loadsz) != loadsz ||
13629 dof->dofh_loadsz != loadsz) {
13630 kmem_free(dof, hdr.dofh_loadsz);
13631 *errp = EFAULT;
13632 return (NULL);
13633 }
13634
13635 return (dof);
13636 }
13637 #endif /* __FreeBSD__ */
13638 #ifdef __NetBSD__
13639 static dof_hdr_t *
13640 dtrace_dof_copyin_pid(pid_t pid, const void *uarg, int *errp)
13641 {
13642 dof_hdr_t hdr, *dof;
13643 size_t loadsz;
13644 int err;
13645
13646 err = copyin_pid(pid, uarg, &hdr, sizeof(hdr));
13647 if (err != 0) {
13648 *errp = err;
13649 return (NULL);
13650 }
13651
13652 /*
13653 * Now we'll allocate the entire DOF and copy it in -- provided
13654 * that the length isn't outrageous.
13655 */
13656 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
13657 dtrace_dof_error(&hdr, "load size exceeds maximum");
13658 *errp = E2BIG;
13659 return (NULL);
13660 }
13661 loadsz = (size_t)hdr.dofh_loadsz;
13662
13663 if (loadsz < sizeof (hdr)) {
13664 dtrace_dof_error(&hdr, "invalid load size");
13665 *errp = EINVAL;
13666 return (NULL);
13667 }
13668
13669 dof = kmem_alloc(loadsz, KM_SLEEP);
13670
13671 err = copyin_pid(pid, uarg, dof, loadsz);
13672 if (err == 0 && dof->dofh_loadsz != loadsz)
13673 err = EFAULT;
13674 if (err != 0) {
13675 kmem_free(dof, loadsz);
13676 *errp = EFAULT;
13677 return (NULL);
13678 }
13679
13680 return (dof);
13681 }
13682 #endif
13683
13684 #ifdef __FreeBSD__
13685 static __inline uchar_t
13686 dtrace_dof_char(char c)
13687 {
13688
13689 switch (c) {
13690 case '0':
13691 case '1':
13692 case '2':
13693 case '3':
13694 case '4':
13695 case '5':
13696 case '6':
13697 case '7':
13698 case '8':
13699 case '9':
13700 return (c - '0');
13701 case 'A':
13702 case 'B':
13703 case 'C':
13704 case 'D':
13705 case 'E':
13706 case 'F':
13707 return (c - 'A' + 10);
13708 case 'a':
13709 case 'b':
13710 case 'c':
13711 case 'd':
13712 case 'e':
13713 case 'f':
13714 return (c - 'a' + 10);
13715 }
13716 /* Should not reach here. */
13717 return (UCHAR_MAX);
13718 }
13719 #endif /* __FreeBSD__ */
13720
13721 static dof_hdr_t *
13722 dtrace_dof_property(const char *name)
13723 {
13724 #ifdef illumos
13725 uchar_t *buf;
13726 uint64_t loadsz;
13727 unsigned int len, i;
13728 dof_hdr_t *dof = NULL;
13729
13730 /*
13731 * Unfortunately, array of values in .conf files are always (and
13732 * only) interpreted to be integer arrays. We must read our DOF
13733 * as an integer array, and then squeeze it into a byte array.
13734 */
13735 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
13736 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
13737 return (NULL);
13738
13739 for (i = 0; i < len; i++)
13740 buf[i] = (uchar_t)(((int *)buf)[i]);
13741
13742 if (len < sizeof (dof_hdr_t)) {
13743 ddi_prop_free(buf);
13744 dtrace_dof_error(NULL, "truncated header");
13745 return (NULL);
13746 }
13747
13748 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
13749 ddi_prop_free(buf);
13750 dtrace_dof_error(NULL, "truncated DOF");
13751 return (NULL);
13752 }
13753
13754 if (loadsz >= dtrace_dof_maxsize) {
13755 ddi_prop_free(buf);
13756 dtrace_dof_error(NULL, "oversized DOF");
13757 return (NULL);
13758 }
13759
13760 dof = kmem_alloc(loadsz, KM_SLEEP);
13761 bcopy(buf, dof, loadsz);
13762 ddi_prop_free(buf);
13763
13764 return (dof);
13765 #endif /* illumos */
13766 #ifdef __FreeBSD__
13767 uint8_t *dofbuf;
13768 u_char *data, *eol;
13769 caddr_t doffile;
13770 size_t bytes, len, i;
13771 dof_hdr_t *dof;
13772 u_char c1, c2;
13773
13774 dof = NULL;
13775
13776 doffile = preload_search_by_type("dtrace_dof");
13777 if (doffile == NULL)
13778 return (NULL);
13779
13780 data = preload_fetch_addr(doffile);
13781 len = preload_fetch_size(doffile);
13782 for (;;) {
13783 /* Look for the end of the line. All lines end in a newline. */
13784 eol = memchr(data, '\n', len);
13785 if (eol == NULL)
13786 return (NULL);
13787
13788 if (strncmp(name, data, strlen(name)) == 0)
13789 break;
13790
13791 eol++; /* skip past the newline */
13792 len -= eol - data;
13793 data = eol;
13794 }
13795
13796 /* We've found the data corresponding to the specified key. */
13797
13798 data += strlen(name) + 1; /* skip past the '=' */
13799 len = eol - data;
13800 if (len % 2 != 0) {
13801 dtrace_dof_error(NULL, "invalid DOF encoding length");
13802 goto doferr;
13803 }
13804 bytes = len / 2;
13805 if (bytes < sizeof(dof_hdr_t)) {
13806 dtrace_dof_error(NULL, "truncated header");
13807 goto doferr;
13808 }
13809
13810 /*
13811 * Each byte is represented by the two ASCII characters in its hex
13812 * representation.
13813 */
13814 dofbuf = malloc(bytes, M_SOLARIS, M_WAITOK);
13815 for (i = 0; i < bytes; i++) {
13816 c1 = dtrace_dof_char(data[i * 2]);
13817 c2 = dtrace_dof_char(data[i * 2 + 1]);
		if (c1 == UCHAR_MAX || c2 == UCHAR_MAX) {
			dtrace_dof_error(NULL, "invalid hex char in DOF");
			/* 'dof' is still NULL here, so free dofbuf directly. */
			free(dofbuf, M_SOLARIS);
			return (NULL);
		}
13822 dofbuf[i] = c1 * 16 + c2;
13823 }
13824
13825 dof = (dof_hdr_t *)dofbuf;
13826 if (bytes < dof->dofh_loadsz) {
13827 dtrace_dof_error(NULL, "truncated DOF");
13828 goto doferr;
13829 }
13830
13831 if (dof->dofh_loadsz >= dtrace_dof_maxsize) {
13832 dtrace_dof_error(NULL, "oversized DOF");
13833 goto doferr;
13834 }
13835
13836 return (dof);
13837
13838 doferr:
13839 free(dof, M_SOLARIS);
13840 return (NULL);
13841 #endif /* __FreeBSD__ */
13842 #ifdef __NetBSD__
13843 return (NULL);
13844 #endif /* __NetBSD__ */
13845 }
13846
13847 static void
13848 dtrace_dof_destroy(dof_hdr_t *dof)
13849 {
13850 kmem_free(dof, dof->dofh_loadsz);
13851 }
13852
13853 /*
13854 * Return the dof_sec_t pointer corresponding to a given section index. If the
13855 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
13856 * a type other than DOF_SECT_NONE is specified, the header is checked against
13857 * this type and NULL is returned if the types do not match.
13858 */
13859 static dof_sec_t *
13860 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
13861 {
13862 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
13863 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
13864
13865 if (i >= dof->dofh_secnum) {
13866 dtrace_dof_error(dof, "referenced section index is invalid");
13867 return (NULL);
13868 }
13869
13870 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
13871 dtrace_dof_error(dof, "referenced section is not loadable");
13872 return (NULL);
13873 }
13874
13875 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
13876 dtrace_dof_error(dof, "referenced section is the wrong type");
13877 return (NULL);
13878 }
13879
13880 return (sec);
13881 }
13882
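/*
 * Translate a DOF_SECT_PROBEDESC section into the dtrace_probedesc_t given
 * by 'desc'.  The provider, module, function and name strings are copied
 * out of the referenced string table after bounds-checking each offset.
 */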
13883 static dtrace_probedesc_t *
13884 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
13885 {
13886 dof_probedesc_t *probe;
13887 dof_sec_t *strtab;
13888 uintptr_t daddr = (uintptr_t)dof;
13889 uintptr_t str;
13890 size_t size;
13891
13892 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
13893 dtrace_dof_error(dof, "invalid probe section");
13894 return (NULL);
13895 }
13896
13897 if (sec->dofs_align != sizeof (dof_secidx_t)) {
13898 dtrace_dof_error(dof, "bad alignment in probe description");
13899 return (NULL);
13900 }
13901
13902 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
13903 dtrace_dof_error(dof, "truncated probe description");
13904 return (NULL);
13905 }
13906
13907 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
13908 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
13909
13910 if (strtab == NULL)
13911 return (NULL);
13912
13913 str = daddr + strtab->dofs_offset;
13914 size = strtab->dofs_size;
13915
13916 if (probe->dofp_provider >= strtab->dofs_size) {
13917 dtrace_dof_error(dof, "corrupt probe provider");
13918 return (NULL);
13919 }
13920
13921 (void) strncpy(desc->dtpd_provider,
13922 (char *)(str + probe->dofp_provider),
13923 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
13924
13925 if (probe->dofp_mod >= strtab->dofs_size) {
13926 dtrace_dof_error(dof, "corrupt probe module");
13927 return (NULL);
13928 }
13929
13930 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
13931 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
13932
13933 if (probe->dofp_func >= strtab->dofs_size) {
13934 dtrace_dof_error(dof, "corrupt probe function");
13935 return (NULL);
13936 }
13937
13938 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
13939 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
13940
13941 if (probe->dofp_name >= strtab->dofs_size) {
13942 dtrace_dof_error(dof, "corrupt probe name");
13943 return (NULL);
13944 }
13945
13946 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
13947 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
13948
13949 return (desc);
13950 }
13951
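/*
 * Construct a DIF object (dtrace_difo_t) from a DOF_SECT_DIFOHDR section
 * and the sub-sections it links to: DIF text, integer table, string table
 * and variable table.  The combined size of the sub-sections is bounded by
 * dtrace_difo_maxsize, and the resulting DIFO is validated and initialized
 * before it is returned.
 */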
13952 static dtrace_difo_t *
13953 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
13954 cred_t *cr)
13955 {
13956 dtrace_difo_t *dp;
13957 size_t ttl = 0;
13958 dof_difohdr_t *dofd;
13959 uintptr_t daddr = (uintptr_t)dof;
13960 size_t max = dtrace_difo_maxsize;
13961 int i, l, n;
13962
13963 static const struct {
13964 int section;
13965 int bufoffs;
13966 int lenoffs;
13967 int entsize;
13968 int align;
13969 const char *msg;
13970 } difo[] = {
13971 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
13972 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
13973 sizeof (dif_instr_t), "multiple DIF sections" },
13974
13975 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
13976 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
13977 sizeof (uint64_t), "multiple integer tables" },
13978
13979 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
13980 offsetof(dtrace_difo_t, dtdo_strlen), 0,
13981 sizeof (char), "multiple string tables" },
13982
13983 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
13984 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
13985 sizeof (uint_t), "multiple variable tables" },
13986
13987 { DOF_SECT_NONE, 0, 0, 0, 0, NULL }
13988 };
13989
13990 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
13991 dtrace_dof_error(dof, "invalid DIFO header section");
13992 return (NULL);
13993 }
13994
13995 if (sec->dofs_align != sizeof (dof_secidx_t)) {
13996 dtrace_dof_error(dof, "bad alignment in DIFO header");
13997 return (NULL);
13998 }
13999
14000 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
14001 sec->dofs_size % sizeof (dof_secidx_t)) {
14002 dtrace_dof_error(dof, "bad size in DIFO header");
14003 return (NULL);
14004 }
14005
14006 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
14007 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
14008
14009 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
14010 dp->dtdo_rtype = dofd->dofd_rtype;
14011
14012 for (l = 0; l < n; l++) {
14013 dof_sec_t *subsec;
14014 void **bufp;
14015 uint32_t *lenp;
14016
14017 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
14018 dofd->dofd_links[l])) == NULL)
14019 goto err; /* invalid section link */
14020
14021 if (ttl + subsec->dofs_size > max) {
14022 dtrace_dof_error(dof, "exceeds maximum size");
14023 goto err;
14024 }
14025
14026 ttl += subsec->dofs_size;
14027
14028 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
14029 if (subsec->dofs_type != difo[i].section)
14030 continue;
14031
14032 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
14033 dtrace_dof_error(dof, "section not loaded");
14034 goto err;
14035 }
14036
14037 if (subsec->dofs_align != difo[i].align) {
14038 dtrace_dof_error(dof, "bad alignment");
14039 goto err;
14040 }
14041
14042 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
14043 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
14044
14045 if (*bufp != NULL) {
14046 dtrace_dof_error(dof, difo[i].msg);
14047 goto err;
14048 }
14049
14050 if (difo[i].entsize != subsec->dofs_entsize) {
14051 dtrace_dof_error(dof, "entry size mismatch");
14052 goto err;
14053 }
14054
14055 if (subsec->dofs_entsize != 0 &&
14056 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
14057 dtrace_dof_error(dof, "corrupt entry size");
14058 goto err;
14059 }
14060
14061 *lenp = subsec->dofs_size;
14062 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
14063 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
14064 *bufp, subsec->dofs_size);
14065
14066 if (subsec->dofs_entsize != 0)
14067 *lenp /= subsec->dofs_entsize;
14068
14069 break;
14070 }
14071
14072 /*
14073 * If we encounter a loadable DIFO sub-section that is not
14074 * known to us, assume this is a broken program and fail.
14075 */
14076 if (difo[i].section == DOF_SECT_NONE &&
14077 (subsec->dofs_flags & DOF_SECF_LOAD)) {
14078 dtrace_dof_error(dof, "unrecognized DIFO subsection");
14079 goto err;
14080 }
14081 }
14082
14083 if (dp->dtdo_buf == NULL) {
14084 /*
14085 * We can't have a DIF object without DIF text.
14086 */
14087 dtrace_dof_error(dof, "missing DIF text");
14088 goto err;
14089 }
14090
14091 /*
	 * Before we validate the DIF object, run through the variable table
	 * looking for string variables -- if any of them have a zero size,
	 * we'll set their size to be the system-wide default string size.
	 * Note that this should _not_ happen if the "strsize" option has
	 * been set -- in that case, the compiler should have set the size to
	 * reflect the setting of the option.
14098 */
14099 for (i = 0; i < dp->dtdo_varlen; i++) {
14100 dtrace_difv_t *v = &dp->dtdo_vartab[i];
14101 dtrace_diftype_t *t = &v->dtdv_type;
14102
14103 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
14104 continue;
14105
14106 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
14107 t->dtdt_size = dtrace_strsize_default;
14108 }
14109
14110 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
14111 goto err;
14112
14113 dtrace_difo_init(dp, vstate);
14114 return (dp);
14115
14116 err:
14117 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
14118 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
14119 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
14120 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
14121
14122 kmem_free(dp, sizeof (dtrace_difo_t));
14123 return (NULL);
14124 }
14125
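/*
 * Build a predicate from a DIFO header section: the DIFO is constructed via
 * dtrace_dof_difo() and wrapped in a dtrace_predicate_t.
 */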
14126 static dtrace_predicate_t *
14127 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
14128 cred_t *cr)
14129 {
14130 dtrace_difo_t *dp;
14131
14132 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
14133 return (NULL);
14134
14135 return (dtrace_predicate_create(dp));
14136 }
14137
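/*
 * Translate a DOF_SECT_ACTDESC section into a chain of dtrace_actdesc_t
 * structures.  printf()-like actions (and DIFEXPR actions carrying a string
 * table index) have their format string or type name copied out of the
 * string table; actions that reference a DIFO section have that DIFO
 * constructed here as well.
 */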
14138 static dtrace_actdesc_t *
14139 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
14140 cred_t *cr)
14141 {
14142 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
14143 dof_actdesc_t *desc;
14144 dof_sec_t *difosec;
14145 size_t offs;
14146 uintptr_t daddr = (uintptr_t)dof;
14147 uint64_t arg;
14148 dtrace_actkind_t kind;
14149
14150 if (sec->dofs_type != DOF_SECT_ACTDESC) {
14151 dtrace_dof_error(dof, "invalid action section");
14152 return (NULL);
14153 }
14154
14155 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
14156 dtrace_dof_error(dof, "truncated action description");
14157 return (NULL);
14158 }
14159
14160 if (sec->dofs_align != sizeof (uint64_t)) {
14161 dtrace_dof_error(dof, "bad alignment in action description");
14162 return (NULL);
14163 }
14164
14165 if (sec->dofs_size < sec->dofs_entsize) {
14166 dtrace_dof_error(dof, "section entry size exceeds total size");
14167 return (NULL);
14168 }
14169
14170 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
14171 dtrace_dof_error(dof, "bad entry size in action description");
14172 return (NULL);
14173 }
14174
14175 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
14176 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
14177 return (NULL);
14178 }
14179
14180 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
14181 desc = (dof_actdesc_t *)(daddr +
14182 (uintptr_t)sec->dofs_offset + offs);
14183 kind = (dtrace_actkind_t)desc->dofa_kind;
14184
14185 if ((DTRACEACT_ISPRINTFLIKE(kind) &&
14186 (kind != DTRACEACT_PRINTA ||
14187 desc->dofa_strtab != DOF_SECIDX_NONE)) ||
14188 (kind == DTRACEACT_DIFEXPR &&
14189 desc->dofa_strtab != DOF_SECIDX_NONE)) {
14190 dof_sec_t *strtab;
14191 char *str, *fmt;
14192 uint64_t i;
14193
14194 /*
14195 * The argument to these actions is an index into the
14196 * DOF string table. For printf()-like actions, this
14197 * is the format string. For print(), this is the
14198 * CTF type of the expression result.
14199 */
14200 if ((strtab = dtrace_dof_sect(dof,
14201 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
14202 goto err;
14203
14204 str = (char *)((uintptr_t)dof +
14205 (uintptr_t)strtab->dofs_offset);
14206
14207 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
14208 if (str[i] == '\0')
14209 break;
14210 }
14211
14212 if (i >= strtab->dofs_size) {
14213 dtrace_dof_error(dof, "bogus format string");
14214 goto err;
14215 }
14216
14217 if (i == desc->dofa_arg) {
14218 dtrace_dof_error(dof, "empty format string");
14219 goto err;
14220 }
14221
14222 i -= desc->dofa_arg;
14223 fmt = kmem_alloc(i + 1, KM_SLEEP);
14224 bcopy(&str[desc->dofa_arg], fmt, i + 1);
14225 arg = (uint64_t)(uintptr_t)fmt;
14226 } else {
14227 if (kind == DTRACEACT_PRINTA) {
14228 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
14229 arg = 0;
14230 } else {
14231 arg = desc->dofa_arg;
14232 }
14233 }
14234
14235 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
14236 desc->dofa_uarg, arg);
14237
14238 if (last != NULL) {
14239 last->dtad_next = act;
14240 } else {
14241 first = act;
14242 }
14243
14244 last = act;
14245
14246 if (desc->dofa_difo == DOF_SECIDX_NONE)
14247 continue;
14248
14249 if ((difosec = dtrace_dof_sect(dof,
14250 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
14251 goto err;
14252
14253 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
14254
14255 if (act->dtad_difo == NULL)
14256 goto err;
14257 }
14258
14259 ASSERT(first != NULL);
14260 return (first);
14261
14262 err:
14263 for (act = first; act != NULL; act = next) {
14264 next = act->dtad_next;
14265 dtrace_actdesc_release(act, vstate);
14266 }
14267
14268 return (NULL);
14269 }
14270
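/*
 * Translate a DOF_SECT_ECBDESC section into a dtrace_ecbdesc_t: the probe
 * description, the optional predicate and the optional action chain are
 * each pulled in from the sections that the ECB description references.
 */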
14271 static dtrace_ecbdesc_t *
14272 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
14273 cred_t *cr)
14274 {
14275 dtrace_ecbdesc_t *ep;
14276 dof_ecbdesc_t *ecb;
14277 dtrace_probedesc_t *desc;
14278 dtrace_predicate_t *pred = NULL;
14279
14280 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
14281 dtrace_dof_error(dof, "truncated ECB description");
14282 return (NULL);
14283 }
14284
14285 if (sec->dofs_align != sizeof (uint64_t)) {
14286 dtrace_dof_error(dof, "bad alignment in ECB description");
14287 return (NULL);
14288 }
14289
14290 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
14291 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
14292
14293 if (sec == NULL)
14294 return (NULL);
14295
14296 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
14297 ep->dted_uarg = ecb->dofe_uarg;
14298 desc = &ep->dted_probe;
14299
14300 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
14301 goto err;
14302
14303 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
14304 if ((sec = dtrace_dof_sect(dof,
14305 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
14306 goto err;
14307
14308 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
14309 goto err;
14310
14311 ep->dted_pred.dtpdd_predicate = pred;
14312 }
14313
14314 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
14315 if ((sec = dtrace_dof_sect(dof,
14316 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
14317 goto err;
14318
14319 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
14320
14321 if (ep->dted_action == NULL)
14322 goto err;
14323 }
14324
14325 return (ep);
14326
14327 err:
14328 if (pred != NULL)
14329 dtrace_predicate_release(pred, vstate);
14330 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
14331 return (NULL);
14332 }
14333
14334 /*
14335 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
14336 * specified DOF. SETX relocations are computed using 'ubase', the base load
14337 * address of the object containing the DOF, and DOFREL relocations are relative
14338 * to the relocation offset within the DOF.
14339 */
14340 static int
14341 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase,
14342 uint64_t udaddr)
14343 {
14344 uintptr_t daddr = (uintptr_t)dof;
14345 dof_relohdr_t *dofr =
14346 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
14347 dof_sec_t *ss, *rs, *ts;
14348 dof_relodesc_t *r;
14349 uint_t i, n;
14350
14351 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
14352 sec->dofs_align != sizeof (dof_secidx_t)) {
14353 dtrace_dof_error(dof, "invalid relocation header");
14354 return (-1);
14355 }
14356
14357 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
14358 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
14359 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
14360
14361 if (ss == NULL || rs == NULL || ts == NULL)
14362 return (-1); /* dtrace_dof_error() has been called already */
14363
14364 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
14365 rs->dofs_align != sizeof (uint64_t)) {
14366 dtrace_dof_error(dof, "invalid relocation section");
14367 return (-1);
14368 }
14369
14370 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
14371 n = rs->dofs_size / rs->dofs_entsize;
14372
14373 for (i = 0; i < n; i++) {
14374 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
14375
14376 switch (r->dofr_type) {
14377 case DOF_RELO_NONE:
14378 break;
14379 case DOF_RELO_SETX:
14380 case DOF_RELO_DOFREL:
14381 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
14382 sizeof (uint64_t) > ts->dofs_size) {
14383 dtrace_dof_error(dof, "bad relocation offset");
14384 return (-1);
14385 }
14386
14387 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
14388 dtrace_dof_error(dof, "misaligned setx relo");
14389 return (-1);
14390 }
14391
14392 if (r->dofr_type == DOF_RELO_SETX)
14393 *(uint64_t *)taddr += ubase;
14394 else
14395 *(uint64_t *)taddr +=
14396 udaddr + ts->dofs_offset + r->dofr_offset;
14397 break;
14398 default:
14399 dtrace_dof_error(dof, "invalid relocation type");
14400 return (-1);
14401 }
14402
14403 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
14404 }
14405
14406 return (0);
14407 }
14408
14409 /*
14410 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
14411 * header: it should be at the front of a memory region that is at least
14412 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
14413 * size. It need not be validated in any other way.
14414 */
14415 static int
14416 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
14417 dtrace_enabling_t **enabp, uint64_t ubase, uint64_t udaddr, int noprobes)
14418 {
14419 uint64_t len = dof->dofh_loadsz, seclen;
14420 uintptr_t daddr = (uintptr_t)dof;
14421 dtrace_ecbdesc_t *ep;
14422 dtrace_enabling_t *enab;
14423 uint_t i;
14424
14425 ASSERT(MUTEX_HELD(&dtrace_lock));
14426 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
14427
14428 /*
14429 * Check the DOF header identification bytes. In addition to checking
14430 * valid settings, we also verify that unused bits/bytes are zeroed so
14431 * we can use them later without fear of regressing existing binaries.
14432 */
14433 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
14434 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
14435 dtrace_dof_error(dof, "DOF magic string mismatch");
14436 return (-1);
14437 }
14438
14439 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
14440 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
14441 dtrace_dof_error(dof, "DOF has invalid data model");
14442 return (-1);
14443 }
14444
14445 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
14446 dtrace_dof_error(dof, "DOF encoding mismatch");
14447 return (-1);
14448 }
14449
14450 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
14451 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
14452 dtrace_dof_error(dof, "DOF version mismatch");
14453 return (-1);
14454 }
14455
14456 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
14457 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
14458 return (-1);
14459 }
14460
14461 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
14462 dtrace_dof_error(dof, "DOF uses too many integer registers");
14463 return (-1);
14464 }
14465
14466 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
14467 dtrace_dof_error(dof, "DOF uses too many tuple registers");
14468 return (-1);
14469 }
14470
14471 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
14472 if (dof->dofh_ident[i] != 0) {
14473 dtrace_dof_error(dof, "DOF has invalid ident byte set");
14474 return (-1);
14475 }
14476 }
14477
14478 if (dof->dofh_flags & ~DOF_FL_VALID) {
14479 dtrace_dof_error(dof, "DOF has invalid flag bits set");
14480 return (-1);
14481 }
14482
14483 if (dof->dofh_secsize == 0) {
14484 dtrace_dof_error(dof, "zero section header size");
14485 return (-1);
14486 }
14487
14488 /*
14489 * Check that the section headers don't exceed the amount of DOF
14490 * data. Note that we cast the section size and number of sections
14491 * to uint64_t's to prevent possible overflow in the multiplication.
14492 */
14493 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
14494
14495 if (dof->dofh_secoff > len || seclen > len ||
14496 dof->dofh_secoff + seclen > len) {
14497 dtrace_dof_error(dof, "truncated section headers");
14498 return (-1);
14499 }
14500
14501 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
14502 dtrace_dof_error(dof, "misaligned section headers");
14503 return (-1);
14504 }
14505
14506 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
14507 dtrace_dof_error(dof, "misaligned section size");
14508 return (-1);
14509 }
14510
14511 /*
14512 * Take an initial pass through the section headers to be sure that
14513 * the headers don't have stray offsets. If the 'noprobes' flag is
14514 * set, do not permit sections relating to providers, probes, or args.
14515 */
14516 for (i = 0; i < dof->dofh_secnum; i++) {
14517 dof_sec_t *sec = (dof_sec_t *)(daddr +
14518 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14519
14520 if (noprobes) {
14521 switch (sec->dofs_type) {
14522 case DOF_SECT_PROVIDER:
14523 case DOF_SECT_PROBES:
14524 case DOF_SECT_PRARGS:
14525 case DOF_SECT_PROFFS:
14526 dtrace_dof_error(dof, "illegal sections "
14527 "for enabling");
14528 return (-1);
14529 }
14530 }
14531
14532 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
14533 !(sec->dofs_flags & DOF_SECF_LOAD)) {
14534 dtrace_dof_error(dof, "loadable section with load "
14535 "flag unset");
14536 return (-1);
14537 }
14538
14539 if (!(sec->dofs_flags & DOF_SECF_LOAD))
14540 continue; /* just ignore non-loadable sections */
14541
14542 if (!ISP2(sec->dofs_align)) {
14543 dtrace_dof_error(dof, "bad section alignment");
14544 return (-1);
14545 }
14546
14547 if (sec->dofs_offset & (sec->dofs_align - 1)) {
14548 dtrace_dof_error(dof, "misaligned section");
14549 return (-1);
14550 }
14551
14552 if (sec->dofs_offset > len || sec->dofs_size > len ||
14553 sec->dofs_offset + sec->dofs_size > len) {
14554 dtrace_dof_error(dof, "corrupt section header");
14555 return (-1);
14556 }
14557
14558 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
14559 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
14560 dtrace_dof_error(dof, "non-terminating string table");
14561 return (-1);
14562 }
14563 }
14564
14565 /*
14566 * Take a second pass through the sections and locate and perform any
14567 * relocations that are present. We do this after the first pass to
14568 * be sure that all sections have had their headers validated.
14569 */
14570 for (i = 0; i < dof->dofh_secnum; i++) {
14571 dof_sec_t *sec = (dof_sec_t *)(daddr +
14572 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14573
14574 if (!(sec->dofs_flags & DOF_SECF_LOAD))
14575 continue; /* skip sections that are not loadable */
14576
14577 switch (sec->dofs_type) {
14578 case DOF_SECT_URELHDR:
14579 if (dtrace_dof_relocate(dof, sec, ubase, udaddr) != 0)
14580 return (-1);
14581 break;
14582 }
14583 }
14584
14585 if ((enab = *enabp) == NULL)
14586 enab = *enabp = dtrace_enabling_create(vstate);
14587
14588 for (i = 0; i < dof->dofh_secnum; i++) {
14589 dof_sec_t *sec = (dof_sec_t *)(daddr +
14590 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14591
14592 if (sec->dofs_type != DOF_SECT_ECBDESC)
14593 continue;
14594
14595 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
14596 dtrace_enabling_destroy(enab);
14597 *enabp = NULL;
14598 return (-1);
14599 }
14600
14601 dtrace_enabling_add(enab, ep);
14602 }
14603
14604 return (0);
14605 }
14606
14607 /*
14608 * Process DOF for any options. This routine assumes that the DOF has been
14609 * at least processed by dtrace_dof_slurp().
14610 */
14611 static int
14612 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
14613 {
14614 int i, rval;
14615 uint32_t entsize;
14616 size_t offs;
14617 dof_optdesc_t *desc;
14618
14619 for (i = 0; i < dof->dofh_secnum; i++) {
14620 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
14621 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
14622
14623 if (sec->dofs_type != DOF_SECT_OPTDESC)
14624 continue;
14625
14626 if (sec->dofs_align != sizeof (uint64_t)) {
14627 dtrace_dof_error(dof, "bad alignment in "
14628 "option description");
14629 return (EINVAL);
14630 }
14631
14632 if ((entsize = sec->dofs_entsize) == 0) {
14633 dtrace_dof_error(dof, "zeroed option entry size");
14634 return (EINVAL);
14635 }
14636
14637 if (entsize < sizeof (dof_optdesc_t)) {
14638 dtrace_dof_error(dof, "bad option entry size");
14639 return (EINVAL);
14640 }
14641
14642 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
14643 desc = (dof_optdesc_t *)((uintptr_t)dof +
14644 (uintptr_t)sec->dofs_offset + offs);
14645
14646 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
14647 dtrace_dof_error(dof, "non-zero option string");
14648 return (EINVAL);
14649 }
14650
14651 if (desc->dofo_value == DTRACEOPT_UNSET) {
14652 dtrace_dof_error(dof, "unset option");
14653 return (EINVAL);
14654 }
14655
14656 if ((rval = dtrace_state_option(state,
14657 desc->dofo_option, desc->dofo_value)) != 0) {
14658 dtrace_dof_error(dof, "rejected option");
14659 return (rval);
14660 }
14661 }
14662 }
14663
14664 return (0);
14665 }
14666
14667 /*
14668 * DTrace Consumer State Functions
14669 */
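/*
 * Initialize the dynamic variable state: carve 'size' bytes into a hash
 * table of dtrace_dynhash_t buckets followed by a free list of fixed-size
 * chunks, which is divided evenly among the CPUs.  Returns ENOMEM if the
 * backing allocation fails.
 */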
14670 static int
14671 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
14672 {
14673 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
14674 void *base;
14675 uintptr_t limit;
14676 dtrace_dynvar_t *dvar, *next, *start;
14677 int i;
14678
14679 ASSERT(MUTEX_HELD(&dtrace_lock));
14680 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
14681
14682 bzero(dstate, sizeof (dtrace_dstate_t));
14683
14684 if ((dstate->dtds_chunksize = chunksize) == 0)
14685 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
14686
14687 VERIFY(dstate->dtds_chunksize < LONG_MAX);
14688
14689 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
14690 size = min;
14691
14692 if ((base = kmem_zalloc(size, KM_NOSLEEP | KM_NORMALPRI)) == NULL)
14693 return (ENOMEM);
14694
14695 dstate->dtds_size = size;
14696 dstate->dtds_base = base;
14697 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
14698 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
14699
14700 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
14701
14702 if (hashsize != 1 && (hashsize & 1))
14703 hashsize--;
14704
14705 dstate->dtds_hashsize = hashsize;
14706 dstate->dtds_hash = dstate->dtds_base;
14707
14708 /*
14709 * Set all of our hash buckets to point to the single sink, and (if
14710 * it hasn't already been set), set the sink's hash value to be the
14711 * sink sentinel value. The sink is needed for dynamic variable
14712 * lookups to know that they have iterated over an entire, valid hash
14713 * chain.
14714 */
14715 for (i = 0; i < hashsize; i++)
14716 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
14717
14718 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
14719 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
14720
14721 /*
14722 * Determine number of active CPUs. Divide free list evenly among
14723 * active CPUs.
14724 */
14725 start = (dtrace_dynvar_t *)
14726 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
14727 limit = (uintptr_t)base + size;
14728
14729 VERIFY((uintptr_t)start < limit);
14730 VERIFY((uintptr_t)start >= (uintptr_t)base);
14731
14732 maxper = (limit - (uintptr_t)start) / NCPU;
14733 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
14734
14735 #ifdef illumos
14736 for (i = 0; i < NCPU; i++)
14737 #endif
14738 #ifdef __FreeBSD__
14739 CPU_FOREACH(i)
14740 #endif
14741 #ifdef __NetBSD__
14742 for (i = 0; i < NCPU; i++)
14743 #endif
14744 {
14745 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
14746
14747 /*
14748 * If we don't even have enough chunks to make it once through
14749 * NCPUs, we're just going to allocate everything to the first
14750 * CPU. And if we're on the last CPU, we're going to allocate
14751 * whatever is left over. In either case, we set the limit to
14752 * be the limit of the dynamic variable space.
14753 */
14754 if (maxper == 0 || i == NCPU - 1) {
14755 limit = (uintptr_t)base + size;
14756 start = NULL;
14757 } else {
14758 limit = (uintptr_t)start + maxper;
14759 start = (dtrace_dynvar_t *)limit;
14760 }
14761
14762 VERIFY(limit <= (uintptr_t)base + size);
14763
14764 for (;;) {
14765 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
14766 dstate->dtds_chunksize);
14767
14768 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
14769 break;
14770
14771 VERIFY((uintptr_t)dvar >= (uintptr_t)base &&
14772 (uintptr_t)dvar <= (uintptr_t)base + size);
14773 dvar->dtdv_next = next;
14774 dvar = next;
14775 }
14776
14777 if (maxper == 0)
14778 break;
14779 }
14780
14781 return (0);
14782 }
14783
14784 static void
14785 dtrace_dstate_fini(dtrace_dstate_t *dstate)
14786 {
14787 ASSERT(MUTEX_HELD(&cpu_lock));
14788
14789 if (dstate->dtds_base == NULL)
14790 return;
14791
14792 kmem_free(dstate->dtds_base, dstate->dtds_size);
14793 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
14794 }
14795
14796 static void
14797 dtrace_vstate_fini(dtrace_vstate_t *vstate)
14798 {
14799 /*
14800 * Logical XOR, where are you?
14801 */
14802 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
14803
14804 if (vstate->dtvs_nglobals > 0) {
14805 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
14806 sizeof (dtrace_statvar_t *));
14807 }
14808
14809 if (vstate->dtvs_ntlocals > 0) {
14810 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
14811 sizeof (dtrace_difv_t));
14812 }
14813
14814 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
14815
14816 if (vstate->dtvs_nlocals > 0) {
14817 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
14818 sizeof (dtrace_statvar_t *));
14819 }
14820 }
14821
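/*
 * dtrace_state_clean() reclaims dirty dynamic variables and cleans up
 * completed speculations; dtrace_state_deadman() refreshes dts_alive
 * (unless the consumer has stopped reporting status) so that a hung
 * consumer can be detected.  On FreeBSD these are self-rearming callouts;
 * on illumos they are driven by cyclics and on NetBSD by the workers
 * registered with dtrace_state_worker_add() in dtrace_state_go().
 */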
14822 #ifdef __FreeBSD__
14823 static void
14824 dtrace_state_clean(void *arg)
14825 {
14826 dtrace_state_t *state = arg;
14827 dtrace_optval_t *opt = state->dts_options;
14828
14829 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14830 return;
14831
14832 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14833 dtrace_speculation_clean(state);
14834
14835 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
14836 dtrace_state_clean, state);
14837 }
14838
14839 static void
14840 dtrace_state_deadman(void *arg)
14841 {
14842 dtrace_state_t *state = arg;
14843 hrtime_t now;
14844
14845 dtrace_sync();
14846
14847 dtrace_debug_output();
14848
14849 now = dtrace_gethrtime();
14850
14851 if (state != dtrace_anon.dta_state &&
14852 now - state->dts_laststatus >= dtrace_deadman_user)
14853 return;
14854
14855 /*
14856 * We must be sure that dts_alive never appears to be less than the
14857 * value upon entry to dtrace_state_deadman(), and because we lack a
14858 * dtrace_cas64(), we cannot store to it atomically. We thus instead
14859 * store INT64_MAX to it, followed by a memory barrier, followed by
14860 * the new value. This assures that dts_alive never appears to be
14861 * less than its true value, regardless of the order in which the
14862 * stores to the underlying storage are issued.
14863 */
14864 state->dts_alive = INT64_MAX;
14865 dtrace_membar_producer();
14866 state->dts_alive = now;
14867
14868 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
14869 dtrace_state_deadman, state);
14870 }
14871 #else
14872 static void
14873 dtrace_state_clean(dtrace_state_t *state)
14874 {
14875 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
14876 return;
14877
14878 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
14879 dtrace_speculation_clean(state);
14880 }
14881
14882 static void
14883 dtrace_state_deadman(dtrace_state_t *state)
14884 {
14885 hrtime_t now;
14886
14887 dtrace_sync();
14888
14889 now = dtrace_gethrtime();
14890
14891 if (state != dtrace_anon.dta_state &&
14892 now - state->dts_laststatus >= dtrace_deadman_user)
14893 return;
14894
14895 /*
14896 * We must be sure that dts_alive never appears to be less than the
14897 * value upon entry to dtrace_state_deadman(), and because we lack a
14898 * dtrace_cas64(), we cannot store to it atomically. We thus instead
14899 * store INT64_MAX to it, followed by a memory barrier, followed by
14900 * the new value. This assures that dts_alive never appears to be
14901 * less than its true value, regardless of the order in which the
14902 * stores to the underlying storage are issued.
14903 */
14904 state->dts_alive = INT64_MAX;
14905 dtrace_membar_producer();
14906 state->dts_alive = now;
14907 }
14908
#endif /* __FreeBSD__ */
14910
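/*
 * Create a new consumer state: allocate the per-CPU principal and
 * aggregation buffers, seed the per-CPU random state, set the default
 * option values and derive the credential-based visibility and
 * destructiveness flags.  The caller must hold dtrace_lock and cpu_lock.
 */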
14911 static dtrace_state_t *
14912 #ifdef illumos
14913 dtrace_state_create(dev_t *devp, cred_t *cr)
14914 #endif
14915 #ifdef __FreeBSD__
14916 dtrace_state_create(struct cdev *dev, struct ucred *cred __unused)
14917 #endif
14918 #ifdef __NetBSD__
14919 dtrace_state_create(dev_t *devp, cred_t *cr)
14920 #endif
14921 {
14922 #ifdef illumos
14923 minor_t minor;
14924 major_t major;
14925 #else
14926 int m = 0;
14927 #endif
14928 #ifdef __FreeBSD__
14929 cred_t *cr = NULL;
14930 #endif
14931 int cpu_it;
14932 char c[30];
14933 dtrace_state_t *state;
14934 dtrace_optval_t *opt;
14935 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
14936
14937 ASSERT(MUTEX_HELD(&dtrace_lock));
14938 ASSERT(MUTEX_HELD(&cpu_lock));
14939
14940 #ifdef illumos
14941 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
14942 VM_BESTFIT | VM_SLEEP);
14943
14944 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
14945 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
14946 return (NULL);
14947 }
14948
14949 state = ddi_get_soft_state(dtrace_softstate, minor);
14950 #endif
14951 #ifdef __FreeBSD__
14952 if (dev != NULL) {
14953 cr = dev->si_cred;
14954 m = dev2unit(dev);
14955 }
14956 #endif
14957 #ifdef __NetBSD__
14958 m = minor(*devp) & 0x0F;
14959
14960 /* Allocate memory for the state. */
14961 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP);
14962 #endif
14963
14964
14965 state->dts_epid = DTRACE_EPIDNONE + 1;
14966
14967 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m);
14968 #ifdef illumos
14969 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
14970 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14971
14972 if (devp != NULL) {
14973 major = getemajor(*devp);
14974 } else {
14975 major = ddi_driver_major(dtrace_devi);
14976 }
14977
14978 state->dts_dev = makedevice(major, minor);
14979
14980 if (devp != NULL)
14981 *devp = state->dts_dev;
14982 #endif
14983 #ifdef __FreeBSD__
14984 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx);
14985 state->dts_dev = dev;
14986 #endif
14987 #ifdef __NetBSD__
14988 state->dts_aggid_arena = vmem_create(c, 1, INT_MAX, 1,
14989 NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
14990 state->dts_dev = *devp;
14991 #endif
14992
14993 /*
14994 * We allocate NCPU buffers. On the one hand, this can be quite
14995 * a bit of memory per instance (nearly 36K on a Starcat). On the
14996 * other hand, it saves an additional memory reference in the probe
14997 * path.
14998 */
14999 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
15000 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
15001
15002 /*
	 * Allocate and initialise the per-state per-CPU random state.
15004 * SI_SUB_RANDOM < SI_SUB_DTRACE_ANON therefore entropy device is
15005 * assumed to be seeded at this point (if from Fortuna seed file).
15006 */
15007 (void) read_random(&state->dts_rstate[0], 2 * sizeof(uint64_t));
15008 for (cpu_it = 1; cpu_it < NCPU; cpu_it++) {
15009 /*
15010 * Each CPU is assigned a 2^64 period, non-overlapping
15011 * subsequence.
15012 */
15013 dtrace_xoroshiro128_plus_jump(state->dts_rstate[cpu_it-1],
15014 state->dts_rstate[cpu_it]);
15015 }
15016
15017
15018 #ifdef illumos
15019 state->dts_cleaner = CYCLIC_NONE;
15020 state->dts_deadman = CYCLIC_NONE;
15021 #endif
15022 #ifdef __FreeBSD__
15023 callout_init(&state->dts_cleaner, 1);
15024 callout_init(&state->dts_deadman, 1);
15025 #endif
15026 #ifdef __NetBSD__
15027 state->dts_cleaner = NULL;
15028 state->dts_deadman = NULL;
15029 #endif
15030 state->dts_vstate.dtvs_state = state;
15031
15032 for (i = 0; i < DTRACEOPT_MAX; i++)
15033 state->dts_options[i] = DTRACEOPT_UNSET;
15034
15035 /*
15036 * Set the default options.
15037 */
15038 opt = state->dts_options;
15039 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
15040 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
15041 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
15042 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
15043 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
15044 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
15045 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
15046 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
15047 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
15048 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
15049 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
15050 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
15051 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
15052 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
15053
15054 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
15055
15056 /*
15057 * Depending on the user credentials, we set flag bits which alter probe
15058 * visibility or the amount of destructiveness allowed. In the case of
15059 * actual anonymous tracing, or the possession of all privileges, all of
15060 * the normal checks are bypassed.
15061 */
15062 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
15063 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
15064 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
15065 } else {
15066 /*
15067 * Set up the credentials for this instantiation. We take a
15068 * hold on the credential to prevent it from disappearing on
15069 * us; this in turn prevents the zone_t referenced by this
15070 * credential from disappearing. This means that we can
15071 * examine the credential and the zone from probe context.
15072 */
15073 crhold(cr);
15074 state->dts_cred.dcr_cred = cr;
15075
15076 /*
15077 * CRA_PROC means "we have *some* privilege for dtrace" and
15078 * unlocks the use of variables like pid, zonename, etc.
15079 */
15080 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
15081 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
15082 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
15083 }
15084
15085 /*
15086 * dtrace_user allows use of syscall and profile providers.
15087 * If the user also has proc_owner and/or proc_zone, we
15088 * extend the scope to include additional visibility and
15089 * destructive power.
15090 */
15091 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
15092 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
15093 state->dts_cred.dcr_visible |=
15094 DTRACE_CRV_ALLPROC;
15095
15096 state->dts_cred.dcr_action |=
15097 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
15098 }
15099
15100 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
15101 state->dts_cred.dcr_visible |=
15102 DTRACE_CRV_ALLZONE;
15103
15104 state->dts_cred.dcr_action |=
15105 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
15106 }
15107
15108 /*
15109 * If we have all privs in whatever zone this is,
15110 * we can do destructive things to processes which
15111 * have altered credentials.
15112 */
15113 #ifdef illumos
15114 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
15115 cr->cr_zone->zone_privset)) {
15116 state->dts_cred.dcr_action |=
15117 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
15118 }
15119 #endif
15120 }
15121
15122 /*
15123 * Holding the dtrace_kernel privilege also implies that
15124 * the user has the dtrace_user privilege from a visibility
15125 * perspective. But without further privileges, some
15126 * destructive actions are not available.
15127 */
15128 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
15129 /*
15130 * Make all probes in all zones visible. However,
15131 * this doesn't mean that all actions become available
15132 * to all zones.
15133 */
15134 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
15135 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
15136
15137 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
15138 DTRACE_CRA_PROC;
15139 /*
15140 * Holding proc_owner means that destructive actions
15141 * for *this* zone are allowed.
15142 */
15143 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
15144 state->dts_cred.dcr_action |=
15145 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
15146
15147 /*
15148 * Holding proc_zone means that destructive actions
			 * for this user/group ID in all zones are allowed.
15150 */
15151 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
15152 state->dts_cred.dcr_action |=
15153 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
15154
15155 #ifdef illumos
15156 /*
15157 * If we have all privs in whatever zone this is,
15158 * we can do destructive things to processes which
15159 * have altered credentials.
15160 */
15161 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
15162 cr->cr_zone->zone_privset)) {
15163 state->dts_cred.dcr_action |=
15164 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
15165 }
15166 #endif
15167 }
15168
15169 /*
15170 * Holding the dtrace_proc privilege gives control over fasttrap
15171 * and pid providers. We need to grant wider destructive
15172 * privileges in the event that the user has proc_owner and/or
15173 * proc_zone.
15174 */
15175 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
15176 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
15177 state->dts_cred.dcr_action |=
15178 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
15179
15180 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
15181 state->dts_cred.dcr_action |=
15182 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
15183 }
15184 }
15185
15186 return (state);
15187 }
15188
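/*
 * Allocate the buffer set identified by 'which' (principal, aggregation or
 * speculation) at the size given by the corresponding option.  If the
 * allocation fails and bufresize is set to auto, the size is divided down
 * and retried; E2BIG is returned if the size drops below the prereserved
 * space and ENOMEM if no allocation succeeds.
 */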
15189 static int
15190 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
15191 {
15192 dtrace_optval_t *opt = state->dts_options, size;
	processorid_t cpu = 0;
15194 int flags = 0, rval, factor, divisor = 1;
15195
15196 ASSERT(MUTEX_HELD(&dtrace_lock));
15197 ASSERT(MUTEX_HELD(&cpu_lock));
15198 ASSERT(which < DTRACEOPT_MAX);
15199 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
15200 (state == dtrace_anon.dta_state &&
15201 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
15202
15203 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
15204 return (0);
15205
15206 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
15207 cpu = opt[DTRACEOPT_CPU];
15208
15209 if (which == DTRACEOPT_SPECSIZE)
15210 flags |= DTRACEBUF_NOSWITCH;
15211
15212 if (which == DTRACEOPT_BUFSIZE) {
15213 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
15214 flags |= DTRACEBUF_RING;
15215
15216 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
15217 flags |= DTRACEBUF_FILL;
15218
15219 if (state != dtrace_anon.dta_state ||
15220 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
15221 flags |= DTRACEBUF_INACTIVE;
15222 }
15223
15224 for (size = opt[which]; size >= sizeof (uint64_t); size /= divisor) {
15225 /*
15226 * The size must be 8-byte aligned. If the size is not 8-byte
15227 * aligned, drop it down by the difference.
15228 */
15229 if (size & (sizeof (uint64_t) - 1))
15230 size -= size & (sizeof (uint64_t) - 1);
15231
15232 if (size < state->dts_reserve) {
15233 /*
15234 * Buffers always must be large enough to accommodate
15235 * their prereserved space. We return E2BIG instead
15236 * of ENOMEM in this case to allow for user-level
15237 * software to differentiate the cases.
15238 */
15239 return (E2BIG);
15240 }
15241
15242 rval = dtrace_buffer_alloc(buf, size, flags, cpu, &factor);
15243
15244 if (rval != ENOMEM) {
15245 opt[which] = size;
15246 return (rval);
15247 }
15248
15249 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
15250 return (rval);
15251
15252 for (divisor = 2; divisor < factor; divisor <<= 1)
15253 continue;
15254 }
15255
15256 return (ENOMEM);
15257 }
15258
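/*
 * Allocate the principal, aggregation and speculation buffers for the
 * state, each sized according to its corresponding option.
 */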
15259 static int
15260 dtrace_state_buffers(dtrace_state_t *state)
15261 {
15262 dtrace_speculation_t *spec = state->dts_speculations;
15263 int rval, i;
15264
15265 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
15266 DTRACEOPT_BUFSIZE)) != 0)
15267 return (rval);
15268
15269 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
15270 DTRACEOPT_AGGSIZE)) != 0)
15271 return (rval);
15272
15273 for (i = 0; i < state->dts_nspeculations; i++) {
15274 if ((rval = dtrace_state_buffer(state,
15275 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
15276 return (rval);
15277 }
15278
15279 return (0);
15280 }
15281
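/*
 * Compute the buffer space that must be held in reserve for the END probe
 * when the "fill" buffer policy is in effect, so that the final records
 * are guaranteed to fit.
 */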
15282 static void
15283 dtrace_state_prereserve(dtrace_state_t *state)
15284 {
15285 dtrace_ecb_t *ecb;
15286 dtrace_probe_t *probe;
15287
15288 state->dts_reserve = 0;
15289
15290 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
15291 return;
15292
15293 /*
15294 * If our buffer policy is a "fill" buffer policy, we need to set the
15295 * prereserved space to be the space required by the END probes.
15296 */
15297 probe = dtrace_probes[dtrace_probeid_end - 1];
15298 ASSERT(probe != NULL);
15299
15300 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
15301 if (ecb->dte_state != state)
15302 continue;
15303
15304 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
15305 }
15306 }
15307
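/*
 * Start tracing for the given state: prime any retained enablings,
 * allocate the speculation, principal, aggregation and dynamic-variable
 * buffers, start the cleaner and deadman, fire the BEGIN probe and then
 * activate the per-CPU buffers.  The CPU on which BEGIN fired is returned
 * via 'cpu' so that user level can process that buffer first.
 */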
15308 static int
15309 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
15310 {
15311 dtrace_optval_t *opt = state->dts_options, sz, nspec;
15312 dtrace_speculation_t *spec;
15313 dtrace_buffer_t *buf;
15314 #ifdef illumos
15315 cyc_handler_t hdlr;
15316 cyc_time_t when;
15317 #endif
15318 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
15319 dtrace_icookie_t cookie;
15320
15321 mutex_enter(&cpu_lock);
15322 mutex_enter(&dtrace_lock);
15323
15324 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
15325 rval = EBUSY;
15326 goto out;
15327 }
15328
15329 /*
15330 * Before we can perform any checks, we must prime all of the
15331 * retained enablings that correspond to this state.
15332 */
15333 dtrace_enabling_prime(state);
15334
15335 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
15336 rval = EACCES;
15337 goto out;
15338 }
15339
15340 dtrace_state_prereserve(state);
15341
15342 /*
	 * Now we want to try to allocate our speculations.
15344 * We do not automatically resize the number of speculations; if
15345 * this fails, we will fail the operation.
15346 */
15347 nspec = opt[DTRACEOPT_NSPEC];
15348 ASSERT(nspec != DTRACEOPT_UNSET);
15349
15350 if (nspec > INT_MAX) {
15351 rval = ENOMEM;
15352 goto out;
15353 }
15354
15355 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t),
15356 KM_NOSLEEP | KM_NORMALPRI);
15357
15358 if (spec == NULL) {
15359 rval = ENOMEM;
15360 goto out;
15361 }
15362
15363 state->dts_speculations = spec;
15364 state->dts_nspeculations = (int)nspec;
15365
15366 for (i = 0; i < nspec; i++) {
15367 if ((buf = kmem_zalloc(bufsize,
15368 KM_NOSLEEP | KM_NORMALPRI)) == NULL) {
15369 rval = ENOMEM;
15370 goto err;
15371 }
15372
15373 spec[i].dtsp_buffer = buf;
15374 }
15375
15376 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
15377 if (dtrace_anon.dta_state == NULL) {
15378 rval = ENOENT;
15379 goto out;
15380 }
15381
15382 if (state->dts_necbs != 0) {
15383 rval = EALREADY;
15384 goto out;
15385 }
15386
15387 state->dts_anon = dtrace_anon_grab();
15388 ASSERT(state->dts_anon != NULL);
15389 state = state->dts_anon;
15390
15391 /*
15392 * We want "grabanon" to be set in the grabbed state, so we'll
15393 * copy that option value from the grabbing state into the
15394 * grabbed state.
15395 */
15396 state->dts_options[DTRACEOPT_GRABANON] =
15397 opt[DTRACEOPT_GRABANON];
15398
15399 *cpu = dtrace_anon.dta_beganon;
15400
15401 /*
15402 * If the anonymous state is active (as it almost certainly
15403 * is if the anonymous enabling ultimately matched anything),
15404 * we don't allow any further option processing -- but we
15405 * don't return failure.
15406 */
15407 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
15408 goto out;
15409 }
15410
15411 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
15412 opt[DTRACEOPT_AGGSIZE] != 0) {
15413 if (state->dts_aggregations == NULL) {
15414 /*
15415 * We're not going to create an aggregation buffer
15416 * because we don't have any ECBs that contain
15417 * aggregations -- set this option to 0.
15418 */
15419 opt[DTRACEOPT_AGGSIZE] = 0;
15420 } else {
15421 /*
15422 * If we have an aggregation buffer, we must also have
15423 * a buffer to use as scratch.
15424 */
15425 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
15426 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
15427 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
15428 }
15429 }
15430 }
15431
15432 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
15433 opt[DTRACEOPT_SPECSIZE] != 0) {
15434 if (!state->dts_speculates) {
15435 /*
15436 * We're not going to create speculation buffers
15437 * because we don't have any ECBs that actually
15438 * speculate -- set the speculation size to 0.
15439 */
15440 opt[DTRACEOPT_SPECSIZE] = 0;
15441 }
15442 }
15443
15444 /*
15445 * The bare minimum size for any buffer that we're actually going to
15446 * do anything to is sizeof (uint64_t).
15447 */
15448 sz = sizeof (uint64_t);
15449
15450 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
15451 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
15452 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
15453 /*
15454 * A buffer size has been explicitly set to 0 (or to a size
15455 * that will be adjusted to 0) and we need the space -- we
15456 * need to return failure. We return ENOSPC to differentiate
15457 * it from failing to allocate a buffer due to failure to meet
15458 * the reserve (for which we return E2BIG).
15459 */
15460 rval = ENOSPC;
15461 goto out;
15462 }
15463
15464 if ((rval = dtrace_state_buffers(state)) != 0)
15465 goto err;
15466
15467 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
15468 sz = dtrace_dstate_defsize;
15469
15470 do {
15471 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
15472
15473 if (rval == 0)
15474 break;
15475
15476 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
15477 goto err;
15478 } while (sz >>= 1);
15479
15480 opt[DTRACEOPT_DYNVARSIZE] = sz;
15481
15482 if (rval != 0)
15483 goto err;
15484
15485 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
15486 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
15487
15488 if (opt[DTRACEOPT_CLEANRATE] == 0)
15489 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
15490
15491 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
15492 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
15493
15494 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
15495 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
15496
15497 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
15498 #ifdef illumos
15499 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
15500 hdlr.cyh_arg = state;
15501 hdlr.cyh_level = CY_LOW_LEVEL;
15502
15503 when.cyt_when = 0;
15504 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
15505
15506 state->dts_cleaner = cyclic_add(&hdlr, &when);
15507
15508 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
15509 hdlr.cyh_arg = state;
15510 hdlr.cyh_level = CY_LOW_LEVEL;
15511
15512 when.cyt_when = 0;
15513 when.cyt_interval = dtrace_deadman_interval;
15514
15515 state->dts_deadman = cyclic_add(&hdlr, &when);
15516 #endif
15517 #ifdef __FreeBSD__
15518 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC,
15519 dtrace_state_clean, state);
15520 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC,
15521 dtrace_state_deadman, state);
15522 #endif
15523 #ifdef __NetBSD__
15524 state->dts_cleaner = dtrace_state_worker_add(
15525 dtrace_state_clean, state, opt[DTRACEOPT_CLEANRATE]);
15526 state->dts_deadman = dtrace_state_worker_add(
15527 dtrace_state_deadman, state, dtrace_deadman_interval);
15528 #endif
15529
15530 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
15531
15532 #ifdef illumos
15533 if (state->dts_getf != 0 &&
15534 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
15535 /*
15536 * We don't have kernel privs but we have at least one call
15537 * to getf(); we need to bump our zone's count, and (if
15538 * this is the first enabling to have an unprivileged call
15539 * to getf()) we need to hook into closef().
15540 */
15541 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf++;
15542
15543 if (dtrace_getf++ == 0) {
15544 ASSERT(dtrace_closef == NULL);
15545 dtrace_closef = dtrace_getf_barrier;
15546 }
15547 }
15548 #endif
15549
15550 /*
15551 * Now it's time to actually fire the BEGIN probe. We need to disable
15552 * interrupts here both to record the CPU on which we fired the BEGIN
15553 * probe (the data from this CPU will be processed first at user
15554 * level) and to manually activate the buffer for this CPU.
15555 */
15556 cookie = dtrace_interrupt_disable();
15557 *cpu = curcpu_id;
15558 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
15559 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
15560
15561 dtrace_probe(dtrace_probeid_begin,
15562 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
15563 dtrace_interrupt_enable(cookie);
15564 /*
15565 * We may have had an exit action from a BEGIN probe; only change our
15566 * state to ACTIVE if we're still in WARMUP.
15567 */
15568 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
15569 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
15570
15571 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
15572 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
15573
15574 #ifdef __FreeBSD__
15575 /*
15576 * We enable anonymous tracing before APs are started, so we must
15577 * activate buffers using the current CPU.
15578 */
15579 if (state == dtrace_anon.dta_state)
15580 for (int i = 0; i < NCPU; i++)
15581 dtrace_buffer_activate_cpu(state, i);
15582 else
15583 dtrace_xcall(DTRACE_CPUALL,
15584 (dtrace_xcall_t)dtrace_buffer_activate, state);
15585 #else
15586
15587 /*
	 * Regardless of whether we're now in ACTIVE or DRAINING, we
15589 * want each CPU to transition its principal buffer out of the
15590 * INACTIVE state. Doing this assures that no CPU will suddenly begin
15591 * processing an ECB halfway down a probe's ECB chain; all CPUs will
15592 * atomically transition from processing none of a state's ECBs to
15593 * processing all of them.
15594 */
15595 dtrace_xcall(DTRACE_CPUALL,
15596 (dtrace_xcall_t)dtrace_buffer_activate, state);
15597 #endif
15598 goto out;
15599
15600 err:
15601 dtrace_buffer_free(state->dts_buffer);
15602 dtrace_buffer_free(state->dts_aggbuffer);
15603
15604 if ((nspec = state->dts_nspeculations) == 0) {
15605 ASSERT(state->dts_speculations == NULL);
15606 goto out;
15607 }
15608
15609 spec = state->dts_speculations;
15610 ASSERT(spec != NULL);
15611
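	/*
	 * The per-CPU speculation buffers were allocated in order, so the
	 * first speculation with a NULL buffer array marks where allocation
	 * stopped; only the buffers before it need to be torn down here.
	 */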
15612 for (i = 0; i < state->dts_nspeculations; i++) {
15613 if ((buf = spec[i].dtsp_buffer) == NULL)
15614 break;
15615
15616 dtrace_buffer_free(buf);
15617 kmem_free(buf, bufsize);
15618 }
15619
15620 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15621 state->dts_nspeculations = 0;
15622 state->dts_speculations = NULL;
15623
15624 out:
15625 mutex_exit(&dtrace_lock);
15626 mutex_exit(&cpu_lock);
15627
15628 return (rval);
15629 }
15630
15631 static int
15632 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
15633 {
15634 dtrace_icookie_t cookie;
15635
15636 ASSERT(MUTEX_HELD(&dtrace_lock));
15637
15638 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
15639 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
15640 return (EINVAL);
15641
15642 /*
15643 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
15644 * to be sure that every CPU has seen it. See below for the details
15645 * on why this is done.
15646 */
15647 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
15648 dtrace_sync();
15649
15650 /*
15651 * By this point, it is impossible for any CPU to be still processing
15652 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
15653 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
15654 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
15655 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
15656 * iff we're in the END probe.
15657 */
15658 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
15659 dtrace_sync();
15660 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
15661
15662 /*
15663 * Finally, we can release the reserve and call the END probe. We
15664 * disable interrupts across calling the END probe to allow us to
15665 * return the CPU on which we actually called the END probe. This
15666 * allows user-land to be sure that this CPU's principal buffer is
15667 * processed last.
15668 */
15669 state->dts_reserve = 0;
15670
15671 cookie = dtrace_interrupt_disable();
15672 *cpu = curcpu_id;
15673 dtrace_probe(dtrace_probeid_end,
15674 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
15675 dtrace_interrupt_enable(cookie);
15676
15677 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
15678 dtrace_sync();
15679
15680 #ifdef illumos
15681 if (state->dts_getf != 0 &&
15682 !(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)) {
15683 /*
15684 * We don't have kernel privs but we have at least one call
15685 * to getf(); we need to lower our zone's count, and (if
15686 * this is the last enabling to have an unprivileged call
15687 * to getf()) we need to clear the closef() hook.
15688 */
15689 ASSERT(state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf > 0);
15690 ASSERT(dtrace_closef == dtrace_getf_barrier);
15691 ASSERT(dtrace_getf > 0);
15692
15693 state->dts_cred.dcr_cred->cr_zone->zone_dtrace_getf--;
15694
15695 if (--dtrace_getf == 0)
15696 dtrace_closef = NULL;
15697 }
15698 #endif
15699
15700 return (0);
15701 }
15702
15703 static int
15704 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
15705 dtrace_optval_t val)
15706 {
15707 ASSERT(MUTEX_HELD(&dtrace_lock));
15708
15709 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
15710 return (EBUSY);
15711
15712 if (option >= DTRACEOPT_MAX)
15713 return (EINVAL);
15714
15715 if (option != DTRACEOPT_CPU && val < 0)
15716 return (EINVAL);
15717
15718 switch (option) {
15719 case DTRACEOPT_DESTRUCTIVE:
15720 if (dtrace_destructive_disallow)
15721 return (EACCES);
15722
15723 state->dts_cred.dcr_destructive = 1;
15724 break;
15725
15726 case DTRACEOPT_BUFSIZE:
15727 case DTRACEOPT_DYNVARSIZE:
15728 case DTRACEOPT_AGGSIZE:
15729 case DTRACEOPT_SPECSIZE:
15730 case DTRACEOPT_STRSIZE:
15731 if (val < 0)
15732 return (EINVAL);
15733
15734 if (val >= LONG_MAX) {
15735 /*
15736 * If this is an otherwise negative value, set it to
15737 * the highest multiple of 128m less than LONG_MAX.
15738 * Technically, we're adjusting the size without
15739 * regard to the buffer resizing policy, but in fact,
15740 * this has no effect -- if we set the buffer size to
15741 * ~LONG_MAX and the buffer policy is ultimately set to
15742 * be "manual", the buffer allocation is guaranteed to
15743 * fail, if only because the allocation requires two
15744	 * buffers.  (We set the size to the highest
15745 * multiple of 128m because it ensures that the size
15746 * will remain a multiple of a megabyte when
15747 * repeatedly halved -- all the way down to 15m.)
15748 */
15749 val = LONG_MAX - (1 << 27) + 1;
15750 }
15751 }
15752
15753 state->dts_options[option] = val;
15754
15755 return (0);
15756 }
15757
15758 static void
15759 dtrace_state_destroy(dtrace_state_t *state)
15760 {
15761 dtrace_ecb_t *ecb;
15762 dtrace_vstate_t *vstate = &state->dts_vstate;
15763 #ifdef illumos
15764 minor_t minor = getminor(state->dts_dev);
15765 #endif
15766 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
15767 dtrace_speculation_t *spec = state->dts_speculations;
15768 int nspec = state->dts_nspeculations;
15769 uint32_t match;
15770
15771 ASSERT(MUTEX_HELD(&dtrace_lock));
15772 ASSERT(MUTEX_HELD(&cpu_lock));
15773
15774 /*
15775 * First, retract any retained enablings for this state.
15776 */
15777 dtrace_enabling_retract(state);
15778 ASSERT(state->dts_nretained == 0);
15779
15780 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
15781 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
15782 /*
15783 * We have managed to come into dtrace_state_destroy() on a
15784 * hot enabling -- almost certainly because of a disorderly
15785 * shutdown of a consumer. (That is, a consumer that is
15786 * exiting without having called dtrace_stop().) In this case,
15787 * we're going to set our activity to be KILLED, and then
15788 * issue a sync to be sure that everyone is out of probe
15789 * context before we start blowing away ECBs.
15790 */
15791 state->dts_activity = DTRACE_ACTIVITY_KILLED;
15792 dtrace_sync();
15793 }
15794
15795 /*
15796 * Release the credential hold we took in dtrace_state_create().
15797 */
15798 if (state->dts_cred.dcr_cred != NULL)
15799 crfree(state->dts_cred.dcr_cred);
15800
15801 /*
15802 * Now we can safely disable and destroy any enabled probes. Because
15803 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
15804 * (especially if they're all enabled), we take two passes through the
15805 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
15806 * in the second we disable whatever is left over.
15807 */
15808 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
15809 for (i = 0; i < state->dts_necbs; i++) {
15810 if ((ecb = state->dts_ecbs[i]) == NULL)
15811 continue;
15812
15813 if (match && ecb->dte_probe != NULL) {
15814 dtrace_probe_t *probe = ecb->dte_probe;
15815 dtrace_provider_t *prov = probe->dtpr_provider;
15816
15817 if (!(prov->dtpv_priv.dtpp_flags & match))
15818 continue;
15819 }
15820
15821 dtrace_ecb_disable(ecb);
15822 dtrace_ecb_destroy(ecb);
15823 }
15824
15825 if (!match)
15826 break;
15827 }
15828
15829 /*
15830 * Before we free the buffers, perform one more sync to assure that
15831 * every CPU is out of probe context.
15832 */
15833 dtrace_sync();
15834
15835 dtrace_buffer_free(state->dts_buffer);
15836 dtrace_buffer_free(state->dts_aggbuffer);
15837
15838 for (i = 0; i < nspec; i++)
15839 dtrace_buffer_free(spec[i].dtsp_buffer);
15840
15841 #ifdef illumos
15842 if (state->dts_cleaner != CYCLIC_NONE)
15843 cyclic_remove(state->dts_cleaner);
15844
15845 if (state->dts_deadman != CYCLIC_NONE)
15846 cyclic_remove(state->dts_deadman);
15847 #endif
15848 #ifdef __FreeBSD__
15849 callout_stop(&state->dts_cleaner);
15850 callout_drain(&state->dts_cleaner);
15851 callout_stop(&state->dts_deadman);
15852 callout_drain(&state->dts_deadman);
15853 #endif
15854 #ifdef __NetBSD__
15855 if (state->dts_cleaner != NULL)
15856 dtrace_state_worker_remove(state->dts_cleaner);
15857
15858 if (state->dts_deadman != NULL)
15859 dtrace_state_worker_remove(state->dts_deadman);
15860 #endif
15861
15862 dtrace_dstate_fini(&vstate->dtvs_dynvars);
15863 dtrace_vstate_fini(vstate);
15864 if (state->dts_ecbs != NULL)
15865 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
15866
15867 if (state->dts_aggregations != NULL) {
15868 #ifdef DEBUG
15869 for (i = 0; i < state->dts_naggregations; i++)
15870 ASSERT(state->dts_aggregations[i] == NULL);
15871 #endif
15872 ASSERT(state->dts_naggregations > 0);
15873 kmem_free(state->dts_aggregations,
15874 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
15875 }
15876
15877 kmem_free(state->dts_buffer, bufsize);
15878 kmem_free(state->dts_aggbuffer, bufsize);
15879
15880 for (i = 0; i < nspec; i++)
15881 kmem_free(spec[i].dtsp_buffer, bufsize);
15882
15883 if (spec != NULL)
15884 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
15885
15886 dtrace_format_destroy(state);
15887
15888 if (state->dts_aggid_arena != NULL) {
15889 #if defined(illumos) || defined(__NetBSD__)
15890 vmem_destroy(state->dts_aggid_arena);
15891 #else
15892 delete_unrhdr(state->dts_aggid_arena);
15893 #endif
15894 state->dts_aggid_arena = NULL;
15895 }
15896 #ifdef illumos
15897 ddi_soft_state_free(dtrace_softstate, minor);
15898 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
15899 #endif
15900 #ifdef __NetBSD__
15901 kmem_free(state, sizeof(dtrace_state_t));
15902 #endif
15903 }
15904
15905 /*
15906 * DTrace Anonymous Enabling Functions
15907 */
15908 static dtrace_state_t *
15909 dtrace_anon_grab(void)
15910 {
15911 dtrace_state_t *state;
15912
15913 ASSERT(MUTEX_HELD(&dtrace_lock));
15914
15915 if ((state = dtrace_anon.dta_state) == NULL) {
15916 ASSERT(dtrace_anon.dta_enabling == NULL);
15917 return (NULL);
15918 }
15919
15920 ASSERT(dtrace_anon.dta_enabling != NULL);
15921 ASSERT(dtrace_retained != NULL);
15922
15923 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
15924 dtrace_anon.dta_enabling = NULL;
15925 dtrace_anon.dta_state = NULL;
15926
15927 return (state);
15928 }
15929
15930 static void
15931 dtrace_anon_property(void)
15932 {
15933 int i, rv;
15934 dtrace_state_t *state;
15935 dof_hdr_t *dof;
15936 char c[32]; /* enough for "dof-data-" + digits */
15937
15938 ASSERT(MUTEX_HELD(&dtrace_lock));
15939 ASSERT(MUTEX_HELD(&cpu_lock));
15940
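	/*
	 * Walk the "dof-data-N" properties in order (dof-data-0, dof-data-1,
	 * ...), stopping at the first index for which no DOF is found.
	 */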
15941 for (i = 0; ; i++) {
15942 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
15943
15944 dtrace_err_verbose = 1;
15945
15946 if ((dof = dtrace_dof_property(c)) == NULL) {
15947 dtrace_err_verbose = 0;
15948 break;
15949 }
15950
15951 #ifdef illumos
15952 /*
15953 * We want to create anonymous state, so we need to transition
15954 * the kernel debugger to indicate that DTrace is active. If
15955 * this fails (e.g. because the debugger has modified text in
15956 * some way), we won't continue with the processing.
15957 */
15958 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
15959 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
15960 "enabling ignored.");
15961 dtrace_dof_destroy(dof);
15962 break;
15963 }
15964 #endif
15965
15966 /*
15967 * If we haven't allocated an anonymous state, we'll do so now.
15968 */
15969 if ((state = dtrace_anon.dta_state) == NULL) {
15970 state = dtrace_state_create(NULL, NULL);
15971 dtrace_anon.dta_state = state;
15972
15973 if (state == NULL) {
15974 /*
15975 * This basically shouldn't happen: the only
15976 * failure mode from dtrace_state_create() is a
15977 * failure of ddi_soft_state_zalloc() that
15978 * itself should never happen. Still, the
15979 * interface allows for a failure mode, and
15980 * we want to fail as gracefully as possible:
15981 * we'll emit an error message and cease
15982 * processing anonymous state in this case.
15983 */
15984 cmn_err(CE_WARN, "failed to create "
15985 "anonymous state");
15986 dtrace_dof_destroy(dof);
15987 break;
15988 }
15989 }
15990
15991 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
15992 &dtrace_anon.dta_enabling, 0, 0, B_TRUE);
15993
15994 if (rv == 0)
15995 rv = dtrace_dof_options(dof, state);
15996
15997 dtrace_err_verbose = 0;
15998 dtrace_dof_destroy(dof);
15999
16000 if (rv != 0) {
16001 /*
16002 * This is malformed DOF; chuck any anonymous state
16003 * that we created.
16004 */
16005 ASSERT(dtrace_anon.dta_enabling == NULL);
16006 dtrace_state_destroy(state);
16007 dtrace_anon.dta_state = NULL;
16008 break;
16009 }
16010
16011 ASSERT(dtrace_anon.dta_enabling != NULL);
16012 }
16013
16014 if (dtrace_anon.dta_enabling != NULL) {
16015 int rval;
16016
16017 /*
16018 * dtrace_enabling_retain() can only fail because we are
16019 * trying to retain more enablings than are allowed -- but
16020 * we only have one anonymous enabling, and we are guaranteed
16021 * to be allowed at least one retained enabling; we assert
16022 * that dtrace_enabling_retain() returns success.
16023 */
16024 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
16025 ASSERT(rval == 0);
16026
16027 dtrace_enabling_dump(dtrace_anon.dta_enabling);
16028 }
16029 }
16030
16031 /*
16032 * DTrace Helper Functions
16033 */
16034 static void
16035 dtrace_helper_trace(dtrace_helper_action_t *helper,
16036 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
16037 {
16038 uint32_t size, next, nnext, i;
16039 dtrace_helptrace_t *ent, *buffer;
16040 uint16_t flags = cpu_core[curcpu_id].cpuc_dtrace_flags;
16041
16042 if ((buffer = dtrace_helptrace_buffer) == NULL)
16043 return;
16044
16045 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
16046
16047 /*
16048 * What would a tracing framework be without its own tracing
16049 * framework? (Well, a hell of a lot simpler, for starters...)
16050 */
16051 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
16052 sizeof (uint64_t) - sizeof (uint64_t);
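	/*
	 * (The final subtraction accounts for the single dtht_locals[]
	 * element already included in sizeof (dtrace_helptrace_t).)
	 */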
16053
16054 /*
16055 * Iterate until we can allocate a slot in the trace buffer.
16056 */
16057 do {
16058 next = dtrace_helptrace_next;
16059
16060 if (next + size < dtrace_helptrace_bufsize) {
16061 nnext = next + size;
16062 } else {
16063 nnext = size;
16064 }
16065 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
16066
16067 /*
16068 * We have our slot; fill it in.
16069 */
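	/*
	 * A new offset equal to the entry size means the buffer wrapped:
	 * note the wrap and record this entry at the start of the buffer.
	 */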
16070 if (nnext == size) {
16071 dtrace_helptrace_wrapped++;
16072 next = 0;
16073 }
16074
16075 ent = (dtrace_helptrace_t *)((uintptr_t)buffer + next);
16076 ent->dtht_helper = helper;
16077 ent->dtht_where = where;
16078 ent->dtht_nlocals = vstate->dtvs_nlocals;
16079
16080 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
16081 mstate->dtms_fltoffs : -1;
16082 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
16083 ent->dtht_illval = cpu_core[curcpu_id].cpuc_dtrace_illval;
16084
16085 for (i = 0; i < vstate->dtvs_nlocals; i++) {
16086 dtrace_statvar_t *svar;
16087
16088 if ((svar = vstate->dtvs_locals[i]) == NULL)
16089 continue;
16090
16091 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
16092 ent->dtht_locals[i] =
16093 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu_id];
16094 }
16095 }
16096
16097 static uint64_t
16098 dtrace_helper(int which, dtrace_mstate_t *mstate,
16099 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
16100 {
16101 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags;
16102 uint64_t sarg0 = mstate->dtms_arg[0];
16103 uint64_t sarg1 = mstate->dtms_arg[1];
16104 uint64_t rval = 0;
16105 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
16106 dtrace_helper_action_t *helper;
16107 dtrace_vstate_t *vstate;
16108 dtrace_difo_t *pred;
16109 int i, trace = dtrace_helptrace_buffer != NULL;
16110
16111 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
16112
16113 if (helpers == NULL)
16114 return (0);
16115
16116 if ((helper = helpers->dthps_actions[which]) == NULL)
16117 return (0);
16118
16119 vstate = &helpers->dthps_vstate;
16120 mstate->dtms_arg[0] = arg0;
16121 mstate->dtms_arg[1] = arg1;
16122
16123 /*
16124 * Now iterate over each helper. If its predicate evaluates to 'true',
16125 * we'll call the corresponding actions. Note that the below calls
16126 * to dtrace_dif_emulate() may set faults in machine state. This is
16127	 * okay:  our caller (the outer dtrace_dif_emulate()) will simply plow
16128	 * over the stored DIF offset with its own (which is the desired behavior).
16129 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
16130 * from machine state; this is okay, too.
16131 */
16132 for (; helper != NULL; helper = helper->dtha_next) {
16133 if ((pred = helper->dtha_predicate) != NULL) {
16134 if (trace)
16135 dtrace_helper_trace(helper, mstate, vstate, 0);
16136
16137 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
16138 goto next;
16139
16140 if (*flags & CPU_DTRACE_FAULT)
16141 goto err;
16142 }
16143
16144 for (i = 0; i < helper->dtha_nactions; i++) {
16145 if (trace)
16146 dtrace_helper_trace(helper,
16147 mstate, vstate, i + 1);
16148
16149 rval = dtrace_dif_emulate(helper->dtha_actions[i],
16150 mstate, vstate, state);
16151
16152 if (*flags & CPU_DTRACE_FAULT)
16153 goto err;
16154 }
16155
16156 next:
16157 if (trace)
16158 dtrace_helper_trace(helper, mstate, vstate,
16159 DTRACE_HELPTRACE_NEXT);
16160 }
16161
16162 if (trace)
16163 dtrace_helper_trace(helper, mstate, vstate,
16164 DTRACE_HELPTRACE_DONE);
16165
16166 /*
16167 * Restore the arg0 that we saved upon entry.
16168 */
16169 mstate->dtms_arg[0] = sarg0;
16170 mstate->dtms_arg[1] = sarg1;
16171
16172 return (rval);
16173
16174 err:
16175 if (trace)
16176 dtrace_helper_trace(helper, mstate, vstate,
16177 DTRACE_HELPTRACE_ERR);
16178
16179 /*
16180 * Restore the arg0 that we saved upon entry.
16181 */
16182 mstate->dtms_arg[0] = sarg0;
16183 mstate->dtms_arg[1] = sarg1;
16184
16185 return (0);
16186 }
16187
16188 static void
16189 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
16190 dtrace_vstate_t *vstate)
16191 {
16192 int i;
16193
16194 if (helper->dtha_predicate != NULL)
16195 dtrace_difo_release(helper->dtha_predicate, vstate);
16196
16197 for (i = 0; i < helper->dtha_nactions; i++) {
16198 ASSERT(helper->dtha_actions[i] != NULL);
16199 dtrace_difo_release(helper->dtha_actions[i], vstate);
16200 }
16201
16202 kmem_free(helper->dtha_actions,
16203 helper->dtha_nactions * sizeof (dtrace_difo_t *));
16204 kmem_free(helper, sizeof (dtrace_helper_action_t));
16205 }
16206
16207 static int
16208 dtrace_helper_destroygen(dtrace_helpers_t *help, int gen)
16209 {
16210 proc_t *p = curproc;
16211 dtrace_vstate_t *vstate;
16212 int i;
16213
16214 if (help == NULL)
16215 help = p->p_dtrace_helpers;
16216
16217 ASSERT(MUTEX_HELD(&dtrace_lock));
16218
16219 if (help == NULL || gen > help->dthps_generation)
16220 return (EINVAL);
16221
16222 vstate = &help->dthps_vstate;
16223
16224 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16225 dtrace_helper_action_t *last = NULL, *h, *next;
16226
16227 for (h = help->dthps_actions[i]; h != NULL; h = next) {
16228 next = h->dtha_next;
16229
16230 if (h->dtha_generation == gen) {
16231 if (last != NULL) {
16232 last->dtha_next = next;
16233 } else {
16234 help->dthps_actions[i] = next;
16235 }
16236
16237 dtrace_helper_action_destroy(h, vstate);
16238 } else {
16239 last = h;
16240 }
16241 }
16242 }
16243
16244 /*
16245	 * Iterate until we've cleared out all helper providers with the
16246 * given generation number.
16247 */
16248 for (;;) {
16249 dtrace_helper_provider_t *prov;
16250
16251 /*
16252 * Look for a helper provider with the right generation. We
16253 * have to start back at the beginning of the list each time
16254 * because we drop dtrace_lock. It's unlikely that we'll make
16255 * more than two passes.
16256 */
16257 for (i = 0; i < help->dthps_nprovs; i++) {
16258 prov = help->dthps_provs[i];
16259
16260 if (prov->dthp_generation == gen)
16261 break;
16262 }
16263
16264 /*
16265 * If there were no matches, we're done.
16266 */
16267 if (i == help->dthps_nprovs)
16268 break;
16269
16270 /*
16271 * Move the last helper provider into this slot.
16272 */
16273 help->dthps_nprovs--;
16274 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
16275 help->dthps_provs[help->dthps_nprovs] = NULL;
16276
16277 mutex_exit(&dtrace_lock);
16278
16279 /*
16280 * If we have a meta provider, remove this helper provider.
16281 */
16282 mutex_enter(&dtrace_meta_lock);
16283 if (dtrace_meta_pid != NULL) {
16284 ASSERT(dtrace_deferred_pid == NULL);
16285 dtrace_helper_provider_remove(&prov->dthp_prov,
16286 p->p_pid);
16287 }
16288 mutex_exit(&dtrace_meta_lock);
16289
16290 dtrace_helper_provider_destroy(prov);
16291
16292 mutex_enter(&dtrace_lock);
16293 }
16294
16295 return (0);
16296 }
16297
16298 static int
16299 dtrace_helper_validate(dtrace_helper_action_t *helper)
16300 {
16301 int err = 0, i;
16302 dtrace_difo_t *dp;
16303
16304 if ((dp = helper->dtha_predicate) != NULL)
16305 err += dtrace_difo_validate_helper(dp);
16306
16307 for (i = 0; i < helper->dtha_nactions; i++)
16308 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
16309
16310 return (err == 0);
16311 }
16312
16313 static int
16314 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep,
16315 dtrace_helpers_t *help)
16316 {
16317 dtrace_helper_action_t *helper, *last;
16318 dtrace_actdesc_t *act;
16319 dtrace_vstate_t *vstate;
16320 dtrace_predicate_t *pred;
16321 int count = 0, nactions = 0, i;
16322
16323 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
16324 return (EINVAL);
16325
16326 last = help->dthps_actions[which];
16327 vstate = &help->dthps_vstate;
16328
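	/*
	 * Walk to the end of the existing list of actions for this helper
	 * action type, counting entries as we go; 'last' will point at the
	 * tail so that the new helper can be appended below.
	 */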
16329 for (count = 0; last != NULL; last = last->dtha_next) {
16330 count++;
16331 if (last->dtha_next == NULL)
16332 break;
16333 }
16334
16335 /*
16336 * If we already have dtrace_helper_actions_max helper actions for this
16337 * helper action type, we'll refuse to add a new one.
16338 */
16339 if (count >= dtrace_helper_actions_max)
16340 return (ENOSPC);
16341
16342 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
16343 helper->dtha_generation = help->dthps_generation;
16344
16345 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
16346 ASSERT(pred->dtp_difo != NULL);
16347 dtrace_difo_hold(pred->dtp_difo);
16348 helper->dtha_predicate = pred->dtp_difo;
16349 }
16350
16351 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
16352 if (act->dtad_kind != DTRACEACT_DIFEXPR)
16353 goto err;
16354
16355 if (act->dtad_difo == NULL)
16356 goto err;
16357
16358 nactions++;
16359 }
16360
16361 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
16362 (helper->dtha_nactions = nactions), KM_SLEEP);
16363
16364 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
16365 dtrace_difo_hold(act->dtad_difo);
16366 helper->dtha_actions[i++] = act->dtad_difo;
16367 }
16368
16369 if (!dtrace_helper_validate(helper))
16370 goto err;
16371
16372 if (last == NULL) {
16373 help->dthps_actions[which] = helper;
16374 } else {
16375 last->dtha_next = helper;
16376 }
16377
16378 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
16379 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
16380 dtrace_helptrace_next = 0;
16381 }
16382
16383 return (0);
16384 err:
16385 dtrace_helper_action_destroy(helper, vstate);
16386 return (EINVAL);
16387 }
16388
16389 static void
16390 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
16391 dof_helper_t *dofhp)
16392 {
16393 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
16394
16395 mutex_enter(&dtrace_meta_lock);
16396 mutex_enter(&dtrace_lock);
16397
16398 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
16399 /*
16400 * If the dtrace module is loaded but not attached, or if
16401	 * there isn't a meta provider registered to deal with
16402 * these provider descriptions, we need to postpone creating
16403 * the actual providers until later.
16404 */
16405
16406 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
16407 dtrace_deferred_pid != help) {
16408 help->dthps_deferred = 1;
16409 help->dthps_pid = p->p_pid;
16410 help->dthps_next = dtrace_deferred_pid;
16411 help->dthps_prev = NULL;
16412 if (dtrace_deferred_pid != NULL)
16413 dtrace_deferred_pid->dthps_prev = help;
16414 dtrace_deferred_pid = help;
16415 }
16416
16417 mutex_exit(&dtrace_lock);
16418
16419 } else if (dofhp != NULL) {
16420 /*
16421 * If the dtrace module is loaded and we have a particular
16422 * helper provider description, pass that off to the
16423 * meta provider.
16424 */
16425
16426 mutex_exit(&dtrace_lock);
16427
16428 dtrace_helper_provide(dofhp, p->p_pid);
16429
16430 } else {
16431 /*
16432 * Otherwise, just pass all the helper provider descriptions
16433 * off to the meta provider.
16434 */
16435
16436 int i;
16437 mutex_exit(&dtrace_lock);
16438
16439 for (i = 0; i < help->dthps_nprovs; i++) {
16440 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
16441 p->p_pid);
16442 }
16443 }
16444
16445 mutex_exit(&dtrace_meta_lock);
16446 }
16447
16448 static int
16449 dtrace_helper_provider_add(dof_helper_t *dofhp, dtrace_helpers_t *help, int gen)
16450 {
16451 dtrace_helper_provider_t *hprov, **tmp_provs;
16452 uint_t tmp_maxprovs, i;
16453
16454 ASSERT(MUTEX_HELD(&dtrace_lock));
16455 ASSERT(help != NULL);
16456
16457 /*
16458 * If we already have dtrace_helper_providers_max helper providers,
16459	 * we refuse to add a new one.
16460 */
16461 if (help->dthps_nprovs >= dtrace_helper_providers_max)
16462 return (ENOSPC);
16463
16464 /*
16465 * Check to make sure this isn't a duplicate.
16466 */
16467 for (i = 0; i < help->dthps_nprovs; i++) {
16468 if (dofhp->dofhp_addr ==
16469 help->dthps_provs[i]->dthp_prov.dofhp_addr)
16470 return (EALREADY);
16471 }
16472
16473 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
16474 hprov->dthp_prov = *dofhp;
16475 hprov->dthp_ref = 1;
16476 hprov->dthp_generation = gen;
16477
16478 /*
16479 * Allocate a bigger table for helper providers if it's already full.
16480 */
16481 if (help->dthps_maxprovs == help->dthps_nprovs) {
16482 tmp_maxprovs = help->dthps_maxprovs;
16483 tmp_provs = help->dthps_provs;
16484
16485 if (help->dthps_maxprovs == 0)
16486 help->dthps_maxprovs = 2;
16487 else
16488 help->dthps_maxprovs *= 2;
16489 if (help->dthps_maxprovs > dtrace_helper_providers_max)
16490 help->dthps_maxprovs = dtrace_helper_providers_max;
16491
16492 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
16493
16494 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
16495 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
16496
16497 if (tmp_provs != NULL) {
16498 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
16499 sizeof (dtrace_helper_provider_t *));
16500 kmem_free(tmp_provs, tmp_maxprovs *
16501 sizeof (dtrace_helper_provider_t *));
16502 }
16503 }
16504
16505 help->dthps_provs[help->dthps_nprovs] = hprov;
16506 help->dthps_nprovs++;
16507
16508 return (0);
16509 }
16510
16511 static void
16512 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
16513 {
16514 mutex_enter(&dtrace_lock);
16515
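	/*
	 * Drop one reference on the helper provider; references are added in
	 * dtrace_helpers_duplicate(), so the in-kernel DOF and the provider
	 * structure are only freed when the last reference goes away.
	 */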
16516 if (--hprov->dthp_ref == 0) {
16517 dof_hdr_t *dof;
16518 mutex_exit(&dtrace_lock);
16519 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
16520 dtrace_dof_destroy(dof);
16521 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
16522 } else {
16523 mutex_exit(&dtrace_lock);
16524 }
16525 }
16526
16527 static int
16528 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
16529 {
16530 uintptr_t daddr = (uintptr_t)dof;
16531 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
16532 dof_provider_t *provider;
16533 dof_probe_t *probe;
16534 uint8_t *arg;
16535 char *strtab, *typestr;
16536 dof_stridx_t typeidx;
16537 size_t typesz;
16538 uint_t nprobes, j, k;
16539
16540 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
16541
16542 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
16543 dtrace_dof_error(dof, "misaligned section offset");
16544 return (-1);
16545 }
16546
16547 /*
16548 * The section needs to be large enough to contain the DOF provider
16549 * structure appropriate for the given version.
16550 */
16551 if (sec->dofs_size <
16552 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
16553 offsetof(dof_provider_t, dofpv_prenoffs) :
16554 sizeof (dof_provider_t))) {
16555 dtrace_dof_error(dof, "provider section too small");
16556 return (-1);
16557 }
16558
16559 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
16560 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
16561 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
16562 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
16563 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
16564
16565 if (str_sec == NULL || prb_sec == NULL ||
16566 arg_sec == NULL || off_sec == NULL)
16567 return (-1);
16568
16569 enoff_sec = NULL;
16570
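	/*
	 * is-enabled offsets postdate DOF version 1; resolve that section
	 * only if this provider actually declares one.
	 */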
16571 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
16572 provider->dofpv_prenoffs != DOF_SECT_NONE &&
16573 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
16574 provider->dofpv_prenoffs)) == NULL)
16575 return (-1);
16576
16577 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
16578
16579 if (provider->dofpv_name >= str_sec->dofs_size ||
16580 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
16581 dtrace_dof_error(dof, "invalid provider name");
16582 return (-1);
16583 }
16584
16585 if (prb_sec->dofs_entsize == 0 ||
16586 prb_sec->dofs_entsize > prb_sec->dofs_size) {
16587 dtrace_dof_error(dof, "invalid entry size");
16588 return (-1);
16589 }
16590
16591 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
16592 dtrace_dof_error(dof, "misaligned entry size");
16593 return (-1);
16594 }
16595
16596 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
16597 dtrace_dof_error(dof, "invalid entry size");
16598 return (-1);
16599 }
16600
16601 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
16602 dtrace_dof_error(dof, "misaligned section offset");
16603 return (-1);
16604 }
16605
16606 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
16607 dtrace_dof_error(dof, "invalid entry size");
16608 return (-1);
16609 }
16610
16611 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
16612
16613 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
16614
16615 /*
16616 * Take a pass through the probes to check for errors.
16617 */
16618 for (j = 0; j < nprobes; j++) {
16619 probe = (dof_probe_t *)(uintptr_t)(daddr +
16620 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
16621
16622 if (probe->dofpr_func >= str_sec->dofs_size) {
16623 dtrace_dof_error(dof, "invalid function name");
16624 return (-1);
16625 }
16626
16627 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
16628 dtrace_dof_error(dof, "function name too long");
16629 /*
16630 * Keep going if the function name is too long.
16631 * Unlike provider and probe names, we cannot reasonably
16632 * impose restrictions on function names, since they're
16633 * a property of the code being instrumented. We will
16634 * skip this probe in dtrace_helper_provide_one().
16635 */
16636 }
16637
16638 if (probe->dofpr_name >= str_sec->dofs_size ||
16639 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
16640 dtrace_dof_error(dof, "invalid probe name");
16641 return (-1);
16642 }
16643
16644 /*
16645 * The offset count must not wrap the index, and the offsets
16646 * must also not overflow the section's data.
16647 */
16648 if (probe->dofpr_offidx + probe->dofpr_noffs <
16649 probe->dofpr_offidx ||
16650 (probe->dofpr_offidx + probe->dofpr_noffs) *
16651 off_sec->dofs_entsize > off_sec->dofs_size) {
16652 dtrace_dof_error(dof, "invalid probe offset");
16653 return (-1);
16654 }
16655
16656 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
16657 /*
16658 * If there's no is-enabled offset section, make sure
16659 * there aren't any is-enabled offsets. Otherwise
16660 * perform the same checks as for probe offsets
16661 * (immediately above).
16662 */
16663 if (enoff_sec == NULL) {
16664 if (probe->dofpr_enoffidx != 0 ||
16665 probe->dofpr_nenoffs != 0) {
16666 dtrace_dof_error(dof, "is-enabled "
16667 "offsets with null section");
16668 return (-1);
16669 }
16670 } else if (probe->dofpr_enoffidx +
16671 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
16672 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
16673 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
16674 dtrace_dof_error(dof, "invalid is-enabled "
16675 "offset");
16676 return (-1);
16677 }
16678
16679 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
16680 dtrace_dof_error(dof, "zero probe and "
16681 "is-enabled offsets");
16682 return (-1);
16683 }
16684 } else if (probe->dofpr_noffs == 0) {
16685 dtrace_dof_error(dof, "zero probe offsets");
16686 return (-1);
16687 }
16688
16689 if (probe->dofpr_argidx + probe->dofpr_xargc <
16690 probe->dofpr_argidx ||
16691 (probe->dofpr_argidx + probe->dofpr_xargc) *
16692 arg_sec->dofs_entsize > arg_sec->dofs_size) {
16693 dtrace_dof_error(dof, "invalid args");
16694 return (-1);
16695 }
16696
16697 typeidx = probe->dofpr_nargv;
16698 typestr = strtab + probe->dofpr_nargv;
16699 for (k = 0; k < probe->dofpr_nargc; k++) {
16700 if (typeidx >= str_sec->dofs_size) {
16701 dtrace_dof_error(dof, "bad "
16702 "native argument type");
16703 return (-1);
16704 }
16705
16706 typesz = strlen(typestr) + 1;
16707 if (typesz > DTRACE_ARGTYPELEN) {
16708 dtrace_dof_error(dof, "native "
16709 "argument type too long");
16710 return (-1);
16711 }
16712 typeidx += typesz;
16713 typestr += typesz;
16714 }
16715
16716 typeidx = probe->dofpr_xargv;
16717 typestr = strtab + probe->dofpr_xargv;
16718 for (k = 0; k < probe->dofpr_xargc; k++) {
16719 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
16720 dtrace_dof_error(dof, "bad "
16721 "native argument index");
16722 return (-1);
16723 }
16724
16725 if (typeidx >= str_sec->dofs_size) {
16726 dtrace_dof_error(dof, "bad "
16727 "translated argument type");
16728 return (-1);
16729 }
16730
16731 typesz = strlen(typestr) + 1;
16732 if (typesz > DTRACE_ARGTYPELEN) {
16733 dtrace_dof_error(dof, "translated argument "
16734 "type too long");
16735 return (-1);
16736 }
16737
16738 typeidx += typesz;
16739 typestr += typesz;
16740 }
16741 }
16742
16743 return (0);
16744 }
16745
16746 static int
16747 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp, struct proc *p)
16748 {
16749 dtrace_helpers_t *help;
16750 dtrace_vstate_t *vstate;
16751 dtrace_enabling_t *enab = NULL;
16752 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
16753 uintptr_t daddr = (uintptr_t)dof;
16754
16755 ASSERT(MUTEX_HELD(&dtrace_lock));
16756
16757 if ((help = p->p_dtrace_helpers) == NULL)
16758 help = dtrace_helpers_create(p);
16759
16760 vstate = &help->dthps_vstate;
16761 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, dhp->dofhp_addr,
16762 dhp->dofhp_dof, B_FALSE)) != 0) {
16763 dtrace_dof_destroy(dof);
16764 return (rv);
16765 }
16766
16767 /*
16768 * Look for helper providers and validate their descriptions.
16769 */
16770 for (i = 0; i < dof->dofh_secnum; i++) {
16771 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
16772 dof->dofh_secoff + i * dof->dofh_secsize);
16773
16774 if (sec->dofs_type != DOF_SECT_PROVIDER)
16775 continue;
16776
16777 if (dtrace_helper_provider_validate(dof, sec) != 0) {
16778 dtrace_enabling_destroy(enab);
16779 dtrace_dof_destroy(dof);
16780 return (-1);
16781 }
16782
16783 nprovs++;
16784 }
16785
16786 /*
16787 * Now we need to walk through the ECB descriptions in the enabling.
16788 */
16789 for (i = 0; i < enab->dten_ndesc; i++) {
16790 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
16791 dtrace_probedesc_t *desc = &ep->dted_probe;
16792
16793 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
16794 continue;
16795
16796 if (strcmp(desc->dtpd_mod, "helper") != 0)
16797 continue;
16798
16799 if (strcmp(desc->dtpd_func, "ustack") != 0)
16800 continue;
16801
16802 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
16803 ep, help)) != 0) {
16804 /*
16805 * Adding this helper action failed -- we are now going
16806 * to rip out the entire generation and return failure.
16807 */
16808 (void) dtrace_helper_destroygen(help,
16809 help->dthps_generation);
16810 dtrace_enabling_destroy(enab);
16811 dtrace_dof_destroy(dof);
16812 return (-1);
16813 }
16814
16815 nhelpers++;
16816 }
16817
16818 if (nhelpers < enab->dten_ndesc)
16819 dtrace_dof_error(dof, "unmatched helpers");
16820
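	/*
	 * Each successful slurp gets its own generation number, which serves
	 * as the handle for later removal via dtrace_helper_destroygen().
	 */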
16821 gen = help->dthps_generation++;
16822 dtrace_enabling_destroy(enab);
16823
16824 if (nprovs > 0) {
16825 /*
16826 * Now that this is in-kernel, we change the sense of the
16827 * members: dofhp_dof denotes the in-kernel copy of the DOF
16828 * and dofhp_addr denotes the address at user-level.
16829 */
16830 dhp->dofhp_addr = dhp->dofhp_dof;
16831 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
16832
16833 if (dtrace_helper_provider_add(dhp, help, gen) == 0) {
16834 mutex_exit(&dtrace_lock);
16835 dtrace_helper_provider_register(p, help, dhp);
16836 mutex_enter(&dtrace_lock);
16837
16838 destroy = 0;
16839 }
16840 }
16841
16842 if (destroy)
16843 dtrace_dof_destroy(dof);
16844
16845 return (gen);
16846 }
16847
16848 static dtrace_helpers_t *
16849 dtrace_helpers_create(proc_t *p)
16850 {
16851 dtrace_helpers_t *help;
16852
16853 ASSERT(MUTEX_HELD(&dtrace_lock));
16854 ASSERT(p->p_dtrace_helpers == NULL);
16855
16856 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
16857 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
16858 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
16859
16860 p->p_dtrace_helpers = help;
16861 dtrace_helpers++;
16862
16863 return (help);
16864 }
16865
16866 #ifdef illumos
16867 static
16868 #endif
16869 void
16870 dtrace_helpers_destroy(proc_t *p)
16871 {
16872 dtrace_helpers_t *help;
16873 dtrace_vstate_t *vstate;
16874 #ifdef illumos
16875 proc_t *p = curproc;
16876 #endif
16877 int i;
16878
16879 mutex_enter(&dtrace_lock);
16880
16881 ASSERT(p->p_dtrace_helpers != NULL);
16882 ASSERT(dtrace_helpers > 0);
16883
16884 help = p->p_dtrace_helpers;
16885 vstate = &help->dthps_vstate;
16886
16887 /*
16888 * We're now going to lose the help from this process.
16889 */
16890 p->p_dtrace_helpers = NULL;
16891 dtrace_sync();
16892
16893 /*
16894	 * Destroy the helper actions.
16895 */
16896 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16897 dtrace_helper_action_t *h, *next;
16898
16899 for (h = help->dthps_actions[i]; h != NULL; h = next) {
16900 next = h->dtha_next;
16901 dtrace_helper_action_destroy(h, vstate);
16902 h = next;
16903 }
16904 }
16905
16906 mutex_exit(&dtrace_lock);
16907
16908 /*
16909 * Destroy the helper providers.
16910 */
16911 if (help->dthps_maxprovs > 0) {
16912 mutex_enter(&dtrace_meta_lock);
16913 if (dtrace_meta_pid != NULL) {
16914 ASSERT(dtrace_deferred_pid == NULL);
16915
16916 for (i = 0; i < help->dthps_nprovs; i++) {
16917 dtrace_helper_provider_remove(
16918 &help->dthps_provs[i]->dthp_prov, p->p_pid);
16919 }
16920 } else {
16921 mutex_enter(&dtrace_lock);
16922 ASSERT(help->dthps_deferred == 0 ||
16923 help->dthps_next != NULL ||
16924 help->dthps_prev != NULL ||
16925 help == dtrace_deferred_pid);
16926
16927 /*
16928 * Remove the helper from the deferred list.
16929 */
16930 if (help->dthps_next != NULL)
16931 help->dthps_next->dthps_prev = help->dthps_prev;
16932 if (help->dthps_prev != NULL)
16933 help->dthps_prev->dthps_next = help->dthps_next;
16934 if (dtrace_deferred_pid == help) {
16935 dtrace_deferred_pid = help->dthps_next;
16936 ASSERT(help->dthps_prev == NULL);
16937 }
16938
16939 mutex_exit(&dtrace_lock);
16940 }
16941
16942 mutex_exit(&dtrace_meta_lock);
16943
16944 for (i = 0; i < help->dthps_nprovs; i++) {
16945 dtrace_helper_provider_destroy(help->dthps_provs[i]);
16946 }
16947
16948 kmem_free(help->dthps_provs, help->dthps_maxprovs *
16949 sizeof (dtrace_helper_provider_t *));
16950 }
16951
16952 mutex_enter(&dtrace_lock);
16953
16954 dtrace_vstate_fini(&help->dthps_vstate);
16955 kmem_free(help->dthps_actions,
16956 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
16957 kmem_free(help, sizeof (dtrace_helpers_t));
16958
16959 --dtrace_helpers;
16960 mutex_exit(&dtrace_lock);
16961 }
16962
16963 #ifdef illumos
16964 static
16965 #endif
16966 void
16967 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
16968 {
16969 dtrace_helpers_t *help, *newhelp;
16970 dtrace_helper_action_t *helper, *new, *last;
16971 dtrace_difo_t *dp;
16972 dtrace_vstate_t *vstate;
16973 int i, j, sz, hasprovs = 0;
16974
16975 mutex_enter(&dtrace_lock);
16976 ASSERT(from->p_dtrace_helpers != NULL);
16977 ASSERT(dtrace_helpers > 0);
16978
16979 help = from->p_dtrace_helpers;
16980 newhelp = dtrace_helpers_create(to);
16981 ASSERT(to->p_dtrace_helpers != NULL);
16982
16983 newhelp->dthps_generation = help->dthps_generation;
16984 vstate = &newhelp->dthps_vstate;
16985
16986 /*
16987 * Duplicate the helper actions.
16988 */
16989 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
16990 if ((helper = help->dthps_actions[i]) == NULL)
16991 continue;
16992
16993 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
16994 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
16995 KM_SLEEP);
16996 new->dtha_generation = helper->dtha_generation;
16997
16998 if ((dp = helper->dtha_predicate) != NULL) {
16999 dp = dtrace_difo_duplicate(dp, vstate);
17000 new->dtha_predicate = dp;
17001 }
17002
17003 new->dtha_nactions = helper->dtha_nactions;
17004 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
17005 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
17006
17007 for (j = 0; j < new->dtha_nactions; j++) {
17008 dtrace_difo_t *dp = helper->dtha_actions[j];
17009
17010 ASSERT(dp != NULL);
17011 dp = dtrace_difo_duplicate(dp, vstate);
17012 new->dtha_actions[j] = dp;
17013 }
17014
17015 if (last != NULL) {
17016 last->dtha_next = new;
17017 } else {
17018 newhelp->dthps_actions[i] = new;
17019 }
17020
17021 last = new;
17022 }
17023 }
17024
17025 /*
17026 * Duplicate the helper providers and register them with the
17027 * DTrace framework.
17028 */
17029 if (help->dthps_nprovs > 0) {
17030 newhelp->dthps_nprovs = help->dthps_nprovs;
17031 newhelp->dthps_maxprovs = help->dthps_nprovs;
17032 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
17033 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
17034 for (i = 0; i < newhelp->dthps_nprovs; i++) {
17035 newhelp->dthps_provs[i] = help->dthps_provs[i];
17036 newhelp->dthps_provs[i]->dthp_ref++;
17037 }
17038
17039 hasprovs = 1;
17040 }
17041
17042 mutex_exit(&dtrace_lock);
17043
17044 if (hasprovs)
17045 dtrace_helper_provider_register(to, newhelp, NULL);
17046 }
17047
17048 /*
17049 * DTrace Hook Functions
17050 */
17051 static void
17052 dtrace_module_loaded(modctl_t *ctl)
17053 {
17054 dtrace_provider_t *prv;
17055
17056 #ifdef __NetBSD__
17057 /*
17058 * We have just one symbol table and CTF table for the entire
17059 * base kernel, so ignore any other built-in module entries.
17060 * This means that the module name for a given symbol will change
17061 * depending on whether the module is built-in or loaded separately.
17062 */
17063 if (module_source(ctl) == MODULE_SOURCE_KERNEL &&
17064 strcmp(module_name(ctl), "netbsd")) {
17065 return;
17066 }
17067 #endif
17068
17069 mutex_enter(&dtrace_provider_lock);
17070 #ifdef illumos
17071 mutex_enter(&mod_lock);
17072 #endif
17073
17074 #ifdef illumos
17075 ASSERT(ctl->mod_busy);
17076 #endif
17077
17078 /*
17079	 * We're going to call each provider's per-module provide operation
17080 * specifying only this module.
17081 */
17082 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
17083 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
17084
17085 #ifdef illumos
17086 mutex_exit(&mod_lock);
17087 #endif
17088 mutex_exit(&dtrace_provider_lock);
17089
17090 /*
17091 * If we have any retained enablings, we need to match against them.
17092 * Enabling probes requires that cpu_lock be held, and we cannot hold
17093 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
17094 * module. (In particular, this happens when loading scheduling
17095 * classes.) So if we have any retained enablings, we need to dispatch
17096 * our task queue to do the match for us.
17097 */
17098 mutex_enter(&dtrace_lock);
17099
17100 if (dtrace_retained == NULL) {
17101 mutex_exit(&dtrace_lock);
17102 return;
17103 }
17104
17105 (void) taskq_dispatch(dtrace_taskq,
17106 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
17107
17108 mutex_exit(&dtrace_lock);
17109
17110 /*
17111 * And now, for a little heuristic sleaze: in general, we want to
17112 * match modules as soon as they load. However, we cannot guarantee
17113 * this, because it would lead us to the lock ordering violation
17114 * outlined above. The common case, of course, is that cpu_lock is
17115 * _not_ held -- so we delay here for a clock tick, hoping that that's
17116 * long enough for the task queue to do its work. If it's not, it's
17117 * not a serious problem -- it just means that the module that we
17118 * just loaded may not be immediately instrumentable.
17119 */
17120 delay(1);
17121 }
17122
17123 static void
17124 #ifndef __FreeBSD__
17125 dtrace_module_unloaded(modctl_t *ctl)
17126 #else
17127 dtrace_module_unloaded(modctl_t *ctl, int *error)
17128 #endif
17129 {
17130 dtrace_probe_t template, *probe, *first, *next;
17131 dtrace_provider_t *prov;
17132 #ifndef illumos
17133 char modname[DTRACE_MODNAMELEN];
17134 size_t len;
17135 #endif
17136
17137 #ifdef illumos
17138 template.dtpr_mod = ctl->mod_modname;
17139 #endif
17140 #ifdef __FreeBSD__
17141 /* Handle the fact that ctl->filename may end in ".ko". */
17142 strlcpy(modname, ctl->filename, sizeof(modname));
17143 len = strlen(ctl->filename);
17144 if (len > 3 && strcmp(modname + len - 3, ".ko") == 0)
17145 modname[len - 3] = '\0';
17146 template.dtpr_mod = modname;
17147 #endif
17148 #ifdef __NetBSD__
17149 if (module_source(ctl) == MODULE_SOURCE_KERNEL &&
17150 strcmp(module_name(ctl), "netbsd")) {
17151 return;
17152 }
17153
17154 /* Handle the fact that ctl->filename may end in ".kmod". */
17155 strlcpy(modname, module_name(ctl), sizeof(modname));
17156 len = strlen(modname);
17157 if (len > 5 && strcmp(modname + len - 5, ".kmod") == 0)
17158 modname[len - 5] = '\0';
17159 template.dtpr_mod = modname;
17160
17161 #endif
17162
17163 mutex_enter(&dtrace_provider_lock);
17164 #ifdef illumos
17165 mutex_enter(&mod_lock);
17166 #endif
17167 mutex_enter(&dtrace_lock);
17168
17169 #ifdef __FreeBSD__
17170 if (ctl->nenabled > 0) {
17171 /* Don't allow unloads if a probe is enabled. */
17172 mutex_exit(&dtrace_provider_lock);
17173 mutex_exit(&dtrace_lock);
17174 *error = -1;
17175 printf(
17176 "kldunload: attempt to unload module that has DTrace probes enabled\n");
17177 return;
17178 }
17179 #endif
17180
17181 if (dtrace_bymod == NULL) {
17182 /*
17183 * The DTrace module is loaded (obviously) but not attached;
17184 * we don't have any work to do.
17185 */
17186 mutex_exit(&dtrace_provider_lock);
17187 #ifdef illumos
17188 mutex_exit(&mod_lock);
17189 #endif
17190 mutex_exit(&dtrace_lock);
17191 return;
17192 }
17193
17194 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
17195 probe != NULL; probe = probe->dtpr_nextmod) {
17196 if (probe->dtpr_ecb != NULL) {
17197 mutex_exit(&dtrace_provider_lock);
17198 #ifdef illumos
17199 mutex_exit(&mod_lock);
17200 #endif
17201 mutex_exit(&dtrace_lock);
17202
17203 /*
17204 * This shouldn't _actually_ be possible -- we're
17205 * unloading a module that has an enabled probe in it.
17206 * (It's normally up to the provider to make sure that
17207 * this can't happen.) However, because dtps_enable()
17208 * doesn't have a failure mode, there can be an
17209 * enable/unload race. Upshot: we don't want to
17210 * assert, but we're not going to disable the
17211 * probe, either.
17212 */
17213 if (dtrace_err_verbose) {
17214 #ifdef illumos
17215 cmn_err(CE_WARN, "unloaded module '%s' had "
17216 "enabled probes", ctl->mod_modname);
17217 #else
17218 cmn_err(CE_WARN, "unloaded module '%s' had "
17219 "enabled probes", modname);
17220 #endif
17221 }
17222
17223 return;
17224 }
17225 }
17226
17227 probe = first;
17228
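	/*
	 * Unhook each of the module's probes from the hash tables and the
	 * probe array, rethreading them onto a private list headed by
	 * 'first' so that they can be destroyed once every CPU has synced.
	 */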
17229 for (first = NULL; probe != NULL; probe = next) {
17230 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
17231
17232 dtrace_probes[probe->dtpr_id - 1] = NULL;
17233
17234 next = probe->dtpr_nextmod;
17235 dtrace_hash_remove(dtrace_bymod, probe);
17236 dtrace_hash_remove(dtrace_byfunc, probe);
17237 dtrace_hash_remove(dtrace_byname, probe);
17238
17239 if (first == NULL) {
17240 first = probe;
17241 probe->dtpr_nextmod = NULL;
17242 } else {
17243 probe->dtpr_nextmod = first;
17244 first = probe;
17245 }
17246 }
17247
17248 /*
17249 * We've removed all of the module's probes from the hash chains and
17250 * from the probe array. Now issue a dtrace_sync() to be sure that
17251 * everyone has cleared out from any probe array processing.
17252 */
17253 dtrace_sync();
17254
17255 for (probe = first; probe != NULL; probe = first) {
17256 first = probe->dtpr_nextmod;
17257 prov = probe->dtpr_provider;
17258 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
17259 probe->dtpr_arg);
17260 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
17261 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
17262 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
17263 #ifdef illumos
17264 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
17265 #endif
17266 #ifdef __FreeBSD__
17267 free_unr(dtrace_arena, probe->dtpr_id);
17268 #endif
17269 #ifdef __NetBSD__
17270 vmem_free(dtrace_arena, (uintptr_t)probe->dtpr_id, 1);
17271 #endif
17272 kmem_free(probe, sizeof (dtrace_probe_t));
17273 }
17274
17275 mutex_exit(&dtrace_lock);
17276 #ifdef illumos
17277 mutex_exit(&mod_lock);
17278 #endif
17279 mutex_exit(&dtrace_provider_lock);
17280 }
17281
17282 #ifdef __FreeBSD__
17283 static void
17284 dtrace_kld_load(void *arg __unused, linker_file_t lf)
17285 {
17286
17287 dtrace_module_loaded(lf);
17288 }
17289
17290 static void
17291 dtrace_kld_unload_try(void *arg __unused, linker_file_t lf, int *error)
17292 {
17293
17294 if (*error != 0)
17295 /* We already have an error, so don't do anything. */
17296 return;
17297 dtrace_module_unloaded(lf, error);
17298 }
17299 #endif
17300
17301 #ifdef illumos
17302 static void
17303 dtrace_suspend(void)
17304 {
17305 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
17306 }
17307
17308 static void
17309 dtrace_resume(void)
17310 {
17311 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
17312 }
17313 #endif
17314
17315 static int
17316 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
17317 {
17318 ASSERT(MUTEX_HELD(&cpu_lock));
17319 mutex_enter(&dtrace_lock);
17320
17321 switch (what) {
17322 case CPU_CONFIG: {
17323 dtrace_state_t *state;
17324 dtrace_optval_t *opt, rs, c;
17325
17326 /*
17327 * For now, we only allocate a new buffer for anonymous state.
17328 */
17329 if ((state = dtrace_anon.dta_state) == NULL)
17330 break;
17331
17332 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
17333 break;
17334
17335 opt = state->dts_options;
17336 c = opt[DTRACEOPT_CPU];
17337
17338 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
17339 break;
17340
17341 /*
17342 * Regardless of what the actual policy is, we're going to
17343 * temporarily set our resize policy to be manual. We're
17344 * also going to temporarily set our CPU option to denote
17345 * the newly configured CPU.
17346 */
17347 rs = opt[DTRACEOPT_BUFRESIZE];
17348 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
17349 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
17350
17351 (void) dtrace_state_buffers(state);
17352
17353 opt[DTRACEOPT_BUFRESIZE] = rs;
17354 opt[DTRACEOPT_CPU] = c;
17355
17356 break;
17357 }
17358
17359 case CPU_UNCONFIG:
17360 /*
17361 * We don't free the buffer in the CPU_UNCONFIG case. (The
17362 * buffer will be freed when the consumer exits.)
17363 */
17364 break;
17365
17366 default:
17367 break;
17368 }
17369
17370 mutex_exit(&dtrace_lock);
17371 return (0);
17372 }
17373
17374 #ifdef illumos
17375 static void
17376 dtrace_cpu_setup_initial(processorid_t cpu)
17377 {
17378 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
17379 }
17380 #endif
17381
17382 static void
17383 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
17384 {
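	/*
	 * If the toxic range table is full, grow it: double its size
	 * (starting from a single entry), copy the existing ranges over,
	 * and free the old table before appending the new range.
	 */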
17385 if (dtrace_toxranges >= dtrace_toxranges_max) {
17386 int osize, nsize;
17387 dtrace_toxrange_t *range;
17388
17389 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17390
17391 if (osize == 0) {
17392 ASSERT(dtrace_toxrange == NULL);
17393 ASSERT(dtrace_toxranges_max == 0);
17394 dtrace_toxranges_max = 1;
17395 } else {
17396 dtrace_toxranges_max <<= 1;
17397 }
17398
17399 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
17400 range = kmem_zalloc(nsize, KM_SLEEP);
17401
17402 if (dtrace_toxrange != NULL) {
17403 ASSERT(osize != 0);
17404 bcopy(dtrace_toxrange, range, osize);
17405 kmem_free(dtrace_toxrange, osize);
17406 }
17407
17408 dtrace_toxrange = range;
17409 }
17410
17411 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0);
17412 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0);
17413
17414 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
17415 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
17416 dtrace_toxranges++;
17417 }
17418
17419 static void
17420 dtrace_getf_barrier(void)
17421 {
17422 #ifdef illumos
17423 /*
17424 * When we have unprivileged (that is, non-DTRACE_CRV_KERNEL) enablings
17425 * that contain calls to getf(), this routine will be called on every
17426 * closef() before either the underlying vnode is released or the
17427 * file_t itself is freed. By the time we are here, it is essential
17428 * that the file_t can no longer be accessed from a call to getf()
17429 * in probe context -- that assures that a dtrace_sync() can be used
17430 * to clear out any enablings referring to the old structures.
17431 */
17432 if (curthread->t_procp->p_zone->zone_dtrace_getf != 0 ||
17433 kcred->cr_zone->zone_dtrace_getf != 0)
17434 dtrace_sync();
17435 #endif
17436 }
17437
17438 /*
17439 * DTrace Driver Cookbook Functions
17440 */
17441 #ifdef illumos
17442 /*ARGSUSED*/
17443 static int
17444 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
17445 {
17446 dtrace_provider_id_t id;
17447 dtrace_state_t *state = NULL;
17448 dtrace_enabling_t *enab;
17449
17450 mutex_enter(&cpu_lock);
17451 mutex_enter(&dtrace_provider_lock);
17452 mutex_enter(&dtrace_lock);
17453
17454 if (ddi_soft_state_init(&dtrace_softstate,
17455 sizeof (dtrace_state_t), 0) != 0) {
17456 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
17457 mutex_exit(&cpu_lock);
17458 mutex_exit(&dtrace_provider_lock);
17459 mutex_exit(&dtrace_lock);
17460 return (DDI_FAILURE);
17461 }
17462
17463 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
17464 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
17465 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
17466 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
17467 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
17468 ddi_remove_minor_node(devi, NULL);
17469 ddi_soft_state_fini(&dtrace_softstate);
17470 mutex_exit(&cpu_lock);
17471 mutex_exit(&dtrace_provider_lock);
17472 mutex_exit(&dtrace_lock);
17473 return (DDI_FAILURE);
17474 }
17475
17476 ddi_report_dev(devi);
17477 dtrace_devi = devi;
17478
17479 dtrace_modload = dtrace_module_loaded;
17480 dtrace_modunload = dtrace_module_unloaded;
17481 dtrace_cpu_init = dtrace_cpu_setup_initial;
17482 dtrace_helpers_cleanup = dtrace_helpers_destroy;
17483 dtrace_helpers_fork = dtrace_helpers_duplicate;
17484 dtrace_cpustart_init = dtrace_suspend;
17485 dtrace_cpustart_fini = dtrace_resume;
17486 dtrace_debugger_init = dtrace_suspend;
17487 dtrace_debugger_fini = dtrace_resume;
17488
17489 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
17490
17491 ASSERT(MUTEX_HELD(&cpu_lock));
17492
17493 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
17494 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
17495 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
17496 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
17497 VM_SLEEP | VMC_IDENTIFIER);
17498 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
17499 1, INT_MAX, 0);
17500
17501 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
17502 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
17503 NULL, NULL, NULL, NULL, NULL, 0);
17504
17505 ASSERT(MUTEX_HELD(&cpu_lock));
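/*
 * Create the probe hash tables, keyed by module, function and probe name;
 * each table is threaded through the corresponding next/prev pointers
 * embedded in dtrace_probe_t.
 */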
17506 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
17507 offsetof(dtrace_probe_t, dtpr_nextmod),
17508 offsetof(dtrace_probe_t, dtpr_prevmod));
17509
17510 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
17511 offsetof(dtrace_probe_t, dtpr_nextfunc),
17512 offsetof(dtrace_probe_t, dtpr_prevfunc));
17513
17514 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
17515 offsetof(dtrace_probe_t, dtpr_nextname),
17516 offsetof(dtrace_probe_t, dtpr_prevname));
17517
17518 if (dtrace_retain_max < 1) {
17519 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
17520 "setting to 1", dtrace_retain_max);
17521 dtrace_retain_max = 1;
17522 }
17523
17524 /*
17525 * Now discover our toxic ranges.
17526 */
17527 dtrace_toxic_ranges(dtrace_toxrange_add);
17528
17529 /*
17530 * Before we register ourselves as a provider to our own framework,
17531 * we would like to assert that dtrace_provider is NULL -- but that's
17532 * not true if we were loaded as a dependency of a DTrace provider.
17533 * Once we've registered, we can assert that dtrace_provider is our
17534 * pseudo provider.
17535 */
17536 (void) dtrace_register("dtrace", &dtrace_provider_attr,
17537 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
17538
17539 ASSERT(dtrace_provider != NULL);
17540 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
17541
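/*
 * Create the BEGIN, END and ERROR probes in our own "dtrace" provider.
 */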
17542 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
17543 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
17544 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
17545 dtrace_provider, NULL, NULL, "END", 0, NULL);
17546 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
17547 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
17548
17549 dtrace_anon_property();
17550 mutex_exit(&cpu_lock);
17551
17552 /*
17553 * If there are already providers, we must ask them to provide their
17554 * probes, and then match any anonymous enabling against them. Note
17555 * that there should be no other retained enablings at this time:
17556 * the only retained enabling should be the anonymous enabling.
17558 */
17559 if (dtrace_anon.dta_enabling != NULL) {
17560 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
17561
17562 dtrace_enabling_provide(NULL);
17563 state = dtrace_anon.dta_state;
17564
17565 /*
17566 * We couldn't hold cpu_lock across the above call to
17567 * dtrace_enabling_provide(), but we must hold it to actually
17568 * enable the probes. We have to drop all of our locks, pick
17569 * up cpu_lock, and regain our locks before matching the
17570 * retained anonymous enabling.
17571 */
17572 mutex_exit(&dtrace_lock);
17573 mutex_exit(&dtrace_provider_lock);
17574
17575 mutex_enter(&cpu_lock);
17576 mutex_enter(&dtrace_provider_lock);
17577 mutex_enter(&dtrace_lock);
17578
17579 if ((enab = dtrace_anon.dta_enabling) != NULL)
17580 (void) dtrace_enabling_match(enab, NULL);
17581
17582 mutex_exit(&cpu_lock);
17583 }
17584
17585 mutex_exit(&dtrace_lock);
17586 mutex_exit(&dtrace_provider_lock);
17587
17588 if (state != NULL) {
17589 /*
17590 * If we created any anonymous state, set it going now.
17591 */
17592 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
17593 }
17594
17595 return (DDI_SUCCESS);
17596 }
17597 #endif
17598
17599 #ifdef __NetBSD__
17600 static dev_type_open(dtrace_open);
17601
17602 /* Pseudo-device entry points. */
17603 /* Only d_open is implemented; it clones the descriptor to the fileops below. */
17604 const struct cdevsw dtrace_cdevsw = {
17605 .d_open = dtrace_open,
17606 .d_close = noclose,
17607 .d_read = noread,
17608 .d_write = nowrite,
17609 .d_ioctl = noioctl,
17610 .d_stop = nostop,
17611 .d_tty = notty,
17612 .d_poll = nopoll,
17613 .d_mmap = nommap,
17614 .d_kqfilter = nokqfilter,
17615 .d_discard = nodiscard,
17616 .d_flag = D_OTHER | D_MPSAFE
17617 };
17618
17619 static int dtrace_ioctl(struct file *fp, u_long cmd, void *data);
17620 static int dtrace_close(struct file *fp);
17621
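/*
 * Per-open file operations: dtrace_open() attaches these to the cloned
 * descriptor via fd_clone(), with the consumer's dtrace_state_t stored in
 * f_data.
 */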
17622 static const struct fileops dtrace_fileops = {
17623 .fo_read = fbadop_read,
17624 .fo_write = fbadop_write,
17625 .fo_ioctl = dtrace_ioctl,
17626 .fo_fcntl = fnullop_fcntl,
17627 .fo_poll = fnullop_poll,
17628 .fo_stat = fbadop_stat,
17629 .fo_close = dtrace_close,
17630 .fo_kqfilter = fnullop_kqfilter,
17631 };
17632 #endif
17633
17634 #ifndef illumos
17635 static void dtrace_dtr(void *);
17636 #endif
17637
17638 /*ARGSUSED*/
17639 static int
17640 #ifdef illumos
17641 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
17642 #endif
17643 #ifdef __FreeBSD__
17644 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
17645 #endif
17646 #ifdef __NetBSD__
17647 dtrace_open(dev_t dev, int flags, int mode, struct lwp *l)
17648 #endif
17649 {
17650 dtrace_state_t *state;
17651 uint32_t priv;
17652 uid_t uid;
17653 zoneid_t zoneid;
17654
17655 #ifdef illumos
17656 if (getminor(*devp) == DTRACEMNRN_HELPER)
17657 return (0);
17658
17659 /*
17660 * If this wasn't an open with the "helper" minor, then it must be
17661 * the "dtrace" minor.
17662 */
17663 if (getminor(*devp) != DTRACEMNRN_DTRACE)
17664 return (ENXIO);
17665 #endif
17666 #ifdef __FreeBSD__
17667 cred_t *cred_p = dev->si_cred;
17671 #endif
17672 #ifdef __NetBSD__
17673 cred_t *cred_p = NULL;
17674 struct file *fp;
17675 int fd;
17676 int res;
17677
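/*
 * Reserve a file and descriptor up front; on success the open is completed
 * by the fd_clone() call at the end of this function, which hands the
 * descriptor to the caller with dtrace_fileops and the new state attached.
 */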
17678 if ((res = fd_allocfile(&fp, &fd)) != 0)
17679 return res;
17680 cred_p = l->l_cred;
17681 #endif
17682
17683 /*
17684 * If no DTRACE_PRIV_* bits are set in the credential, then the
17685 * caller lacks sufficient permission to do anything with DTrace.
17686 */
17687 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
17688 if (priv == DTRACE_PRIV_NONE) {
#ifdef __NetBSD__
/* Release the file and descriptor reserved by fd_allocfile() above. */
fd_abort(curproc, fp, fd);
#endif
17689 return (EACCES);
17690 }
17691
17692 /*
17693 * Ask all providers to provide all their probes.
17694 */
17695 mutex_enter(&dtrace_provider_lock);
17696 dtrace_probe_provide(NULL, NULL);
17697 mutex_exit(&dtrace_provider_lock);
17698
17699 mutex_enter(&cpu_lock);
17700 mutex_enter(&dtrace_lock);
17701 dtrace_opens++;
17702 dtrace_membar_producer();
17703
17704 #ifdef illumos
17705 /*
17706 * If the kernel debugger is active (that is, if the kernel debugger
17707 * modified text in some way), we won't allow the open.
17708 */
17709 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
17710 dtrace_opens--;
17711 mutex_exit(&cpu_lock);
17712 mutex_exit(&dtrace_lock);
17713 return (EBUSY);
17714 }
17715
17716 if (dtrace_helptrace_enable && dtrace_helptrace_buffer == NULL) {
17717 /*
17718 * If DTrace helper tracing is enabled, we need to allocate the
17719 * trace buffer and initialize the values.
17720 */
17721 dtrace_helptrace_buffer =
17722 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
17723 dtrace_helptrace_next = 0;
17724 dtrace_helptrace_wrapped = 0;
17725 dtrace_helptrace_enable = 0;
17726 }
17727 state = dtrace_state_create(devp, cred_p);
17728 #endif
17729 #ifdef __FreeBSD__
17730 state = dtrace_state_create(dev, NULL);
17731 devfs_set_cdevpriv(state, dtrace_dtr);
17732 #endif
17733 #ifdef __NetBSD__
17734 state = dtrace_state_create(&dev, cred_p);
17735 #endif
17736
17737 mutex_exit(&cpu_lock);
17738
17739 if (state == NULL) {
17740 #ifdef illumos
17741 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
17742 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17743 #else
17744 --dtrace_opens;
17745 #endif
17746 mutex_exit(&dtrace_lock);
#ifdef __NetBSD__
/* Release the file and descriptor reserved by fd_allocfile() above. */
fd_abort(curproc, fp, fd);
#endif
17747 return (EAGAIN);
17748 }
17749
17750 mutex_exit(&dtrace_lock);
17751
17752 #ifdef __NetBSD__
17753 return fd_clone(fp, fd, flags, &dtrace_fileops, state);
17754 #else
17755 return (0);
17756 #endif
17757 }
17758
17759 /*ARGSUSED*/
17760 #ifdef illumos
17761 static int
17762 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
17763 #endif
17764 #ifdef __FreeBSD__
17765 static void
17766 dtrace_dtr(void *data)
17767 #endif
17768 #ifdef __NetBSD__
17769 static int
17770 dtrace_close(struct file *fp)
17771 #endif
17772 {
17773 #ifdef illumos
17774 minor_t minor = getminor(dev);
17775 dtrace_state_t *state;
17776 #endif
17777 dtrace_helptrace_t *buf = NULL;
17778
17779 #ifdef illumos
17780 if (minor == DTRACEMNRN_HELPER)
17781 return (0);
17782
17783 state = ddi_get_soft_state(dtrace_softstate, minor);
17784 #endif
17785 #ifdef __FreeBSD__
17786 dtrace_state_t *state = data;
17787 #endif
17788 #ifdef __NetBSD__
17789 dtrace_state_t *state = (dtrace_state_t *)fp->f_data;
17790 #endif
17791
17792 mutex_enter(&cpu_lock);
17793 mutex_enter(&dtrace_lock);
17794
17795 #if defined(illumos) || defined(__NetBSD__)
17796 if (state->dts_anon)
17797 #else
17798 if (state != NULL && state->dts_anon)
17799 #endif
17800 {
17801 /*
17802 * There is anonymous state. Destroy that first.
17803 */
17804 ASSERT(dtrace_anon.dta_state == NULL);
17805 dtrace_state_destroy(state->dts_anon);
17806 }
17807
17808 if (dtrace_helptrace_disable) {
17809 /*
17810 * If we have been told to disable helper tracing, set the
17811 * buffer to NULL before calling into dtrace_state_destroy();
17812 * we take advantage of its dtrace_sync() to know that no
17813 * CPU is in probe context with enabled helper tracing
17814 * after it returns.
17815 */
17816 buf = dtrace_helptrace_buffer;
17817 dtrace_helptrace_buffer = NULL;
17818 }
17819
17820 #if defined(illumos) || defined(__NetBSD__)
17821 dtrace_state_destroy(state);
17822 #else
17823 if (state != NULL) {
17824 dtrace_state_destroy(state);
17825 kmem_free(state, 0);
17826 }
17827 #endif
17828
17829 ASSERT(dtrace_opens > 0);
17830
17831 #ifdef illumos
17832 /*
17833 * Only relinquish control of the kernel debugger interface when there
17834 * are no consumers and no anonymous enablings.
17835 */
17836 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
17837 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
17838 #else
17839 --dtrace_opens;
17840 #endif
17841
17842 if (buf != NULL) {
17843 kmem_free(buf, dtrace_helptrace_bufsize);
17844 dtrace_helptrace_disable = 0;
17845 }
17846
17847 mutex_exit(&dtrace_lock);
17848 mutex_exit(&cpu_lock);
17849
17850 #if defined(illumos) || defined(__NetBSD__)
17851 return (0);
17852 #endif
17853 }
17854
17855 #ifdef illumos
17856 /*ARGSUSED*/
17857 static int
17858 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
17859 {
17860 int rval;
17861 dof_helper_t help, *dhp = NULL;
17862
17863 switch (cmd) {
17864 case DTRACEHIOC_ADDDOF:
17865 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
17866 dtrace_dof_error(NULL, "failed to copyin DOF helper");
17867 return (EFAULT);
17868 }
17869
17870 dhp = &help;
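/*
 * Point arg at the DOF embedded in the helper description so that the
 * DTRACEHIOC_ADD path below copies it in.
 */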
17871 arg = (intptr_t)help.dofhp_dof;
17872 /*FALLTHROUGH*/
17873
17874 case DTRACEHIOC_ADD: {
17875 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
17876
17877 if (dof == NULL)
17878 return (rval);
17879
17880 mutex_enter(&dtrace_lock);
17881
17882 /*
17883 * dtrace_helper_slurp() takes responsibility for the dof --
17884 * it may free it now or it may save it and free it later.
17885 */
17886 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
17887 *rv = rval;
17888 rval = 0;
17889 } else {
17890 rval = EINVAL;
17891 }
17892
17893 mutex_exit(&dtrace_lock);
17894 return (rval);
17895 }
17896
17897 case DTRACEHIOC_REMOVE: {
17898 mutex_enter(&dtrace_lock);
17899 rval = dtrace_helper_destroygen(NULL, arg);
17900 mutex_exit(&dtrace_lock);
17901
17902 return (rval);
17903 }
17904
17905 default:
17906 break;
17907 }
17908
17909 return (ENOTTY);
17910 }
17911
17912 /*ARGSUSED*/
17913 static int
17914 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
17915 {
17916 minor_t minor = getminor(dev);
17917 dtrace_state_t *state;
17918 int rval;
17919
17920 if (minor == DTRACEMNRN_HELPER)
17921 return (dtrace_ioctl_helper(cmd, arg, rv));
17922
17923 state = ddi_get_soft_state(dtrace_softstate, minor);
17924
17925 if (state->dts_anon) {
17926 ASSERT(dtrace_anon.dta_state == NULL);
17927 state = state->dts_anon;
17928 }
17929
17930 switch (cmd) {
17931 case DTRACEIOC_PROVIDER: {
17932 dtrace_providerdesc_t pvd;
17933 dtrace_provider_t *pvp;
17934
17935 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
17936 return (EFAULT);
17937
17938 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
17939 mutex_enter(&dtrace_provider_lock);
17940
17941 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
17942 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
17943 break;
17944 }
17945
17946 mutex_exit(&dtrace_provider_lock);
17947
17948 if (pvp == NULL)
17949 return (ESRCH);
17950
17951 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
17952 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
17953
17954 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
17955 return (EFAULT);
17956
17957 return (0);
17958 }
17959
17960 case DTRACEIOC_EPROBE: {
17961 dtrace_eprobedesc_t epdesc;
17962 dtrace_ecb_t *ecb;
17963 dtrace_action_t *act;
17964 void *buf;
17965 size_t size;
17966 uintptr_t dest;
17967 int nrecs;
17968
17969 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
17970 return (EFAULT);
17971
17972 mutex_enter(&dtrace_lock);
17973
17974 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
17975 mutex_exit(&dtrace_lock);
17976 return (EINVAL);
17977 }
17978
17979 if (ecb->dte_probe == NULL) {
17980 mutex_exit(&dtrace_lock);
17981 return (EINVAL);
17982 }
17983
17984 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
17985 epdesc.dtepd_uarg = ecb->dte_uarg;
17986 epdesc.dtepd_size = ecb->dte_size;
17987
17988 nrecs = epdesc.dtepd_nrecs;
17989 epdesc.dtepd_nrecs = 0;
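/*
 * Count only the records that will be copied out below; aggregating
 * actions and actions that are part of an aggregation tuple are skipped.
 */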
17990 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
17991 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
17992 continue;
17993
17994 epdesc.dtepd_nrecs++;
17995 }
17996
17997 /*
17998 * Now that we have the size, we need to allocate a temporary
17999 * buffer in which to store the complete description. We need
18000 * the temporary buffer to be able to drop dtrace_lock()
18001 * across the copyout(), below.
18002 */
18003 size = sizeof (dtrace_eprobedesc_t) +
18004 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
18005
18006 buf = kmem_alloc(size, KM_SLEEP);
18007 dest = (uintptr_t)buf;
18008
18009 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
18010 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
18011
18012 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
18013 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
18014 continue;
18015
18016 if (nrecs-- == 0)
18017 break;
18018
18019 bcopy(&act->dta_rec, (void *)dest,
18020 sizeof (dtrace_recdesc_t));
18021 dest += sizeof (dtrace_recdesc_t);
18022 }
18023
18024 mutex_exit(&dtrace_lock);
18025
18026 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
18027 kmem_free(buf, size);
18028 return (EFAULT);
18029 }
18030
18031 kmem_free(buf, size);
18032 return (0);
18033 }
18034
18035 case DTRACEIOC_AGGDESC: {
18036 dtrace_aggdesc_t aggdesc;
18037 dtrace_action_t *act;
18038 dtrace_aggregation_t *agg;
18039 int nrecs;
18040 uint32_t offs;
18041 dtrace_recdesc_t *lrec;
18042 void *buf;
18043 size_t size;
18044 uintptr_t dest;
18045
18046 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
18047 return (EFAULT);
18048
18049 mutex_enter(&dtrace_lock);
18050
18051 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
18052 mutex_exit(&dtrace_lock);
18053 return (EINVAL);
18054 }
18055
18056 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
18057
18058 nrecs = aggdesc.dtagd_nrecs;
18059 aggdesc.dtagd_nrecs = 0;
18060
18061 offs = agg->dtag_base;
18062 lrec = &agg->dtag_action.dta_rec;
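/*
 * The aggregation's size spans from its base offset to the end of its
 * last record.
 */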
18063 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
18064
18065 for (act = agg->dtag_first; ; act = act->dta_next) {
18066 ASSERT(act->dta_intuple ||
18067 DTRACEACT_ISAGG(act->dta_kind));
18068
18069 /*
18070 * If this action has a record size of zero, it
18071 * denotes an argument to the aggregating action.
18072 * Because the presence of this record doesn't (or
18073 * shouldn't) affect the way the data is interpreted,
18074 * we don't copy it out, sparing user level the
18075 * confusion of dealing with a zero-length record.
18076 */
18077 if (act->dta_rec.dtrd_size == 0) {
18078 ASSERT(agg->dtag_hasarg);
18079 continue;
18080 }
18081
18082 aggdesc.dtagd_nrecs++;
18083
18084 if (act == &agg->dtag_action)
18085 break;
18086 }
18087
18088 /*
18089 * Now that we have the size, we need to allocate a temporary
18090 * buffer in which to store the complete description. We need
18091 * the temporary buffer to be able to drop dtrace_lock()
18092 * across the copyout(), below.
18093 */
18094 size = sizeof (dtrace_aggdesc_t) +
18095 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
18096
18097 buf = kmem_alloc(size, KM_SLEEP);
18098 dest = (uintptr_t)buf;
18099
18100 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
18101 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
18102
18103 for (act = agg->dtag_first; ; act = act->dta_next) {
18104 dtrace_recdesc_t rec = act->dta_rec;
18105
18106 /*
18107 * See the comment in the above loop for why we pass
18108 * over zero-length records.
18109 */
18110 if (rec.dtrd_size == 0) {
18111 ASSERT(agg->dtag_hasarg);
18112 continue;
18113 }
18114
18115 if (nrecs-- == 0)
18116 break;
18117
18118 rec.dtrd_offset -= offs;
18119 bcopy(&rec, (void *)dest, sizeof (rec));
18120 dest += sizeof (dtrace_recdesc_t);
18121
18122 if (act == &agg->dtag_action)
18123 break;
18124 }
18125
18126 mutex_exit(&dtrace_lock);
18127
18128 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
18129 kmem_free(buf, size);
18130 return (EFAULT);
18131 }
18132
18133 kmem_free(buf, size);
18134 return (0);
18135 }
18136
18137 case DTRACEIOC_ENABLE: {
18138 dof_hdr_t *dof;
18139 dtrace_enabling_t *enab = NULL;
18140 dtrace_vstate_t *vstate;
18141 int err = 0;
18142
18143 *rv = 0;
18144
18145 /*
18146 * If a NULL argument has been passed, we take this as our
18147 * cue to reevaluate our enablings.
18148 */
18149 if (arg == NULL) {
18150 dtrace_enabling_matchall();
18151
18152 return (0);
18153 }
18154
18155 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
18156 return (rval);
18157
18158 mutex_enter(&cpu_lock);
18159 mutex_enter(&dtrace_lock);
18160 vstate = &state->dts_vstate;
18161
18162 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
18163 mutex_exit(&dtrace_lock);
18164 mutex_exit(&cpu_lock);
18165 dtrace_dof_destroy(dof);
18166 return (EBUSY);
18167 }
18168
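/*
 * Convert the DOF into an enabling, apply any option settings that it
 * carries, and then match the enabling against the current set of probes.
 */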
18169 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
18170 mutex_exit(&dtrace_lock);
18171 mutex_exit(&cpu_lock);
18172 dtrace_dof_destroy(dof);
18173 return (EINVAL);
18174 }
18175
18176 if ((rval = dtrace_dof_options(dof, state)) != 0) {
18177 dtrace_enabling_destroy(enab);
18178 mutex_exit(&dtrace_lock);
18179 mutex_exit(&cpu_lock);
18180 dtrace_dof_destroy(dof);
18181 return (rval);
18182 }
18183
18184 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
18185 err = dtrace_enabling_retain(enab);
18186 } else {
18187 dtrace_enabling_destroy(enab);
18188 }
18189
18190 mutex_exit(&cpu_lock);
18191 mutex_exit(&dtrace_lock);
18192 dtrace_dof_destroy(dof);
18193
18194 return (err);
18195 }
18196
18197 case DTRACEIOC_REPLICATE: {
18198 dtrace_repldesc_t desc;
18199 dtrace_probedesc_t *match = &desc.dtrpd_match;
18200 dtrace_probedesc_t *create = &desc.dtrpd_create;
18201 int err;
18202
18203 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
18204 return (EFAULT);
18205
18206 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18207 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18208 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18209 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18210
18211 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18212 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18213 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18214 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18215
18216 mutex_enter(&dtrace_lock);
18217 err = dtrace_enabling_replicate(state, match, create);
18218 mutex_exit(&dtrace_lock);
18219
18220 return (err);
18221 }
18222
18223 case DTRACEIOC_PROBEMATCH:
18224 case DTRACEIOC_PROBES: {
18225 dtrace_probe_t *probe = NULL;
18226 dtrace_probedesc_t desc;
18227 dtrace_probekey_t pkey;
18228 dtrace_id_t i;
18229 int m = 0;
18230 uint32_t priv;
18231 uid_t uid;
18232 zoneid_t zoneid;
18233
18234 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
18235 return (EFAULT);
18236
18237 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
18238 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
18239 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
18240 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
18241
18242 /*
18243 * Before we attempt to match this probe, we want to give
18244 * all providers the opportunity to provide it.
18245 */
18246 if (desc.dtpd_id == DTRACE_IDNONE) {
18247 mutex_enter(&dtrace_provider_lock);
18248 dtrace_probe_provide(&desc, NULL);
18249 mutex_exit(&dtrace_provider_lock);
18250 desc.dtpd_id++;
18251 }
18252
18253 if (cmd == DTRACEIOC_PROBEMATCH) {
18254 dtrace_probekey(&desc, &pkey);
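/*
 * Clear the ID in the key: the loop below supplies candidate probe IDs
 * itself, so matching should be driven purely by the name-based criteria.
 */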
18255 pkey.dtpk_id = DTRACE_IDNONE;
18256 }
18257
18258 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
18259
18260 mutex_enter(&dtrace_lock);
18261
18262 if (cmd == DTRACEIOC_PROBEMATCH) {
18263 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
18264 if ((probe = dtrace_probes[i - 1]) != NULL &&
18265 (m = dtrace_match_probe(probe, &pkey,
18266 priv, uid, zoneid)) != 0)
18267 break;
18268 }
18269
18270 if (m < 0) {
18271 mutex_exit(&dtrace_lock);
18272 return (EINVAL);
18273 }
18274
18275 } else {
18276 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
18277 if ((probe = dtrace_probes[i - 1]) != NULL &&
18278 dtrace_match_priv(probe, priv, uid, zoneid))
18279 break;
18280 }
18281 }
18282
18283 if (probe == NULL) {
18284 mutex_exit(&dtrace_lock);
18285 return (ESRCH);
18286 }
18287
18288 dtrace_probe_description(probe, &desc);
18289 mutex_exit(&dtrace_lock);
18290
18291 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18292 return (EFAULT);
18293
18294 return (0);
18295 }
18296
18297 case DTRACEIOC_PROBEARG: {
18298 dtrace_argdesc_t desc;
18299 dtrace_probe_t *probe;
18300 dtrace_provider_t *prov;
18301
18302 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
18303 return (EFAULT);
18304
18305 if (desc.dtargd_id == DTRACE_IDNONE)
18306 return (EINVAL);
18307
18308 if (desc.dtargd_ndx == DTRACE_ARGNONE)
18309 return (EINVAL);
18310
18311 mutex_enter(&dtrace_provider_lock);
18312 mutex_enter(&mod_lock);
18313 mutex_enter(&dtrace_lock);
18314
18315 if (desc.dtargd_id > dtrace_nprobes) {
18316 mutex_exit(&dtrace_lock);
18317 mutex_exit(&mod_lock);
18318 mutex_exit(&dtrace_provider_lock);
18319 return (EINVAL);
18320 }
18321
18322 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
18323 mutex_exit(&dtrace_lock);
18324 mutex_exit(&mod_lock);
18325 mutex_exit(&dtrace_provider_lock);
18326 return (EINVAL);
18327 }
18328
18329 mutex_exit(&dtrace_lock);
18330
18331 prov = probe->dtpr_provider;
18332
18333 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
18334 /*
18335 * There isn't any typed information for this probe.
18336 * Set the argument number to DTRACE_ARGNONE.
18337 */
18338 desc.dtargd_ndx = DTRACE_ARGNONE;
18339 } else {
18340 desc.dtargd_native[0] = '\0';
18341 desc.dtargd_xlate[0] = '\0';
18342 desc.dtargd_mapping = desc.dtargd_ndx;
18343
18344 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
18345 probe->dtpr_id, probe->dtpr_arg, &desc);
18346 }
18347
18348 mutex_exit(&mod_lock);
18349 mutex_exit(&dtrace_provider_lock);
18350
18351 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18352 return (EFAULT);
18353
18354 return (0);
18355 }
18356
18357 case DTRACEIOC_GO: {
18358 processorid_t cpuid;
18359 rval = dtrace_state_go(state, &cpuid);
18360
18361 if (rval != 0)
18362 return (rval);
18363
18364 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
18365 return (EFAULT);
18366
18367 return (0);
18368 }
18369
18370 case DTRACEIOC_STOP: {
18371 processorid_t cpuid;
18372
18373 mutex_enter(&dtrace_lock);
18374 rval = dtrace_state_stop(state, &cpuid);
18375 mutex_exit(&dtrace_lock);
18376
18377 if (rval != 0)
18378 return (rval);
18379
18380 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
18381 return (EFAULT);
18382
18383 return (0);
18384 }
18385
18386 case DTRACEIOC_DOFGET: {
18387 dof_hdr_t hdr, *dof;
18388 uint64_t len;
18389
18390 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
18391 return (EFAULT);
18392
18393 mutex_enter(&dtrace_lock);
18394 dof = dtrace_dof_create(state);
18395 mutex_exit(&dtrace_lock);
18396
18397 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
18398 rval = copyout(dof, (void *)arg, len);
18399 dtrace_dof_destroy(dof);
18400
18401 return (rval == 0 ? 0 : EFAULT);
18402 }
18403
18404 case DTRACEIOC_AGGSNAP:
18405 case DTRACEIOC_BUFSNAP: {
18406 dtrace_bufdesc_t desc;
18407 caddr_t cached;
18408 dtrace_buffer_t *buf;
18409
18410 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
18411 return (EFAULT);
18412
18413 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
18414 return (EINVAL);
18415
18416 mutex_enter(&dtrace_lock);
18417
18418 if (cmd == DTRACEIOC_BUFSNAP) {
18419 buf = &state->dts_buffer[desc.dtbd_cpu];
18420 } else {
18421 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
18422 }
18423
18424 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
18425 size_t sz = buf->dtb_offset;
18426
18427 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
18428 mutex_exit(&dtrace_lock);
18429 return (EBUSY);
18430 }
18431
18432 /*
18433 * If this buffer has already been consumed, we're
18434 * going to indicate that there's nothing left here
18435 * to consume.
18436 */
18437 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
18438 mutex_exit(&dtrace_lock);
18439
18440 desc.dtbd_size = 0;
18441 desc.dtbd_drops = 0;
18442 desc.dtbd_errors = 0;
18443 desc.dtbd_oldest = 0;
18444 sz = sizeof (desc);
18445
18446 if (copyout(&desc, (void *)arg, sz) != 0)
18447 return (EFAULT);
18448
18449 return (0);
18450 }
18451
18452 /*
18453 * If this is a ring buffer that has wrapped, we want
18454 * to copy the whole thing out.
18455 */
18456 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
18457 dtrace_buffer_polish(buf);
18458 sz = buf->dtb_size;
18459 }
18460
18461 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
18462 mutex_exit(&dtrace_lock);
18463 return (EFAULT);
18464 }
18465
18466 desc.dtbd_size = sz;
18467 desc.dtbd_drops = buf->dtb_drops;
18468 desc.dtbd_errors = buf->dtb_errors;
18469 desc.dtbd_oldest = buf->dtb_xamot_offset;
18470 desc.dtbd_timestamp = dtrace_gethrtime();
18471
18472 mutex_exit(&dtrace_lock);
18473
18474 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18475 return (EFAULT);
18476
18477 buf->dtb_flags |= DTRACEBUF_CONSUMED;
18478
18479 return (0);
18480 }
18481
18482 if (buf->dtb_tomax == NULL) {
18483 ASSERT(buf->dtb_xamot == NULL);
18484 mutex_exit(&dtrace_lock);
18485 return (ENOENT);
18486 }
18487
18488 cached = buf->dtb_tomax;
18489 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
18490
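/*
 * For switching (non-ring, non-fill) buffers, cross call to the target
 * CPU to swap its active (tomax) and inactive (xamot) buffers; the
 * now-quiescent xamot buffer is then copied out below.
 */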
18491 dtrace_xcall(desc.dtbd_cpu,
18492 (dtrace_xcall_t)dtrace_buffer_switch, buf);
18493
18494 state->dts_errors += buf->dtb_xamot_errors;
18495
18496 /*
18497 * If the buffers did not actually switch, then the cross call
18498 * did not take place -- presumably because the given CPU is
18499 * not in the ready set. If this is the case, we'll return
18500 * ENOENT.
18501 */
18502 if (buf->dtb_tomax == cached) {
18503 ASSERT(buf->dtb_xamot != cached);
18504 mutex_exit(&dtrace_lock);
18505 return (ENOENT);
18506 }
18507
18508 ASSERT(cached == buf->dtb_xamot);
18509
18510 /*
18511 * We have our snapshot; now copy it out.
18512 */
18513 if (copyout(buf->dtb_xamot, desc.dtbd_data,
18514 buf->dtb_xamot_offset) != 0) {
18515 mutex_exit(&dtrace_lock);
18516 return (EFAULT);
18517 }
18518
18519 desc.dtbd_size = buf->dtb_xamot_offset;
18520 desc.dtbd_drops = buf->dtb_xamot_drops;
18521 desc.dtbd_errors = buf->dtb_xamot_errors;
18522 desc.dtbd_oldest = 0;
18523 desc.dtbd_timestamp = buf->dtb_switched;
18524
18525 mutex_exit(&dtrace_lock);
18526
18527 /*
18528 * Finally, copy out the buffer description.
18529 */
18530 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
18531 return (EFAULT);
18532
18533 return (0);
18534 }
18535
18536 case DTRACEIOC_CONF: {
18537 dtrace_conf_t conf;
18538
18539 bzero(&conf, sizeof (conf));
18540 conf.dtc_difversion = DIF_VERSION;
18541 conf.dtc_difintregs = DIF_DIR_NREGS;
18542 conf.dtc_diftupregs = DIF_DTR_NREGS;
18543 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
18544
18545 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
18546 return (EFAULT);
18547
18548 return (0);
18549 }
18550
18551 case DTRACEIOC_STATUS: {
18552 dtrace_status_t stat;
18553 dtrace_dstate_t *dstate;
18554 int i, j;
18555 uint64_t nerrs;
18556
18557 /*
18558 * See the comment in dtrace_state_deadman() for the reason
18559 * for setting dts_laststatus to INT64_MAX before setting
18560 * it to the correct value.
18561 */
18562 state->dts_laststatus = INT64_MAX;
18563 dtrace_membar_producer();
18564 state->dts_laststatus = dtrace_gethrtime();
18565
18566 bzero(&stat, sizeof (stat));
18567
18568 mutex_enter(&dtrace_lock);
18569
18570 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
18571 mutex_exit(&dtrace_lock);
18572 return (ENOENT);
18573 }
18574
18575 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
18576 stat.dtst_exiting = 1;
18577
18578 nerrs = state->dts_errors;
18579 dstate = &state->dts_vstate.dtvs_dynvars;
18580
18581 for (i = 0; i < NCPU; i++) {
18582 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
18583
18584 stat.dtst_dyndrops += dcpu->dtdsc_drops;
18585 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
18586 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
18587
18588 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
18589 stat.dtst_filled++;
18590
18591 nerrs += state->dts_buffer[i].dtb_errors;
18592
18593 for (j = 0; j < state->dts_nspeculations; j++) {
18594 dtrace_speculation_t *spec;
18595 dtrace_buffer_t *buf;
18596
18597 spec = &state->dts_speculations[j];
18598 buf = &spec->dtsp_buffer[i];
18599 stat.dtst_specdrops += buf->dtb_xamot_drops;
18600 }
18601 }
18602
18603 stat.dtst_specdrops_busy = state->dts_speculations_busy;
18604 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
18605 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
18606 stat.dtst_dblerrors = state->dts_dblerrors;
18607 stat.dtst_killed =
18608 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
18609 stat.dtst_errors = nerrs;
18610
18611 mutex_exit(&dtrace_lock);
18612
18613 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
18614 return (EFAULT);
18615
18616 return (0);
18617 }
18618
18619 case DTRACEIOC_FORMAT: {
18620 dtrace_fmtdesc_t fmt;
18621 char *str;
18622 int len;
18623
18624 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
18625 return (EFAULT);
18626
18627 mutex_enter(&dtrace_lock);
18628
18629 if (fmt.dtfd_format == 0 ||
18630 fmt.dtfd_format > state->dts_nformats) {
18631 mutex_exit(&dtrace_lock);
18632 return (EINVAL);
18633 }
18634
18635 /*
18636 * Format strings are allocated contiguously and they are
18637 * never freed; if a format index is less than the number
18638 * of formats, we can assert that the format map is non-NULL
18639 * and that the format for the specified index is non-NULL.
18640 */
18641 ASSERT(state->dts_formats != NULL);
18642 str = state->dts_formats[fmt.dtfd_format - 1];
18643 ASSERT(str != NULL);
18644
18645 len = strlen(str) + 1;
18646
18647 if (len > fmt.dtfd_length) {
18648 fmt.dtfd_length = len;
18649
18650 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
18651 mutex_exit(&dtrace_lock);
18652 return (EINVAL);
18653 }
18654 } else {
18655 if (copyout(str, fmt.dtfd_string, len) != 0) {
18656 mutex_exit(&dtrace_lock);
18657 return (EINVAL);
18658 }
18659 }
18660
18661 mutex_exit(&dtrace_lock);
18662 return (0);
18663 }
18664
18665 default:
18666 break;
18667 }
18668
18669 return (ENOTTY);
18670 }
18671
18672 /*ARGSUSED*/
18673 static int
18674 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
18675 {
18676 dtrace_state_t *state;
18677
18678 switch (cmd) {
18679 case DDI_DETACH:
18680 break;
18681
18682 case DDI_SUSPEND:
18683 return (DDI_SUCCESS);
18684
18685 default:
18686 return (DDI_FAILURE);
18687 }
18688
18689 mutex_enter(&cpu_lock);
18690 mutex_enter(&dtrace_provider_lock);
18691 mutex_enter(&dtrace_lock);
18692
18693 ASSERT(dtrace_opens == 0);
18694
18695 if (dtrace_helpers > 0) {
18696 mutex_exit(&dtrace_provider_lock);
18697 mutex_exit(&dtrace_lock);
18698 mutex_exit(&cpu_lock);
18699 return (DDI_FAILURE);
18700 }
18701
18702 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
18703 mutex_exit(&dtrace_provider_lock);
18704 mutex_exit(&dtrace_lock);
18705 mutex_exit(&cpu_lock);
18706 return (DDI_FAILURE);
18707 }
18708
18709 dtrace_provider = NULL;
18710
18711 if ((state = dtrace_anon_grab()) != NULL) {
18712 /*
18713 * If there were ECBs on this state, the provider should not
18714 * have been allowed to detach; assert that there are none.
18716 */
18717 ASSERT(state->dts_necbs == 0);
18718 dtrace_state_destroy(state);
18719
18720 /*
18721 * If we're being detached with anonymous state, we need to
18722 * indicate to the kernel debugger that DTrace is now inactive.
18723 */
18724 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
18725 }
18726
18727 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
18728 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
18729 dtrace_cpu_init = NULL;
18730 dtrace_helpers_cleanup = NULL;
18731 dtrace_helpers_fork = NULL;
18732 dtrace_cpustart_init = NULL;
18733 dtrace_cpustart_fini = NULL;
18734 dtrace_debugger_init = NULL;
18735 dtrace_debugger_fini = NULL;
18736 dtrace_modload = NULL;
18737 dtrace_modunload = NULL;
18738
18739 ASSERT(dtrace_getf == 0);
18740 ASSERT(dtrace_closef == NULL);
18741
18742 mutex_exit(&cpu_lock);
18743
18744 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
18745 dtrace_probes = NULL;
18746 dtrace_nprobes = 0;
18747
18748 dtrace_hash_destroy(dtrace_bymod);
18749 dtrace_hash_destroy(dtrace_byfunc);
18750 dtrace_hash_destroy(dtrace_byname);
18751 dtrace_bymod = NULL;
18752 dtrace_byfunc = NULL;
18753 dtrace_byname = NULL;
18754
18755 kmem_cache_destroy(dtrace_state_cache);
18756 vmem_destroy(dtrace_minor);
18757 vmem_destroy(dtrace_arena);
18758
18759 if (dtrace_toxrange != NULL) {
18760 kmem_free(dtrace_toxrange,
18761 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
18762 dtrace_toxrange = NULL;
18763 dtrace_toxranges = 0;
18764 dtrace_toxranges_max = 0;
18765 }
18766
18767 ddi_remove_minor_node(dtrace_devi, NULL);
18768 dtrace_devi = NULL;
18769
18770 ddi_soft_state_fini(&dtrace_softstate);
18771
18772 ASSERT(dtrace_vtime_references == 0);
18773 ASSERT(dtrace_opens == 0);
18774 ASSERT(dtrace_retained == NULL);
18775
18776 mutex_exit(&dtrace_lock);
18777 mutex_exit(&dtrace_provider_lock);
18778
18779 /*
18780 * We don't destroy the task queue until after we have dropped our
18781 * locks (taskq_destroy() may block on running tasks). To prevent
18782 * attempting to do work after we have effectively detached but before
18783 * the task queue has been destroyed, all tasks dispatched via the
18784 * task queue must check that DTrace is still attached before
18785 * performing any operation.
18786 */
18787 taskq_destroy(dtrace_taskq);
18788 dtrace_taskq = NULL;
18789
18790 return (DDI_SUCCESS);
18791 }
18792 #endif
18793
18794 #ifdef illumos
18795 /*ARGSUSED*/
18796 static int
18797 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
18798 {
18799 int error;
18800
18801 switch (infocmd) {
18802 case DDI_INFO_DEVT2DEVINFO:
18803 *result = (void *)dtrace_devi;
18804 error = DDI_SUCCESS;
18805 break;
18806 case DDI_INFO_DEVT2INSTANCE:
18807 *result = (void *)0;
18808 error = DDI_SUCCESS;
18809 break;
18810 default:
18811 error = DDI_FAILURE;
18812 }
18813 return (error);
18814 }
18815 #endif
18816
18817 #ifdef illumos
18818 static struct cb_ops dtrace_cb_ops = {
18819 dtrace_open, /* open */
18820 dtrace_close, /* close */
18821 nulldev, /* strategy */
18822 nulldev, /* print */
18823 nodev, /* dump */
18824 nodev, /* read */
18825 nodev, /* write */
18826 dtrace_ioctl, /* ioctl */
18827 nodev, /* devmap */
18828 nodev, /* mmap */
18829 nodev, /* segmap */
18830 nochpoll, /* poll */
18831 ddi_prop_op, /* cb_prop_op */
18832 0, /* streamtab */
18833 D_NEW | D_MP /* Driver compatibility flag */
18834 };
18835
18836 static struct dev_ops dtrace_ops = {
18837 DEVO_REV, /* devo_rev */
18838 0, /* refcnt */
18839 dtrace_info, /* get_dev_info */
18840 nulldev, /* identify */
18841 nulldev, /* probe */
18842 dtrace_attach, /* attach */
18843 dtrace_detach, /* detach */
18844 nodev, /* reset */
18845 &dtrace_cb_ops, /* driver operations */
18846 NULL, /* bus operations */
18847 nodev /* dev power */
18848 };
18849
18850 static struct modldrv modldrv = {
18851 &mod_driverops, /* module type (this is a pseudo driver) */
18852 "Dynamic Tracing", /* name of module */
18853 &dtrace_ops, /* driver ops */
18854 };
18855
18856 static struct modlinkage modlinkage = {
18857 MODREV_1,
18858 (void *)&modldrv,
18859 NULL
18860 };
18861
18862 int
18863 _init(void)
18864 {
18865 return (mod_install(&modlinkage));
18866 }
18867
18868 int
18869 _info(struct modinfo *modinfop)
18870 {
18871 return (mod_info(&modlinkage, modinfop));
18872 }
18873
18874 int
18875 _fini(void)
18876 {
18877 return (mod_remove(&modlinkage));
18878 }
18879 #endif
18880
18881 #ifdef __FreeBSD__
18882 static d_ioctl_t dtrace_ioctl;
18883 static d_ioctl_t dtrace_ioctl_helper;
18884 static void dtrace_load(void *);
18885 static int dtrace_unload(void);
18886 static struct cdev *dtrace_dev;
18887 static struct cdev *helper_dev;
18888
18889 void dtrace_invop_init(void);
18890 void dtrace_invop_uninit(void);
18891
18892 static struct cdevsw dtrace_cdevsw = {
18893 .d_version = D_VERSION,
18894 .d_ioctl = dtrace_ioctl,
18895 .d_open = dtrace_open,
18896 .d_name = "dtrace",
18897 };
18898
18899 static struct cdevsw helper_cdevsw = {
18900 .d_version = D_VERSION,
18901 .d_ioctl = dtrace_ioctl_helper,
18902 .d_name = "helper",
18903 };
18904 #endif /* __FreeBSD__ */
18905
18906 #ifdef __NetBSD__
18907 void dtrace_invop_init(void);
18908 void dtrace_invop_uninit(void);
18909
18910 struct dtrace_state_worker {
18911 kmutex_t lock;
18912 kcondvar_t cv;
18913 void (*fn)(dtrace_state_t *);
18914 dtrace_state_t *state;
18915 int interval;
18916 lwp_t *lwp;
18917 bool exiting;
18918 };
18919
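/*
 * Worker thread: wait on the condvar with a timeout of w->interval ticks
 * and, on each timeout, invoke w->fn(w->state); exit once
 * dtrace_state_worker_remove() sets w->exiting and signals the condvar.
 */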
18920 static void
18921 dtrace_state_worker_thread(void *vp)
18922 {
18923 struct dtrace_state_worker *w = vp;
18924
18925 mutex_enter(&w->lock);
18926 while (!w->exiting) {
18927 int error;
18928
18929 error = cv_timedwait(&w->cv, &w->lock, w->interval);
18930 if (error == EWOULDBLOCK) {
18931 mutex_exit(&w->lock);
18932 w->fn(w->state);
18933 mutex_enter(&w->lock);
18934 }
18935 }
18936 mutex_exit(&w->lock);
18937 kthread_exit(0);
18938 }
18939
18940 struct dtrace_state_worker *
18941 dtrace_state_worker_add(void (*fn)(dtrace_state_t *), dtrace_state_t *state,
18942 hrtime_t interval)
18943 {
18944 struct dtrace_state_worker *w;
18945 int error __diagused;
18946
18947 w = kmem_alloc(sizeof(*w), KM_SLEEP);
18948 mutex_init(&w->lock, "dtrace", MUTEX_DEFAULT, NULL);
18949 cv_init(&w->cv, "dtrace");
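/* Convert the nanosecond interval into clock ticks for cv_timedwait(). */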
18950 w->interval = ((uintmax_t)hz * interval) / NANOSEC;
18951 w->fn = fn;
18952 w->state = state;
18953 w->exiting = false;
18954 error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_MUSTJOIN, NULL,
18955 dtrace_state_worker_thread, w, &w->lwp, "dtrace-state-worker");
18956 KASSERT(error == 0); /* XXX */
18957 return w;
18958 }
18959
18960 void
18961 dtrace_state_worker_remove(struct dtrace_state_worker *w)
18962 {
18963 int error __diagused;
18964
18965 KASSERT(!w->exiting);
18966 mutex_enter(&w->lock);
18967 w->exiting = true;
18968 cv_signal(&w->cv);
18969 mutex_exit(&w->lock);
18970 error = kthread_join(w->lwp);
18971 KASSERT(error == 0);
18972 cv_destroy(&w->cv);
18973 mutex_destroy(&w->lock);
18974 kmem_free(w, sizeof(*w));
18975 }
18976
18977 #endif /* __NetBSD__ */
18978
18979 static void dtrace_load(void *);
18980 static int dtrace_unload(void);
18981
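/*
 * The remaining port-specific pieces (anonymous enablings, the ioctl
 * handlers, load/unload, modevent, sysctl, vtime and ISA glue) are pulled
 * in as textual includes so that they can share this file's static state.
 */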
18982 #include <dtrace_anon.c>
18983 #include <dtrace_ioctl.c>
18984 #include <dtrace_load.c>
18985 #include <dtrace_modevent.c>
18986 #include <dtrace_sysctl.c>
18987 #include <dtrace_unload.c>
18988 #include <dtrace_vtime.c>
18989 #include <dtrace_hacks.c>
18990 #include <dtrace_isa.c>
18991
18992 #ifdef __FreeBSD__
18993 DEV_MODULE(dtrace, dtrace_modevent, NULL);
18994 MODULE_VERSION(dtrace, 1);
18995 MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
18996 #endif /* __FreeBSD__ */
18997
18998 #ifdef __NetBSD__
18999 MODULE(MODULE_CLASS_MISC, dtrace, "solaris");
19000 #endif /* __NetBSD__ */
19001