1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 * 21 * $FreeBSD: src/sys/cddl/contrib/opensolaris/uts/common/dtrace/dtrace.c,v 1.10.2.1 2009/08/03 08:13:06 kensmith Exp $ 22 */ 23 24 /* 25 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 26 * Use is subject to license terms. 27 */ 28 29 /* #pragma ident "%Z%%M% %I% %E% SMI" */ 30 31 /* 32 * DTrace - Dynamic Tracing for Solaris 33 * 34 * This is the implementation of the Solaris Dynamic Tracing framework 35 * (DTrace). The user-visible interface to DTrace is described at length in 36 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace 37 * library, the in-kernel DTrace framework, and the DTrace providers are 38 * described in the block comments in the <sys/dtrace.h> header file. The 39 * internal architecture of DTrace is described in the block comments in the 40 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace 41 * implementation very much assume mastery of all of these sources; if one has 42 * an unanswered question about the implementation, one should consult them 43 * first. 44 * 45 * The functions here are ordered roughly as follows: 46 * 47 * - Probe context functions 48 * - Probe hashing functions 49 * - Non-probe context utility functions 50 * - Matching functions 51 * - Provider-to-Framework API functions 52 * - Probe management functions 53 * - DIF object functions 54 * - Format functions 55 * - Predicate functions 56 * - ECB functions 57 * - Buffer functions 58 * - Enabling functions 59 * - DOF functions 60 * - Anonymous enabling functions 61 * - Consumer state functions 62 * - Helper functions 63 * - Hook functions 64 * - Driver cookbook functions 65 * 66 * Each group of functions begins with a block comment labelled the "DTrace 67 * [Group] Functions", allowing one to find each block by searching forward 68 * on capital-f functions. 
69 */ 70 #if !defined(sun) 71 /* we need internal access to mutexes for state inspection */ 72 #define __MUTEX_PRIVATE 73 #define __RWLOCK_PRIVATE 74 #endif 75 76 #include <sys/errno.h> 77 #if !defined(sun) 78 #include <sys/time.h> 79 #endif 80 #include <sys/stat.h> 81 #include <sys/conf.h> 82 #include <sys/systm.h> 83 #if defined(sun) 84 #include <sys/modctl.h> 85 #include <sys/ddi.h> 86 #include <sys/sunddi.h> 87 #endif 88 #include <sys/cpuvar.h> 89 #include <sys/kmem.h> 90 #if defined(sun) 91 #include <sys/strsubr.h> 92 #endif 93 #include <sys/sysmacros.h> 94 #include <sys/dtrace_impl.h> 95 #include <sys/atomic.h> 96 #include <sys/cmn_err.h> 97 #include <sys/mutex_impl.h> 98 #include <sys/rwlock_impl.h> 99 #include <sys/ctf_api.h> 100 #if defined(sun) 101 #include <sys/panic.h> 102 #include <sys/priv_impl.h> 103 #endif 104 #include <sys/policy.h> 105 #if defined(sun) 106 #include <sys/cred_impl.h> 107 #include <sys/procfs_isa.h> 108 #endif 109 #include <sys/taskq.h> 110 #if defined(sun) 111 #include <sys/mkdev.h> 112 #include <sys/kdi.h> 113 #endif 114 #include <sys/zone.h> 115 #include <sys/socket.h> 116 #include <netinet/in.h> 117 118 /* FreeBSD includes: */ 119 #if !defined(sun) 120 121 #include <sys/callout.h> 122 #include <sys/ctype.h> 123 #include <sys/limits.h> 124 //#include <sys/kdb.h> 125 #include <sys/kernel.h> 126 #include <sys/malloc.h> 127 #include <sys/sysctl.h> 128 #include <sys/lock.h> 129 #include <sys/mutex.h> 130 #include <sys/rwlock.h> 131 //#include <sys/sx.h> 132 #include <sys/file.h> 133 #include <sys/filedesc.h> 134 #include <sys/dtrace_bsd.h> 135 #include <sys/vmem.h> 136 #include <sys/module.h> 137 #include <sys/cpu.h> 138 #include <netinet/in.h> 139 #include "dtrace_cddl.h" 140 #include "dtrace_debug.c" 141 #endif 142 143 #if !defined(sun) 144 /* fake module entry for netbsd */ 145 module_t *mod_nbsd = NULL; 146 #endif 147 148 /* 149 * DTrace Tunable Variables 150 * 151 * The following variables may be tuned by adding a line to /etc/system that 152 * includes both the name of the DTrace module ("dtrace") and the name of the 153 * variable. For example: 154 * 155 * set dtrace:dtrace_destructive_disallow = 1 156 * 157 * In general, the only variables that one should be tuning this way are those 158 * that affect system-wide DTrace behavior, and for which the default behavior 159 * is undesirable. Most of these variables are tunable on a per-consumer 160 * basis using DTrace options, and need not be tuned on a system-wide basis. 161 * When tuning these variables, avoid pathological values; while some attempt 162 * is made to verify the integrity of these variables, they are not considered 163 * part of the supported interface to DTrace, and they are therefore not 164 * checked comprehensively. Further, these variables should not be tuned 165 * dynamically via "mdb -kw" or other means; they should only be tuned via 166 * /etc/system. 
167 */ 168 int dtrace_destructive_disallow = 0; 169 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024); 170 size_t dtrace_difo_maxsize = (256 * 1024); 171 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024); 172 size_t dtrace_global_maxsize = (16 * 1024); 173 size_t dtrace_actions_max = (16 * 1024); 174 size_t dtrace_retain_max = 1024; 175 dtrace_optval_t dtrace_helper_actions_max = 32; 176 dtrace_optval_t dtrace_helper_providers_max = 32; 177 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024); 178 size_t dtrace_strsize_default = 256; 179 dtrace_optval_t dtrace_cleanrate_default = 99009900; /* 101 hz */ 180 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */ 181 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */ 182 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */ 183 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */ 184 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */ 185 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */ 186 dtrace_optval_t dtrace_nspec_default = 1; 187 dtrace_optval_t dtrace_specsize_default = 32 * 1024; 188 dtrace_optval_t dtrace_stackframes_default = 20; 189 dtrace_optval_t dtrace_ustackframes_default = 20; 190 dtrace_optval_t dtrace_jstackframes_default = 50; 191 dtrace_optval_t dtrace_jstackstrsize_default = 512; 192 int dtrace_msgdsize_max = 128; 193 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */ 194 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */ 195 int dtrace_devdepth_max = 32; 196 int dtrace_err_verbose; 197 hrtime_t dtrace_deadman_interval = NANOSEC; 198 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC; 199 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC; 200 201 /* 202 * DTrace External Variables 203 * 204 * As dtrace(7D) is a kernel module, any DTrace variables are obviously 205 * available to DTrace consumers via the backtick (`) syntax. One of these, 206 * dtrace_zero, is made deliberately so: it is provided as a source of 207 * well-known, zero-filled memory. While this variable is not documented, 208 * it is used by some translators as an implementation detail. 
209 */ 210 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */ 211 212 /* 213 * DTrace Internal Variables 214 */ 215 #if defined(sun) 216 static dev_info_t *dtrace_devi; /* device info */ 217 #endif 218 static vmem_t *dtrace_arena; /* probe ID arena */ 219 #if defined(sun) 220 static vmem_t *dtrace_minor; /* minor number arena */ 221 static taskq_t *dtrace_taskq; /* task queue */ 222 #endif 223 static dtrace_probe_t **dtrace_probes; /* array of all probes */ 224 int dtrace_probes_size=0; /* size for kmem_free */ 225 static int dtrace_nprobes; /* number of probes */ 226 static dtrace_provider_t *dtrace_provider; /* provider list */ 227 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */ 228 static int dtrace_opens; /* number of opens */ 229 static int dtrace_helpers; /* number of helpers */ 230 #if defined(sun) 231 static void *dtrace_softstate; /* softstate pointer */ 232 #endif 233 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */ 234 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */ 235 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */ 236 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */ 237 static int dtrace_toxranges; /* number of toxic ranges */ 238 static int dtrace_toxranges_max; /* size of toxic range array */ 239 static dtrace_anon_t dtrace_anon; /* anonymous enabling */ 240 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */ 241 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */ 242 static kthread_t *dtrace_panicked; /* panicking thread */ 243 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */ 244 static dtrace_genid_t dtrace_probegen; /* current probe generation */ 245 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */ 246 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */ 247 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */ 248 #if !defined(sun) 249 int dtrace_in_probe; /* non-zero if executing a probe */ 250 #if defined(__i386__) || defined(__amd64__) 251 uintptr_t dtrace_in_probe_addr; /* Address of invop when already in probe */ 252 #endif 253 254 void *dtrace_deadman_wchan; 255 int dtrace_deadman_alive; /* deadman thread keep alive */ 256 lwp_t *dtrace_deadman_proc; 257 #endif 258 259 /* 260 * DTrace Locking 261 * DTrace is protected by three (relatively coarse-grained) locks: 262 * 263 * (1) dtrace_lock is required to manipulate essentially any DTrace state, 264 * including enabling state, probes, ECBs, consumer state, helper state, 265 * etc. Importantly, dtrace_lock is _not_ required when in probe context; 266 * probe context is lock-free -- synchronization is handled via the 267 * dtrace_sync() cross call mechanism. 268 * 269 * (2) dtrace_provider_lock is required when manipulating provider state, or 270 * when provider state must be held constant. 271 * 272 * (3) dtrace_meta_lock is required when manipulating meta provider state, or 273 * when meta provider state must be held constant. 274 * 275 * The lock ordering between these three locks is dtrace_meta_lock before 276 * dtrace_provider_lock before dtrace_lock. (In particular, there are 277 * several places where dtrace_provider_lock is held by the framework as it 278 * calls into the providers -- which then call back into the framework, 279 * grabbing dtrace_lock.) 280 * 281 * There are two other locks in the mix: mod_lock and cpu_lock. 
With respect 282 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical 283 * role as a coarse-grained lock; it is acquired before both of these locks. 284 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must 285 * be acquired _between_ dtrace_meta_lock and any other DTrace locks. 286 * mod_lock is similar with respect to dtrace_provider_lock in that it must be 287 * acquired _between_ dtrace_provider_lock and dtrace_lock. 288 */ 289 static kmutex_t dtrace_lock; /* probe state lock */ 290 static kmutex_t dtrace_provider_lock; /* provider state lock */ 291 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */ 292 293 #if !defined(sun) 294 /* XXX FreeBSD hacks. */ 295 static kmutex_t mod_lock; 296 297 #define cr_suid cr_svuid 298 #define cr_sgid cr_svgid 299 #define ipaddr_t in_addr_t 300 #define mod_modname pathname 301 #define vuprintf vprintf 302 #define ttoproc(_a) ((_a)->l_proc) 303 #define crgetzoneid(_a) 0 304 //#define NCPU MAXCPUS 305 #define NCPU ncpu 306 #define SNOCD 0 307 #define CPU_ON_INTR(_a) 0 308 309 #define PRIV_EFFECTIVE (1 << 0) 310 #define PRIV_DTRACE_KERNEL (1 << 1) 311 #define PRIV_DTRACE_PROC (1 << 2) 312 #define PRIV_DTRACE_USER (1 << 3) 313 #define PRIV_PROC_OWNER (1 << 4) 314 #define PRIV_PROC_ZONE (1 << 5) 315 #define PRIV_ALL ~0 316 317 //SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information"); 318 #endif 319 320 #if defined(sun) 321 #define curcpu_id CPU->cpu_id 322 #else 323 #define curcpu_id cpu_number() 324 #endif 325 326 327 /* 328 * DTrace Provider Variables 329 * 330 * These are the variables relating to DTrace as a provider (that is, the 331 * provider of the BEGIN, END, and ERROR probes). 332 */ 333 static dtrace_pattr_t dtrace_provider_attr = { 334 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 335 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 336 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN }, 337 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 338 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON }, 339 }; 340 341 static void 342 dtrace_nullop(void) 343 {} 344 345 static int 346 dtrace_enable_nullop(void) 347 { 348 return (0); 349 } 350 351 static dtrace_pops_t dtrace_provider_ops = { 352 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop, 353 #if defined(sun) 354 (void (*)(void *, modctl_t *))dtrace_nullop, 355 #else 356 (void (*)(void *, dtrace_modctl_t *))dtrace_nullop, 357 #endif 358 (int (*)(void *, dtrace_id_t, void *))dtrace_nullop, 359 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 360 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 361 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop, 362 NULL, 363 NULL, 364 NULL, 365 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop 366 }; 367 368 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */ 369 static dtrace_id_t dtrace_probeid_end; /* special END probe */ 370 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */ 371 372 /* 373 * DTrace Helper Tracing Variables 374 */ 375 uint32_t dtrace_helptrace_next = 0; 376 uint32_t dtrace_helptrace_nlocals; 377 char *dtrace_helptrace_buffer; 378 int dtrace_helptrace_bufsize = 512 * 1024; 379 380 #ifdef DEBUG 381 int dtrace_helptrace_enabled = 1; 382 #else 383 int dtrace_helptrace_enabled = 0; 384 #endif 385 386 /* 387 * DTrace Error Hashing 388 * 389 * On DEBUG kernels, DTrace will track the errors 
that has seen in a hash 390 * table. This is very useful for checking coverage of tests that are 391 * expected to induce DIF or DOF processing errors, and may be useful for 392 * debugging problems in the DIF code generator or in DOF generation . The 393 * error hash may be examined with the ::dtrace_errhash MDB dcmd. 394 */ 395 #ifdef DEBUG 396 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ]; 397 static const char *dtrace_errlast; 398 static kthread_t *dtrace_errthread; 399 static kmutex_t dtrace_errlock; 400 #endif 401 402 /* 403 * DTrace Macros and Constants 404 * 405 * These are various macros that are useful in various spots in the 406 * implementation, along with a few random constants that have no meaning 407 * outside of the implementation. There is no real structure to this cpp 408 * mishmash -- but is there ever? 409 */ 410 #define DTRACE_HASHSTR(hash, probe) \ 411 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs))) 412 413 #define DTRACE_HASHNEXT(hash, probe) \ 414 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs) 415 416 #define DTRACE_HASHPREV(hash, probe) \ 417 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs) 418 419 #define DTRACE_HASHEQ(hash, lhs, rhs) \ 420 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \ 421 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0) 422 423 #define DTRACE_AGGHASHSIZE_SLEW 17 424 425 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3) 426 427 /* 428 * The key for a thread-local variable consists of the lower 61 bits of the 429 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL. 430 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never 431 * equal to a variable identifier. This is necessary (but not sufficient) to 432 * assure that global associative arrays never collide with thread-local 433 * variables. To guarantee that they cannot collide, we must also define the 434 * order for keying dynamic variables. That order is: 435 * 436 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ] 437 * 438 * Because the variable-key and the tls-key are in orthogonal spaces, there is 439 * no way for a global variable key signature to match a thread-local key 440 * signature. 
441 */ 442 #if defined(sun) 443 #define DTRACE_TLS_THRKEY(where) { \ 444 uint_t intr = 0; \ 445 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \ 446 for (; actv; actv >>= 1) \ 447 intr++; \ 448 ASSERT(intr < (1 << 3)); \ 449 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \ 450 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 451 } 452 #else 453 #define DTRACE_TLS_THRKEY(where) { \ 454 uint_t intr = 0; \ 455 (where) = ((curthread->l_lid + (curthread->l_proc->p_pid << 16) + \ 456 DIF_VARIABLE_MAX) & \ 457 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 458 } 459 #if 0 460 #define DTRACE_TLS_THRKEY(where) { \ 461 solaris_cpu_t *_c = &solaris_cpu[curcpu_id]; \ 462 uint_t intr = 0; \ 463 uint_t actv = _c->cpu_intr_actv; \ 464 for (; actv; actv >>= 1) \ 465 intr++; \ 466 ASSERT(intr < (1 << 3)); \ 467 (where) = ((curthread->l_lid + (curthread->l_proc->p_pid << 16) + \ 468 DIF_VARIABLE_MAX) & \ 469 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \ 470 } 471 #endif 472 #endif 473 474 #define DT_BSWAP_8(x) ((x) & 0xff) 475 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8)) 476 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16)) 477 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32)) 478 479 #define DT_MASK_LO 0x00000000FFFFFFFFULL 480 481 #define DTRACE_STORE(type, tomax, offset, what) \ 482 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what); 483 484 #ifndef __i386 485 #define DTRACE_ALIGNCHECK(addr, size, flags) \ 486 if (addr & (size - 1)) { \ 487 *flags |= CPU_DTRACE_BADALIGN; \ 488 cpu_core[curcpu_id].cpuc_dtrace_illval = addr; \ 489 return (0); \ 490 } 491 #else 492 #define DTRACE_ALIGNCHECK(addr, size, flags) 493 #endif 494 495 /* 496 * Test whether a range of memory starting at testaddr of size testsz falls 497 * within the range of memory described by addr, sz. We take care to avoid 498 * problems with overflow and underflow of the unsigned quantities, and 499 * disallow all negative sizes. Ranges of size 0 are allowed. 500 */ 501 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \ 502 ((testaddr) - (baseaddr) < (basesz) && \ 503 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \ 504 (testaddr) + (testsz) >= (testaddr)) 505 506 /* 507 * Test whether alloc_sz bytes will fit in the scratch region. We isolate 508 * alloc_sz on the righthand side of the comparison in order to avoid overflow 509 * or underflow in the comparison with it. This is simpler than the INRANGE 510 * check above, because we know that the dtms_scratch_ptr is valid in the 511 * range. Allocations of size zero are allowed. 512 */ 513 #define DTRACE_INSCRATCH(mstate, alloc_sz) \ 514 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \ 515 (mstate)->dtms_scratch_ptr >= (alloc_sz)) 516 517 #define DTRACE_LOADFUNC(bits) \ 518 /*CSTYLED*/ \ 519 uint##bits##_t \ 520 dtrace_load##bits(uintptr_t addr) \ 521 { \ 522 size_t size = bits / NBBY; \ 523 /*CSTYLED*/ \ 524 uint##bits##_t rval; \ 525 int i; \ 526 volatile uint16_t *flags = (volatile uint16_t *) \ 527 &cpu_core[curcpu_id].cpuc_dtrace_flags; \ 528 \ 529 DTRACE_ALIGNCHECK(addr, size, flags); \ 530 \ 531 for (i = 0; i < dtrace_toxranges; i++) { \ 532 if (addr >= dtrace_toxrange[i].dtt_limit) \ 533 continue; \ 534 \ 535 if (addr + size <= dtrace_toxrange[i].dtt_base) \ 536 continue; \ 537 \ 538 /* \ 539 * This address falls within a toxic region; return 0. 
\ 540 */ \ 541 *flags |= CPU_DTRACE_BADADDR; \ 542 cpu_core[curcpu_id].cpuc_dtrace_illval = addr; \ 543 return (0); \ 544 } \ 545 \ 546 *flags |= CPU_DTRACE_NOFAULT; \ 547 /*CSTYLED*/ \ 548 rval = *((volatile uint##bits##_t *)addr); \ 549 *flags &= ~CPU_DTRACE_NOFAULT; \ 550 \ 551 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \ 552 } 553 554 #ifdef _LP64 555 #define dtrace_loadptr dtrace_load64 556 #else 557 #define dtrace_loadptr dtrace_load32 558 #endif 559 560 #define DTRACE_DYNHASH_FREE 0 561 #define DTRACE_DYNHASH_SINK 1 562 #define DTRACE_DYNHASH_VALID 2 563 564 #define DTRACE_MATCH_FAIL -1 565 #define DTRACE_MATCH_NEXT 0 566 #define DTRACE_MATCH_DONE 1 567 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0') 568 #define DTRACE_STATE_ALIGN 64 569 570 #define DTRACE_FLAGS2FLT(flags) \ 571 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \ 572 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \ 573 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \ 574 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \ 575 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \ 576 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \ 577 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \ 578 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \ 579 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \ 580 DTRACEFLT_UNKNOWN) 581 582 #define DTRACEACT_ISSTRING(act) \ 583 ((act)->dta_kind == DTRACEACT_DIFEXPR && \ 584 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) 585 586 /* Function prototype definitions: */ 587 static size_t dtrace_strlen(const char *, size_t); 588 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id); 589 static void dtrace_enabling_provide(dtrace_provider_t *); 590 static int dtrace_enabling_match(dtrace_enabling_t *, int *); 591 static void dtrace_enabling_matchall(void); 592 static dtrace_state_t *dtrace_anon_grab(void); 593 #if defined(sun) 594 static uint64_t dtrace_helper(int, dtrace_mstate_t *, 595 dtrace_state_t *, uint64_t, uint64_t); 596 static dtrace_helpers_t *dtrace_helpers_create(proc_t *); 597 #endif 598 static void dtrace_buffer_drop(dtrace_buffer_t *); 599 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t, 600 dtrace_state_t *, dtrace_mstate_t *); 601 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t, 602 dtrace_optval_t); 603 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *); 604 #if defined(sun) 605 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *); 606 #endif 607 uint16_t dtrace_load16(uintptr_t); 608 uint32_t dtrace_load32(uintptr_t); 609 uint64_t dtrace_load64(uintptr_t); 610 uint8_t dtrace_load8(uintptr_t); 611 void dtrace_dynvar_clean(dtrace_dstate_t *); 612 dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *, 613 size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *); 614 uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *); 615 616 /* 617 * DTrace Probe Context Functions 618 * 619 * These functions are called from probe context. Because probe context is 620 * any context in which C may be called, arbitrarily locks may be held, 621 * interrupts may be disabled, we may be in arbitrary dispatched state, etc. 622 * As a result, functions called from probe context may only call other DTrace 623 * support functions -- they may not interact at all with the system at large. 
624 * (Note that the ASSERT macro is made probe-context safe by redefining it in 625 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary 626 * loads are to be performed from probe context, they _must_ be in terms of 627 * the safe dtrace_load*() variants. 628 * 629 * Some functions in this block are not actually called from probe context; 630 * for these functions, there will be a comment above the function reading 631 * "Note: not called from probe context." 632 */ 633 void 634 dtrace_panic(const char *format, ...) 635 { 636 va_list alist; 637 638 va_start(alist, format); 639 dtrace_vpanic(format, alist); 640 va_end(alist); 641 } 642 643 int 644 dtrace_assfail(const char *a, const char *f, int l) 645 { 646 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l); 647 648 /* 649 * We just need something here that even the most clever compiler 650 * cannot optimize away. 651 */ 652 return (a[(uintptr_t)f]); 653 } 654 655 /* 656 * Atomically increment a specified error counter from probe context. 657 */ 658 static void 659 dtrace_error(uint32_t *counter) 660 { 661 /* 662 * Most counters stored to in probe context are per-CPU counters. 663 * However, there are some error conditions that are sufficiently 664 * arcane that they don't merit per-CPU storage. If these counters 665 * are incremented concurrently on different CPUs, scalability will be 666 * adversely affected -- but we don't expect them to be white-hot in a 667 * correctly constructed enabling... 668 */ 669 uint32_t oval, nval; 670 671 do { 672 oval = *counter; 673 674 if ((nval = oval + 1) == 0) { 675 /* 676 * If the counter would wrap, set it to 1 -- assuring 677 * that the counter is never zero when we have seen 678 * errors. (The counter must be 32-bits because we 679 * aren't guaranteed a 64-bit compare&swap operation.) 680 * To save this code both the infamy of being fingered 681 * by a priggish news story and the indignity of being 682 * the target of a neo-puritan witch trial, we're 683 * carefully avoiding any colorful description of the 684 * likelihood of this condition -- but suffice it to 685 * say that it is only slightly more likely than the 686 * overflow of predicate cache IDs, as discussed in 687 * dtrace_predicate_create(). 688 */ 689 nval = 1; 690 } 691 } while (dtrace_cas32(counter, oval, nval) != oval); 692 } 693 694 /* 695 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a 696 * uint8_t, a uint16_t, a uint32_t and a uint64_t. 697 */ 698 DTRACE_LOADFUNC(8) 699 DTRACE_LOADFUNC(16) 700 DTRACE_LOADFUNC(32) 701 DTRACE_LOADFUNC(64) 702 703 static int 704 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate) 705 { 706 if (dest < mstate->dtms_scratch_base) 707 return (0); 708 709 if (dest + size < dest) 710 return (0); 711 712 if (dest + size > mstate->dtms_scratch_ptr) 713 return (0); 714 715 return (1); 716 } 717 718 static int 719 dtrace_canstore_statvar(uint64_t addr, size_t sz, 720 dtrace_statvar_t **svars, int nsvars) 721 { 722 int i; 723 724 for (i = 0; i < nsvars; i++) { 725 dtrace_statvar_t *svar = svars[i]; 726 727 if (svar == NULL || svar->dtsv_size == 0) 728 continue; 729 730 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size)) 731 return (1); 732 } 733 734 return (0); 735 } 736 737 /* 738 * Check to see if the address is within a memory region to which a store may 739 * be issued. This includes the DTrace scratch areas, and any DTrace variable 740 * region. 
The caller of dtrace_canstore() is responsible for performing any 741 * alignment checks that are needed before stores are actually executed. 742 */ 743 static int 744 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 745 dtrace_vstate_t *vstate) 746 { 747 /* 748 * First, check to see if the address is in scratch space... 749 */ 750 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base, 751 mstate->dtms_scratch_size)) 752 return (1); 753 754 /* 755 * Now check to see if it's a dynamic variable. This check will pick 756 * up both thread-local variables and any global dynamically-allocated 757 * variables. 758 */ 759 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base, 760 vstate->dtvs_dynvars.dtds_size)) { 761 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 762 uintptr_t base = (uintptr_t)dstate->dtds_base + 763 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t)); 764 uintptr_t chunkoffs; 765 766 /* 767 * Before we assume that we can store here, we need to make 768 * sure that it isn't in our metadata -- storing to our 769 * dynamic variable metadata would corrupt our state. For 770 * the range to not include any dynamic variable metadata, 771 * it must: 772 * 773 * (1) Start above the hash table that is at the base of 774 * the dynamic variable space 775 * 776 * (2) Have a starting chunk offset that is beyond the 777 * dtrace_dynvar_t that is at the base of every chunk 778 * 779 * (3) Not span a chunk boundary 780 * 781 */ 782 if (addr < base) 783 return (0); 784 785 chunkoffs = (addr - base) % dstate->dtds_chunksize; 786 787 if (chunkoffs < sizeof (dtrace_dynvar_t)) 788 return (0); 789 790 if (chunkoffs + sz > dstate->dtds_chunksize) 791 return (0); 792 793 return (1); 794 } 795 796 /* 797 * Finally, check the static local and global variables. These checks 798 * take the longest, so we perform them last. 799 */ 800 if (dtrace_canstore_statvar(addr, sz, 801 vstate->dtvs_locals, vstate->dtvs_nlocals)) 802 return (1); 803 804 if (dtrace_canstore_statvar(addr, sz, 805 vstate->dtvs_globals, vstate->dtvs_nglobals)) 806 return (1); 807 808 return (0); 809 } 810 811 812 /* 813 * Convenience routine to check to see if the address is within a memory 814 * region in which a load may be issued given the user's privilege level; 815 * if not, it sets the appropriate error flags and loads 'addr' into the 816 * illegal value slot. 817 * 818 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement 819 * appropriate memory access protection. 820 */ 821 static int 822 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 823 dtrace_vstate_t *vstate) 824 { 825 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval; 826 827 /* 828 * If we hold the privilege to read from kernel memory, then 829 * everything is readable. 830 */ 831 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 832 return (1); 833 834 /* 835 * You can obviously read that which you can store. 836 */ 837 if (dtrace_canstore(addr, sz, mstate, vstate)) 838 return (1); 839 840 /* 841 * We're allowed to read from our own string table. 
842 */ 843 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab, 844 mstate->dtms_difo->dtdo_strlen)) 845 return (1); 846 847 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV); 848 *illval = addr; 849 return (0); 850 } 851 852 /* 853 * Convenience routine to check to see if a given string is within a memory 854 * region in which a load may be issued given the user's privilege level; 855 * this exists so that we don't need to issue unnecessary dtrace_strlen() 856 * calls in the event that the user has all privileges. 857 */ 858 static int 859 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate, 860 dtrace_vstate_t *vstate) 861 { 862 size_t strsz; 863 864 /* 865 * If we hold the privilege to read from kernel memory, then 866 * everything is readable. 867 */ 868 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 869 return (1); 870 871 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz); 872 if (dtrace_canload(addr, strsz, mstate, vstate)) 873 return (1); 874 875 return (0); 876 } 877 878 /* 879 * Convenience routine to check to see if a given variable is within a memory 880 * region in which a load may be issued given the user's privilege level. 881 */ 882 static int 883 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate, 884 dtrace_vstate_t *vstate) 885 { 886 size_t sz; 887 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 888 889 /* 890 * If we hold the privilege to read from kernel memory, then 891 * everything is readable. 892 */ 893 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 894 return (1); 895 896 if (type->dtdt_kind == DIF_TYPE_STRING) 897 sz = dtrace_strlen(src, 898 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1; 899 else 900 sz = type->dtdt_size; 901 902 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate)); 903 } 904 905 /* 906 * Compare two strings using safe loads. 907 */ 908 static int 909 dtrace_strncmp(char *s1, char *s2, size_t limit) 910 { 911 uint8_t c1, c2; 912 volatile uint16_t *flags; 913 914 if (s1 == s2 || limit == 0) 915 return (0); 916 917 flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags; 918 919 do { 920 if (s1 == NULL) { 921 c1 = '\0'; 922 } else { 923 c1 = dtrace_load8((uintptr_t)s1++); 924 } 925 926 if (s2 == NULL) { 927 c2 = '\0'; 928 } else { 929 c2 = dtrace_load8((uintptr_t)s2++); 930 } 931 932 if (c1 != c2) 933 return (c1 - c2); 934 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT)); 935 936 return (0); 937 } 938 939 /* 940 * Compute strlen(s) for a string using safe memory accesses. The additional 941 * len parameter is used to specify a maximum length to ensure completion. 942 */ 943 static size_t 944 dtrace_strlen(const char *s, size_t lim) 945 { 946 uint_t len; 947 948 for (len = 0; len != lim; len++) { 949 if (dtrace_load8((uintptr_t)s++) == '\0') 950 break; 951 } 952 953 return (len); 954 } 955 956 /* 957 * Check if an address falls within a toxic region. 
958 */ 959 static int 960 dtrace_istoxic(uintptr_t kaddr, size_t size) 961 { 962 uintptr_t taddr, tsize; 963 int i; 964 965 for (i = 0; i < dtrace_toxranges; i++) { 966 taddr = dtrace_toxrange[i].dtt_base; 967 tsize = dtrace_toxrange[i].dtt_limit - taddr; 968 969 if (kaddr - taddr < tsize) { 970 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 971 cpu_core[curcpu_id].cpuc_dtrace_illval = kaddr; 972 return (1); 973 } 974 975 if (taddr - kaddr < size) { 976 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 977 cpu_core[curcpu_id].cpuc_dtrace_illval = taddr; 978 return (1); 979 } 980 } 981 982 return (0); 983 } 984 985 /* 986 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe 987 * memory specified by the DIF program. The dst is assumed to be safe memory 988 * that we can store to directly because it is managed by DTrace. As with 989 * standard bcopy, overlapping copies are handled properly. 990 */ 991 static void 992 dtrace_bcopy(const void *src, void *dst, size_t len) 993 { 994 if (len != 0) { 995 uint8_t *s1 = dst; 996 const uint8_t *s2 = src; 997 998 if (s1 <= s2) { 999 do { 1000 *s1++ = dtrace_load8((uintptr_t)s2++); 1001 } while (--len != 0); 1002 } else { 1003 s2 += len; 1004 s1 += len; 1005 1006 do { 1007 *--s1 = dtrace_load8((uintptr_t)--s2); 1008 } while (--len != 0); 1009 } 1010 } 1011 } 1012 1013 /* 1014 * Copy src to dst using safe memory accesses, up to either the specified 1015 * length, or the point that a nul byte is encountered. The src is assumed to 1016 * be unsafe memory specified by the DIF program. The dst is assumed to be 1017 * safe memory that we can store to directly because it is managed by DTrace. 1018 * Unlike dtrace_bcopy(), overlapping regions are not handled. 1019 */ 1020 static void 1021 dtrace_strcpy(const void *src, void *dst, size_t len) 1022 { 1023 if (len != 0) { 1024 uint8_t *s1 = dst, c; 1025 const uint8_t *s2 = src; 1026 1027 do { 1028 *s1++ = c = dtrace_load8((uintptr_t)s2++); 1029 } while (--len != 0 && c != '\0'); 1030 } 1031 } 1032 1033 /* 1034 * Copy src to dst, deriving the size and type from the specified (BYREF) 1035 * variable type. The src is assumed to be unsafe memory specified by the DIF 1036 * program. The dst is assumed to be DTrace variable memory that is of the 1037 * specified type; we assume that we can store to directly. 1038 */ 1039 static void 1040 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type) 1041 { 1042 ASSERT(type->dtdt_flags & DIF_TF_BYREF); 1043 1044 if (type->dtdt_kind == DIF_TYPE_STRING) { 1045 dtrace_strcpy(src, dst, type->dtdt_size); 1046 } else { 1047 dtrace_bcopy(src, dst, type->dtdt_size); 1048 } 1049 } 1050 1051 /* 1052 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be 1053 * unsafe memory specified by the DIF program. The s2 data is assumed to be 1054 * safe memory that we can access directly because it is managed by DTrace. 1055 */ 1056 static int 1057 dtrace_bcmp(const void *s1, const void *s2, size_t len) 1058 { 1059 volatile uint16_t *flags; 1060 1061 flags = (volatile uint16_t *)&cpu_core[curcpu_id].cpuc_dtrace_flags; 1062 1063 if (s1 == s2) 1064 return (0); 1065 1066 if (s1 == NULL || s2 == NULL) 1067 return (1); 1068 1069 if (s1 != s2 && len != 0) { 1070 const uint8_t *ps1 = s1; 1071 const uint8_t *ps2 = s2; 1072 1073 do { 1074 if (dtrace_load8((uintptr_t)ps1++) != *ps2++) 1075 return (1); 1076 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); 1077 } 1078 return (0); 1079 } 1080 1081 /* 1082 * Zero the specified region using a simple byte-by-byte loop. 
Note that this 1083 * is for safe DTrace-managed memory only. 1084 */ 1085 static void 1086 dtrace_bzero(void *dst, size_t len) 1087 { 1088 uchar_t *cp; 1089 1090 for (cp = dst; len != 0; len--) 1091 *cp++ = 0; 1092 } 1093 1094 static void 1095 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum) 1096 { 1097 uint64_t result[2]; 1098 1099 result[0] = addend1[0] + addend2[0]; 1100 result[1] = addend1[1] + addend2[1] + 1101 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0); 1102 1103 sum[0] = result[0]; 1104 sum[1] = result[1]; 1105 } 1106 1107 /* 1108 * Shift the 128-bit value in a by b. If b is positive, shift left. 1109 * If b is negative, shift right. 1110 */ 1111 static void 1112 dtrace_shift_128(uint64_t *a, int b) 1113 { 1114 uint64_t mask; 1115 1116 if (b == 0) 1117 return; 1118 1119 if (b < 0) { 1120 b = -b; 1121 if (b >= 64) { 1122 a[0] = a[1] >> (b - 64); 1123 a[1] = 0; 1124 } else { 1125 a[0] >>= b; 1126 mask = 1LL << (64 - b); 1127 mask -= 1; 1128 a[0] |= ((a[1] & mask) << (64 - b)); 1129 a[1] >>= b; 1130 } 1131 } else { 1132 if (b >= 64) { 1133 a[1] = a[0] << (b - 64); 1134 a[0] = 0; 1135 } else { 1136 a[1] <<= b; 1137 mask = a[0] >> (64 - b); 1138 a[1] |= mask; 1139 a[0] <<= b; 1140 } 1141 } 1142 } 1143 1144 /* 1145 * The basic idea is to break the 2 64-bit values into 4 32-bit values, 1146 * use native multiplication on those, and then re-combine into the 1147 * resulting 128-bit value. 1148 * 1149 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) = 1150 * hi1 * hi2 << 64 + 1151 * hi1 * lo2 << 32 + 1152 * hi2 * lo1 << 32 + 1153 * lo1 * lo2 1154 */ 1155 static void 1156 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product) 1157 { 1158 uint64_t hi1, hi2, lo1, lo2; 1159 uint64_t tmp[2]; 1160 1161 hi1 = factor1 >> 32; 1162 hi2 = factor2 >> 32; 1163 1164 lo1 = factor1 & DT_MASK_LO; 1165 lo2 = factor2 & DT_MASK_LO; 1166 1167 product[0] = lo1 * lo2; 1168 product[1] = hi1 * hi2; 1169 1170 tmp[0] = hi1 * lo2; 1171 tmp[1] = 0; 1172 dtrace_shift_128(tmp, 32); 1173 dtrace_add_128(product, tmp, product); 1174 1175 tmp[0] = hi2 * lo1; 1176 tmp[1] = 0; 1177 dtrace_shift_128(tmp, 32); 1178 dtrace_add_128(product, tmp, product); 1179 } 1180 1181 /* 1182 * This privilege check should be used by actions and subroutines to 1183 * verify that the user credentials of the process that enabled the 1184 * invoking ECB match the target credentials 1185 */ 1186 static int 1187 dtrace_priv_proc_common_user(dtrace_state_t *state) 1188 { 1189 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1190 1191 /* 1192 * We should always have a non-NULL state cred here, since if cred 1193 * is null (anonymous tracing), we fast-path bypass this routine. 
1194 */ 1195 ASSERT(s_cr != NULL); 1196 1197 #if defined(sun) 1198 if ((cr = CRED()) != NULL && 1199 s_cr->cr_uid == cr->cr_uid && 1200 s_cr->cr_uid == cr->cr_ruid && 1201 s_cr->cr_uid == cr->cr_suid && 1202 s_cr->cr_gid == cr->cr_gid && 1203 s_cr->cr_gid == cr->cr_rgid && 1204 s_cr->cr_gid == cr->cr_sgid) 1205 return (1); 1206 #else 1207 if ((cr = CRED()) != NULL) { 1208 uid_t uid; 1209 gid_t gid; 1210 1211 uid = kauth_cred_getuid(s_cr); 1212 gid = kauth_cred_getgid(s_cr); 1213 1214 if (uid == kauth_cred_getuid(cr) && 1215 uid == kauth_cred_geteuid(cr) && 1216 uid == kauth_cred_getsvuid(cr) && 1217 gid == kauth_cred_getgid(cr) && 1218 gid == kauth_cred_getegid(cr) && 1219 gid == kauth_cred_getsvgid(cr)) { 1220 return 1; 1221 } 1222 } 1223 #endif 1224 1225 return (0); 1226 } 1227 1228 /* 1229 * This privilege check should be used by actions and subroutines to 1230 * verify that the zone of the process that enabled the invoking ECB 1231 * matches the target credentials 1232 */ 1233 static int 1234 dtrace_priv_proc_common_zone(dtrace_state_t *state) 1235 { 1236 #if defined(sun) 1237 cred_t *cr, *s_cr = state->dts_cred.dcr_cred; 1238 1239 /* 1240 * We should always have a non-NULL state cred here, since if cred 1241 * is null (anonymous tracing), we fast-path bypass this routine. 1242 */ 1243 ASSERT(s_cr != NULL); 1244 1245 if ((cr = CRED()) != NULL && 1246 s_cr->cr_zone == cr->cr_zone) 1247 return (1); 1248 1249 return (0); 1250 #else 1251 return (1); 1252 #endif 1253 } 1254 1255 /* 1256 * This privilege check should be used by actions and subroutines to 1257 * verify that the process has not setuid or changed credentials. 1258 */ 1259 static int 1260 dtrace_priv_proc_common_nocd(void) 1261 { 1262 proc_t *proc; 1263 1264 if ((proc = ttoproc(curthread)) != NULL && 1265 !(proc->p_flag & SNOCD)) 1266 return (1); 1267 1268 return (0); 1269 } 1270 1271 static int 1272 dtrace_priv_proc_destructive(dtrace_state_t *state) 1273 { 1274 int action = state->dts_cred.dcr_action; 1275 1276 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) && 1277 dtrace_priv_proc_common_zone(state) == 0) 1278 goto bad; 1279 1280 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) && 1281 dtrace_priv_proc_common_user(state) == 0) 1282 goto bad; 1283 1284 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) && 1285 dtrace_priv_proc_common_nocd() == 0) 1286 goto bad; 1287 1288 return (1); 1289 1290 bad: 1291 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1292 1293 return (0); 1294 } 1295 1296 static int 1297 dtrace_priv_proc_control(dtrace_state_t *state) 1298 { 1299 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL) 1300 return (1); 1301 1302 if (dtrace_priv_proc_common_zone(state) && 1303 dtrace_priv_proc_common_user(state) && 1304 dtrace_priv_proc_common_nocd()) 1305 return (1); 1306 1307 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1308 1309 return (0); 1310 } 1311 1312 static int 1313 dtrace_priv_proc(dtrace_state_t *state) 1314 { 1315 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC) 1316 return (1); 1317 1318 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV; 1319 1320 return (0); 1321 } 1322 1323 static int 1324 dtrace_priv_kernel(dtrace_state_t *state) 1325 { 1326 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL) 1327 return (1); 1328 1329 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1330 1331 return (0); 1332 } 1333 1334 static int 1335 dtrace_priv_kernel_destructive(dtrace_state_t *state) 1336 { 1337 if (state->dts_cred.dcr_action & 
DTRACE_CRA_KERNEL_DESTRUCTIVE) 1338 return (1); 1339 1340 cpu_core[curcpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV; 1341 1342 return (0); 1343 } 1344 1345 /* 1346 * Note: not called from probe context. This function is called 1347 * asynchronously (and at a regular interval) from outside of probe context to 1348 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable 1349 * cleaning is explained in detail in <sys/dtrace_impl.h>. 1350 */ 1351 void 1352 dtrace_dynvar_clean(dtrace_dstate_t *dstate) 1353 { 1354 dtrace_dynvar_t *dirty; 1355 dtrace_dstate_percpu_t *dcpu; 1356 int i, work = 0; 1357 1358 for (i = 0; i < NCPU; i++) { 1359 dcpu = &dstate->dtds_percpu[i]; 1360 1361 ASSERT(dcpu->dtdsc_rinsing == NULL); 1362 1363 /* 1364 * If the dirty list is NULL, there is no dirty work to do. 1365 */ 1366 if (dcpu->dtdsc_dirty == NULL) 1367 continue; 1368 1369 /* 1370 * If the clean list is non-NULL, then we're not going to do 1371 * any work for this CPU -- it means that there has not been 1372 * a dtrace_dynvar() allocation on this CPU (or from this CPU) 1373 * since the last time we cleaned house. 1374 */ 1375 if (dcpu->dtdsc_clean != NULL) 1376 continue; 1377 1378 work = 1; 1379 1380 /* 1381 * Atomically move the dirty list aside. 1382 */ 1383 do { 1384 dirty = dcpu->dtdsc_dirty; 1385 1386 /* 1387 * Before we zap the dirty list, set the rinsing list. 1388 * (This allows for a potential assertion in 1389 * dtrace_dynvar(): if a free dynamic variable appears 1390 * on a hash chain, either the dirty list or the 1391 * rinsing list for some CPU must be non-NULL.) 1392 */ 1393 dcpu->dtdsc_rinsing = dirty; 1394 dtrace_membar_producer(); 1395 } while (dtrace_casptr(&dcpu->dtdsc_dirty, 1396 dirty, NULL) != dirty); 1397 } 1398 1399 if (!work) { 1400 /* 1401 * We have no work to do; we can simply return. 1402 */ 1403 return; 1404 } 1405 1406 dtrace_sync(); 1407 1408 for (i = 0; i < NCPU; i++) { 1409 dcpu = &dstate->dtds_percpu[i]; 1410 1411 if (dcpu->dtdsc_rinsing == NULL) 1412 continue; 1413 1414 /* 1415 * We are now guaranteed that no hash chain contains a pointer 1416 * into this dirty list; we can make it clean. 1417 */ 1418 ASSERT(dcpu->dtdsc_clean == NULL); 1419 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing; 1420 dcpu->dtdsc_rinsing = NULL; 1421 } 1422 1423 /* 1424 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make 1425 * sure that all CPUs have seen all of the dtdsc_clean pointers. 1426 * This prevents a race whereby a CPU incorrectly decides that 1427 * the state should be something other than DTRACE_DSTATE_CLEAN 1428 * after dtrace_dynvar_clean() has completed. 1429 */ 1430 dtrace_sync(); 1431 1432 dstate->dtds_state = DTRACE_DSTATE_CLEAN; 1433 } 1434 1435 /* 1436 * Depending on the value of the op parameter, this function looks-up, 1437 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an 1438 * allocation is requested, this function will return a pointer to a 1439 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no 1440 * variable can be allocated. If NULL is returned, the appropriate counter 1441 * will be incremented. 
1442 */ 1443 dtrace_dynvar_t * 1444 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys, 1445 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op, 1446 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate) 1447 { 1448 uint64_t hashval = DTRACE_DYNHASH_VALID; 1449 dtrace_dynhash_t *hash = dstate->dtds_hash; 1450 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL; 1451 processorid_t me = curcpu_id, cpu = me; 1452 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me]; 1453 size_t bucket, ksize; 1454 size_t chunksize = dstate->dtds_chunksize; 1455 uintptr_t kdata, lock, nstate; 1456 uint_t i; 1457 1458 ASSERT(nkeys != 0); 1459 1460 /* 1461 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time" 1462 * algorithm. For the by-value portions, we perform the algorithm in 1463 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a 1464 * bit, and seems to have only a minute effect on distribution. For 1465 * the by-reference data, we perform "One-at-a-time" iterating (safely) 1466 * over each referenced byte. It's painful to do this, but it's much 1467 * better than pathological hash distribution. The efficacy of the 1468 * hashing algorithm (and a comparison with other algorithms) may be 1469 * found by running the ::dtrace_dynstat MDB dcmd. 1470 */ 1471 for (i = 0; i < nkeys; i++) { 1472 if (key[i].dttk_size == 0) { 1473 uint64_t val = key[i].dttk_value; 1474 1475 hashval += (val >> 48) & 0xffff; 1476 hashval += (hashval << 10); 1477 hashval ^= (hashval >> 6); 1478 1479 hashval += (val >> 32) & 0xffff; 1480 hashval += (hashval << 10); 1481 hashval ^= (hashval >> 6); 1482 1483 hashval += (val >> 16) & 0xffff; 1484 hashval += (hashval << 10); 1485 hashval ^= (hashval >> 6); 1486 1487 hashval += val & 0xffff; 1488 hashval += (hashval << 10); 1489 hashval ^= (hashval >> 6); 1490 } else { 1491 /* 1492 * This is incredibly painful, but it beats the hell 1493 * out of the alternative. 1494 */ 1495 uint64_t j, size = key[i].dttk_size; 1496 uintptr_t base = (uintptr_t)key[i].dttk_value; 1497 1498 if (!dtrace_canload(base, size, mstate, vstate)) 1499 break; 1500 1501 for (j = 0; j < size; j++) { 1502 hashval += dtrace_load8(base + j); 1503 hashval += (hashval << 10); 1504 hashval ^= (hashval >> 6); 1505 } 1506 } 1507 } 1508 1509 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) 1510 return (NULL); 1511 1512 hashval += (hashval << 3); 1513 hashval ^= (hashval >> 11); 1514 hashval += (hashval << 15); 1515 1516 /* 1517 * There is a remote chance (ideally, 1 in 2^31) that our hashval 1518 * comes out to be one of our two sentinel hash values. If this 1519 * actually happens, we set the hashval to be a value known to be a 1520 * non-sentinel value. 1521 */ 1522 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK) 1523 hashval = DTRACE_DYNHASH_VALID; 1524 1525 /* 1526 * Yes, it's painful to do a divide here. If the cycle count becomes 1527 * important here, tricks can be pulled to reduce it. (However, it's 1528 * critical that hash collisions be kept to an absolute minimum; 1529 * they're much more painful than a divide.) It's better to have a 1530 * solution that generates few collisions and still keeps things 1531 * relatively simple. 
1532 */ 1533 bucket = hashval % dstate->dtds_hashsize; 1534 1535 if (op == DTRACE_DYNVAR_DEALLOC) { 1536 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock; 1537 1538 for (;;) { 1539 while ((lock = *lockp) & 1) 1540 continue; 1541 1542 if (dtrace_casptr((volatile void *)lockp, 1543 (volatile void *)lock, (volatile void *)(lock + 1)) == (void *)lock) 1544 break; 1545 } 1546 1547 dtrace_membar_producer(); 1548 } 1549 1550 top: 1551 prev = NULL; 1552 lock = hash[bucket].dtdh_lock; 1553 1554 dtrace_membar_consumer(); 1555 1556 start = hash[bucket].dtdh_chain; 1557 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK || 1558 start->dtdv_hashval != DTRACE_DYNHASH_FREE || 1559 op != DTRACE_DYNVAR_DEALLOC)); 1560 1561 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) { 1562 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple; 1563 dtrace_key_t *dkey = &dtuple->dtt_key[0]; 1564 1565 if (dvar->dtdv_hashval != hashval) { 1566 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) { 1567 /* 1568 * We've reached the sink, and therefore the 1569 * end of the hash chain; we can kick out of 1570 * the loop knowing that we have seen a valid 1571 * snapshot of state. 1572 */ 1573 ASSERT(dvar->dtdv_next == NULL); 1574 ASSERT(dvar == &dtrace_dynhash_sink); 1575 break; 1576 } 1577 1578 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) { 1579 /* 1580 * We've gone off the rails: somewhere along 1581 * the line, one of the members of this hash 1582 * chain was deleted. Note that we could also 1583 * detect this by simply letting this loop run 1584 * to completion, as we would eventually hit 1585 * the end of the dirty list. However, we 1586 * want to avoid running the length of the 1587 * dirty list unnecessarily (it might be quite 1588 * long), so we catch this as early as 1589 * possible by detecting the hash marker. In 1590 * this case, we simply set dvar to NULL and 1591 * break; the conditional after the loop will 1592 * send us back to top. 1593 */ 1594 dvar = NULL; 1595 break; 1596 } 1597 1598 goto next; 1599 } 1600 1601 if (dtuple->dtt_nkeys != nkeys) 1602 goto next; 1603 1604 for (i = 0; i < nkeys; i++, dkey++) { 1605 if (dkey->dttk_size != key[i].dttk_size) 1606 goto next; /* size or type mismatch */ 1607 1608 if (dkey->dttk_size != 0) { 1609 if (dtrace_bcmp( 1610 (void *)(uintptr_t)key[i].dttk_value, 1611 (void *)(uintptr_t)dkey->dttk_value, 1612 dkey->dttk_size)) 1613 goto next; 1614 } else { 1615 if (dkey->dttk_value != key[i].dttk_value) 1616 goto next; 1617 } 1618 } 1619 1620 if (op != DTRACE_DYNVAR_DEALLOC) 1621 return (dvar); 1622 1623 ASSERT(dvar->dtdv_next == NULL || 1624 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE); 1625 1626 if (prev != NULL) { 1627 ASSERT(hash[bucket].dtdh_chain != dvar); 1628 ASSERT(start != dvar); 1629 ASSERT(prev->dtdv_next == dvar); 1630 prev->dtdv_next = dvar->dtdv_next; 1631 } else { 1632 if (dtrace_casptr(&hash[bucket].dtdh_chain, 1633 start, dvar->dtdv_next) != start) { 1634 /* 1635 * We have failed to atomically swing the 1636 * hash table head pointer, presumably because 1637 * of a conflicting allocation on another CPU. 1638 * We need to reread the hash chain and try 1639 * again. 1640 */ 1641 goto top; 1642 } 1643 } 1644 1645 dtrace_membar_producer(); 1646 1647 /* 1648 * Now set the hash value to indicate that it's free. 
1649 */ 1650 ASSERT(hash[bucket].dtdh_chain != dvar); 1651 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1652 1653 dtrace_membar_producer(); 1654 1655 /* 1656 * Set the next pointer to point at the dirty list, and 1657 * atomically swing the dirty pointer to the newly freed dvar. 1658 */ 1659 do { 1660 next = dcpu->dtdsc_dirty; 1661 dvar->dtdv_next = next; 1662 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next); 1663 1664 /* 1665 * Finally, unlock this hash bucket. 1666 */ 1667 ASSERT(hash[bucket].dtdh_lock == lock); 1668 ASSERT(lock & 1); 1669 hash[bucket].dtdh_lock++; 1670 1671 return (NULL); 1672 next: 1673 prev = dvar; 1674 continue; 1675 } 1676 1677 if (dvar == NULL) { 1678 /* 1679 * If dvar is NULL, it is because we went off the rails: 1680 * one of the elements that we traversed in the hash chain 1681 * was deleted while we were traversing it. In this case, 1682 * we assert that we aren't doing a dealloc (deallocs lock 1683 * the hash bucket to prevent themselves from racing with 1684 * one another), and retry the hash chain traversal. 1685 */ 1686 ASSERT(op != DTRACE_DYNVAR_DEALLOC); 1687 goto top; 1688 } 1689 1690 if (op != DTRACE_DYNVAR_ALLOC) { 1691 /* 1692 * If we are not to allocate a new variable, we want to 1693 * return NULL now. Before we return, check that the value 1694 * of the lock word hasn't changed. If it has, we may have 1695 * seen an inconsistent snapshot. 1696 */ 1697 if (op == DTRACE_DYNVAR_NOALLOC) { 1698 if (hash[bucket].dtdh_lock != lock) 1699 goto top; 1700 } else { 1701 ASSERT(op == DTRACE_DYNVAR_DEALLOC); 1702 ASSERT(hash[bucket].dtdh_lock == lock); 1703 ASSERT(lock & 1); 1704 hash[bucket].dtdh_lock++; 1705 } 1706 1707 return (NULL); 1708 } 1709 1710 /* 1711 * We need to allocate a new dynamic variable. The size we need is the 1712 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the 1713 * size of any auxiliary key data (rounded up to 8-byte alignment) plus 1714 * the size of any referred-to data (dsize). We then round the final 1715 * size up to the chunksize for allocation. 1716 */ 1717 for (ksize = 0, i = 0; i < nkeys; i++) 1718 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 1719 1720 /* 1721 * This should be pretty much impossible, but could happen if, say, 1722 * strange DIF specified the tuple. Ideally, this should be an 1723 * assertion and not an error condition -- but that requires that the 1724 * chunksize calculation in dtrace_difo_chunksize() be absolutely 1725 * bullet-proof. (That is, it must not be able to be fooled by 1726 * malicious DIF.) Given the lack of backwards branches in DIF, 1727 * solving this would presumably not amount to solving the Halting 1728 * Problem -- but it still seems awfully hard. 1729 */ 1730 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) + 1731 ksize + dsize > chunksize) { 1732 dcpu->dtdsc_drops++; 1733 return (NULL); 1734 } 1735 1736 nstate = DTRACE_DSTATE_EMPTY; 1737 1738 do { 1739 retry: 1740 free = dcpu->dtdsc_free; 1741 1742 if (free == NULL) { 1743 dtrace_dynvar_t *clean = dcpu->dtdsc_clean; 1744 void *rval; 1745 1746 if (clean == NULL) { 1747 /* 1748 * We're out of dynamic variable space on 1749 * this CPU. Unless we have tried all CPUs, 1750 * we'll try to allocate from a different 1751 * CPU. 
1752 */ 1753 switch (dstate->dtds_state) { 1754 case DTRACE_DSTATE_CLEAN: { 1755 void *sp = &dstate->dtds_state; 1756 1757 if (++cpu >= NCPU) 1758 cpu = 0; 1759 1760 if (dcpu->dtdsc_dirty != NULL && 1761 nstate == DTRACE_DSTATE_EMPTY) 1762 nstate = DTRACE_DSTATE_DIRTY; 1763 1764 if (dcpu->dtdsc_rinsing != NULL) 1765 nstate = DTRACE_DSTATE_RINSING; 1766 1767 dcpu = &dstate->dtds_percpu[cpu]; 1768 1769 if (cpu != me) 1770 goto retry; 1771 1772 (void) dtrace_cas32(sp, 1773 DTRACE_DSTATE_CLEAN, nstate); 1774 1775 /* 1776 * To increment the correct bean 1777 * counter, take another lap. 1778 */ 1779 goto retry; 1780 } 1781 1782 case DTRACE_DSTATE_DIRTY: 1783 dcpu->dtdsc_dirty_drops++; 1784 break; 1785 1786 case DTRACE_DSTATE_RINSING: 1787 dcpu->dtdsc_rinsing_drops++; 1788 break; 1789 1790 case DTRACE_DSTATE_EMPTY: 1791 dcpu->dtdsc_drops++; 1792 break; 1793 } 1794 1795 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP); 1796 return (NULL); 1797 } 1798 1799 /* 1800 * The clean list appears to be non-empty. We want to 1801 * move the clean list to the free list; we start by 1802 * moving the clean pointer aside. 1803 */ 1804 if (dtrace_casptr(&dcpu->dtdsc_clean, 1805 clean, NULL) != clean) { 1806 /* 1807 * We are in one of two situations: 1808 * 1809 * (a) The clean list was switched to the 1810 * free list by another CPU. 1811 * 1812 * (b) The clean list was added to by the 1813 * cleansing cyclic. 1814 * 1815 * In either of these situations, we can 1816 * just reattempt the free list allocation. 1817 */ 1818 goto retry; 1819 } 1820 1821 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE); 1822 1823 /* 1824 * Now we'll move the clean list to the free list. 1825 * It's impossible for this to fail: the only way 1826 * the free list can be updated is through this 1827 * code path, and only one CPU can own the clean list. 1828 * Thus, it would only be possible for this to fail if 1829 * this code were racing with dtrace_dynvar_clean(). 1830 * (That is, if dtrace_dynvar_clean() updated the clean 1831 * list, and we ended up racing to update the free 1832 * list.) This race is prevented by the dtrace_sync() 1833 * in dtrace_dynvar_clean() -- which flushes the 1834 * owners of the clean lists out before resetting 1835 * the clean lists. 1836 */ 1837 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1838 ASSERT(rval == NULL); 1839 goto retry; 1840 } 1841 1842 dvar = free; 1843 new_free = dvar->dtdv_next; 1844 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1845 1846 /* 1847 * We have now allocated a new chunk. We copy the tuple keys into the 1848 * tuple array and copy any referenced key data into the data space 1849 * following the tuple array. As we do this, we relocate dttk_value 1850 * in the final tuple to point to the key data address in the chunk. 
1851 */ 1852 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1853 dvar->dtdv_data = (void *)(kdata + ksize); 1854 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1855 1856 for (i = 0; i < nkeys; i++) { 1857 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1858 size_t kesize = key[i].dttk_size; 1859 1860 if (kesize != 0) { 1861 dtrace_bcopy( 1862 (const void *)(uintptr_t)key[i].dttk_value, 1863 (void *)kdata, kesize); 1864 dkey->dttk_value = kdata; 1865 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1866 } else { 1867 dkey->dttk_value = key[i].dttk_value; 1868 } 1869 1870 dkey->dttk_size = kesize; 1871 } 1872 1873 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1874 dvar->dtdv_hashval = hashval; 1875 dvar->dtdv_next = start; 1876 1877 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1878 return (dvar); 1879 1880 /* 1881 * The cas has failed. Either another CPU is adding an element to 1882 * this hash chain, or another CPU is deleting an element from this 1883 * hash chain. The simplest way to deal with both of these cases 1884 * (though not necessarily the most efficient) is to free our 1885 * allocated block and tail-call ourselves. Note that the free is 1886 * to the dirty list and _not_ to the free list. This is to prevent 1887 * races with allocators, above. 1888 */ 1889 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1890 1891 dtrace_membar_producer(); 1892 1893 do { 1894 free = dcpu->dtdsc_dirty; 1895 dvar->dtdv_next = free; 1896 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1897 1898 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1899 } 1900 1901 /*ARGSUSED*/ 1902 static void 1903 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1904 { 1905 if ((int64_t)nval < (int64_t)*oval) 1906 *oval = nval; 1907 } 1908 1909 /*ARGSUSED*/ 1910 static void 1911 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1912 { 1913 if ((int64_t)nval > (int64_t)*oval) 1914 *oval = nval; 1915 } 1916 1917 static void 1918 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1919 { 1920 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1921 int64_t val = (int64_t)nval; 1922 1923 if (val < 0) { 1924 for (i = 0; i < zero; i++) { 1925 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1926 quanta[i] += incr; 1927 return; 1928 } 1929 } 1930 } else { 1931 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1932 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1933 quanta[i - 1] += incr; 1934 return; 1935 } 1936 } 1937 1938 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1939 return; 1940 } 1941 1942 ASSERT(0); 1943 } 1944 1945 static void 1946 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1947 { 1948 uint64_t arg = *lquanta++; 1949 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1950 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1951 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1952 int32_t val = (int32_t)nval, level; 1953 1954 ASSERT(step != 0); 1955 ASSERT(levels != 0); 1956 1957 if (val < base) { 1958 /* 1959 * This is an underflow. 1960 */ 1961 lquanta[0] += incr; 1962 return; 1963 } 1964 1965 level = (val - base) / step; 1966 1967 if (level < levels) { 1968 lquanta[level + 1] += incr; 1969 return; 1970 } 1971 1972 /* 1973 * This is an overflow. 
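 *
 * (Worked example, added for illustration: with base = 0, step = 10
 * and levels = 5, lquanta[] holds levels + 2 == 7 counters --
 * lquanta[0] for values below the base, lquanta[1] through
 * lquanta[5] for [0,10), [10,20), ... [40,50), and lquanta[6] for
 * values of 50 and above.  A value of 37 yields level ==
 * (37 - 0) / 10 == 3 and increments lquanta[4]; a value of 73
 * yields level == 7 >= levels and falls into the overflow bucket
 * incremented below.)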
1974 */ 1975 lquanta[levels + 1] += incr; 1976 } 1977 1978 /*ARGSUSED*/ 1979 static void 1980 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1981 { 1982 data[0]++; 1983 data[1] += nval; 1984 } 1985 1986 /*ARGSUSED*/ 1987 static void 1988 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1989 { 1990 int64_t snval = (int64_t)nval; 1991 uint64_t tmp[2]; 1992 1993 data[0]++; 1994 data[1] += nval; 1995 1996 /* 1997 * What we want to say here is: 1998 * 1999 * data[2] += nval * nval; 2000 * 2001 * But given that nval is 64-bit, we could easily overflow, so 2002 * we do this as 128-bit arithmetic. 2003 */ 2004 if (snval < 0) 2005 snval = -snval; 2006 2007 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2008 dtrace_add_128(data + 2, tmp, data + 2); 2009 } 2010 2011 /*ARGSUSED*/ 2012 static void 2013 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2014 { 2015 *oval = *oval + 1; 2016 } 2017 2018 /*ARGSUSED*/ 2019 static void 2020 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2021 { 2022 *oval += nval; 2023 } 2024 2025 /* 2026 * Aggregate given the tuple in the principal data buffer, and the aggregating 2027 * action denoted by the specified dtrace_aggregation_t. The aggregation 2028 * buffer is specified as the buf parameter. This routine does not return 2029 * failure; if there is no space in the aggregation buffer, the data will be 2030 * dropped, and a corresponding counter incremented. 2031 */ 2032 static void 2033 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2034 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2035 { 2036 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2037 uint32_t i, ndx, size, fsize; 2038 uint32_t align = sizeof (uint64_t) - 1; 2039 dtrace_aggbuffer_t *agb; 2040 dtrace_aggkey_t *key; 2041 uint32_t hashval = 0, limit, isstr; 2042 caddr_t tomax, data, kdata; 2043 dtrace_actkind_t action; 2044 dtrace_action_t *act; 2045 uintptr_t offs; 2046 2047 if (buf == NULL) 2048 return; 2049 2050 if (!agg->dtag_hasarg) { 2051 /* 2052 * Currently, only quantize() and lquantize() take additional 2053 * arguments, and they have the same semantics: an increment 2054 * value that defaults to 1 when not present. If additional 2055 * aggregating actions take arguments, the setting of the 2056 * default argument value will presumably have to become more 2057 * sophisticated... 2058 */ 2059 arg = 1; 2060 } 2061 2062 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2063 size = rec->dtrd_offset - agg->dtag_base; 2064 fsize = size + rec->dtrd_size; 2065 2066 ASSERT(dbuf->dtb_tomax != NULL); 2067 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2068 2069 if ((tomax = buf->dtb_tomax) == NULL) { 2070 dtrace_buffer_drop(buf); 2071 return; 2072 } 2073 2074 /* 2075 * The metastructure is always at the bottom of the buffer. 2076 */ 2077 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2078 sizeof (dtrace_aggbuffer_t)); 2079 2080 if (buf->dtb_offset == 0) { 2081 /* 2082 * We just kludge up approximately 1/8th of the size to be 2083 * buckets. If this guess ends up being routinely 2084 * off-the-mark, we may need to dynamically readjust this 2085 * based on past performance. 2086 */ 2087 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2088 2089 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2090 (uintptr_t)tomax || hashsize == 0) { 2091 /* 2092 * We've been given a ludicrously small buffer; 2093 * increment our drop count and leave. 
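 *
 * (Orientation, added for illustration: the dtrace_aggbuffer_t
 * metastructure occupies the final bytes of the buffer, the hash
 * directory of dtagb_hashsize pointers sits immediately before it,
 * keys are later carved from dtagb_free toward lower addresses, and
 * record data grows from dtb_offset toward higher addresses.  If
 * the directory alone would extend past the start of the buffer,
 * nothing else can possibly fit -- hence the drop below.)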
2094 */ 2095 dtrace_buffer_drop(buf); 2096 return; 2097 } 2098 2099 /* 2100 * And now, a pathetic attempt to try to get a an odd (or 2101 * perchance, a prime) hash size for better hash distribution. 2102 */ 2103 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2104 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2105 2106 agb->dtagb_hashsize = hashsize; 2107 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2108 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2109 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2110 2111 for (i = 0; i < agb->dtagb_hashsize; i++) 2112 agb->dtagb_hash[i] = NULL; 2113 } 2114 2115 ASSERT(agg->dtag_first != NULL); 2116 ASSERT(agg->dtag_first->dta_intuple); 2117 2118 /* 2119 * Calculate the hash value based on the key. Note that we _don't_ 2120 * include the aggid in the hashing (but we will store it as part of 2121 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2122 * algorithm: a simple, quick algorithm that has no known funnels, and 2123 * gets good distribution in practice. The efficacy of the hashing 2124 * algorithm (and a comparison with other algorithms) may be found by 2125 * running the ::dtrace_aggstat MDB dcmd. 2126 */ 2127 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2128 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2129 limit = i + act->dta_rec.dtrd_size; 2130 ASSERT(limit <= size); 2131 isstr = DTRACEACT_ISSTRING(act); 2132 2133 for (; i < limit; i++) { 2134 hashval += data[i]; 2135 hashval += (hashval << 10); 2136 hashval ^= (hashval >> 6); 2137 2138 if (isstr && data[i] == '\0') 2139 break; 2140 } 2141 } 2142 2143 hashval += (hashval << 3); 2144 hashval ^= (hashval >> 11); 2145 hashval += (hashval << 15); 2146 2147 /* 2148 * Yes, the divide here is expensive -- but it's generally the least 2149 * of the performance issues given the amount of data that we iterate 2150 * over to compute hash values, compare data, etc. 2151 */ 2152 ndx = hashval % agb->dtagb_hashsize; 2153 2154 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2155 ASSERT((caddr_t)key >= tomax); 2156 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2157 2158 if (hashval != key->dtak_hashval || key->dtak_size != size) 2159 continue; 2160 2161 kdata = key->dtak_data; 2162 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2163 2164 for (act = agg->dtag_first; act->dta_intuple; 2165 act = act->dta_next) { 2166 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2167 limit = i + act->dta_rec.dtrd_size; 2168 ASSERT(limit <= size); 2169 isstr = DTRACEACT_ISSTRING(act); 2170 2171 for (; i < limit; i++) { 2172 if (kdata[i] != data[i]) 2173 goto next; 2174 2175 if (isstr && data[i] == '\0') 2176 break; 2177 } 2178 } 2179 2180 if (action != key->dtak_action) { 2181 /* 2182 * We are aggregating on the same value in the same 2183 * aggregation with two different aggregating actions. 2184 * (This should have been picked up in the compiler, 2185 * so we may be dealing with errant or devious DIF.) 2186 * This is an error condition; we indicate as much, 2187 * and return. 2188 */ 2189 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2190 return; 2191 } 2192 2193 /* 2194 * This is a hit: we need to apply the aggregator to 2195 * the value at this key. 2196 */ 2197 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2198 return; 2199 next: 2200 continue; 2201 } 2202 2203 /* 2204 * We didn't find it. 
We need to allocate some zero-filled space, 2205 * link it into the hash table appropriately, and apply the aggregator 2206 * to the (zero-filled) value. 2207 */ 2208 offs = buf->dtb_offset; 2209 while (offs & (align - 1)) 2210 offs += sizeof (uint32_t); 2211 2212 /* 2213 * If we don't have enough room to both allocate a new key _and_ 2214 * its associated data, increment the drop count and return. 2215 */ 2216 if ((uintptr_t)tomax + offs + fsize > 2217 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2218 dtrace_buffer_drop(buf); 2219 return; 2220 } 2221 2222 /*CONSTCOND*/ 2223 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2224 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2225 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2226 2227 key->dtak_data = kdata = tomax + offs; 2228 buf->dtb_offset = offs + fsize; 2229 2230 /* 2231 * Now copy the data across. 2232 */ 2233 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2234 2235 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2236 kdata[i] = data[i]; 2237 2238 /* 2239 * Because strings are not zeroed out by default, we need to iterate 2240 * looking for actions that store strings, and we need to explicitly 2241 * pad these strings out with zeroes. 2242 */ 2243 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2244 int nul; 2245 2246 if (!DTRACEACT_ISSTRING(act)) 2247 continue; 2248 2249 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2250 limit = i + act->dta_rec.dtrd_size; 2251 ASSERT(limit <= size); 2252 2253 for (nul = 0; i < limit; i++) { 2254 if (nul) { 2255 kdata[i] = '\0'; 2256 continue; 2257 } 2258 2259 if (data[i] != '\0') 2260 continue; 2261 2262 nul = 1; 2263 } 2264 } 2265 2266 for (i = size; i < fsize; i++) 2267 kdata[i] = 0; 2268 2269 key->dtak_hashval = hashval; 2270 key->dtak_size = size; 2271 key->dtak_action = action; 2272 key->dtak_next = agb->dtagb_hash[ndx]; 2273 agb->dtagb_hash[ndx] = key; 2274 2275 /* 2276 * Finally, apply the aggregator. 2277 */ 2278 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2279 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2280 } 2281 2282 /* 2283 * Given consumer state, this routine finds a speculation in the INACTIVE 2284 * state and transitions it into the ACTIVE state. If there is no speculation 2285 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2286 * incremented -- it is up to the caller to take appropriate action. 2287 */ 2288 static int 2289 dtrace_speculation(dtrace_state_t *state) 2290 { 2291 int i = 0; 2292 dtrace_speculation_state_t current; 2293 uint32_t *stat = &state->dts_speculations_unavail, count; 2294 2295 while (i < state->dts_nspeculations) { 2296 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2297 2298 current = spec->dtsp_state; 2299 2300 if (current != DTRACESPEC_INACTIVE) { 2301 if (current == DTRACESPEC_COMMITTINGMANY || 2302 current == DTRACESPEC_COMMITTING || 2303 current == DTRACESPEC_DISCARDING) 2304 stat = &state->dts_speculations_busy; 2305 i++; 2306 continue; 2307 } 2308 2309 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2310 current, DTRACESPEC_ACTIVE) == current) 2311 return (i + 1); 2312 } 2313 2314 /* 2315 * We couldn't find a speculation. If we found as much as a single 2316 * busy speculation buffer, we'll attribute this failure as "busy" 2317 * instead of "unavail". 
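 *
 * (For illustration: this routine backs the D speculation()
 * subroutine, so a clause such as
 *
 *	syscall::open*:entry { self->spec = speculation(); }
 *
 * ultimately lands here, with the returned identifier -- or 0 --
 * becoming the value of the speculation() call.)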
2318 */ 2319 do { 2320 count = *stat; 2321 } while (dtrace_cas32(stat, count, count + 1) != count); 2322 2323 return (0); 2324 } 2325 2326 /* 2327 * This routine commits an active speculation. If the specified speculation 2328 * is not in a valid state to perform a commit(), this routine will silently do 2329 * nothing. The state of the specified speculation is transitioned according 2330 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2331 */ 2332 static void 2333 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2334 dtrace_specid_t which) 2335 { 2336 dtrace_speculation_t *spec; 2337 dtrace_buffer_t *src, *dest; 2338 uintptr_t daddr, saddr, dlimit; 2339 dtrace_speculation_state_t current, new = 0; 2340 intptr_t offs; 2341 2342 if (which == 0) 2343 return; 2344 2345 if (which > state->dts_nspeculations) { 2346 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2347 return; 2348 } 2349 2350 spec = &state->dts_speculations[which - 1]; 2351 src = &spec->dtsp_buffer[cpu]; 2352 dest = &state->dts_buffer[cpu]; 2353 2354 do { 2355 current = spec->dtsp_state; 2356 2357 if (current == DTRACESPEC_COMMITTINGMANY) 2358 break; 2359 2360 switch (current) { 2361 case DTRACESPEC_INACTIVE: 2362 case DTRACESPEC_DISCARDING: 2363 return; 2364 2365 case DTRACESPEC_COMMITTING: 2366 /* 2367 * This is only possible if we are (a) commit()'ing 2368 * without having done a prior speculate() on this CPU 2369 * and (b) racing with another commit() on a different 2370 * CPU. There's nothing to do -- we just assert that 2371 * our offset is 0. 2372 */ 2373 ASSERT(src->dtb_offset == 0); 2374 return; 2375 2376 case DTRACESPEC_ACTIVE: 2377 new = DTRACESPEC_COMMITTING; 2378 break; 2379 2380 case DTRACESPEC_ACTIVEONE: 2381 /* 2382 * This speculation is active on one CPU. If our 2383 * buffer offset is non-zero, we know that the one CPU 2384 * must be us. Otherwise, we are committing on a 2385 * different CPU from the speculate(), and we must 2386 * rely on being asynchronously cleaned. 2387 */ 2388 if (src->dtb_offset != 0) { 2389 new = DTRACESPEC_COMMITTING; 2390 break; 2391 } 2392 /*FALLTHROUGH*/ 2393 2394 case DTRACESPEC_ACTIVEMANY: 2395 new = DTRACESPEC_COMMITTINGMANY; 2396 break; 2397 2398 default: 2399 ASSERT(0); 2400 } 2401 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2402 current, new) != current); 2403 2404 /* 2405 * We have set the state to indicate that we are committing this 2406 * speculation. Now reserve the necessary space in the destination 2407 * buffer. 2408 */ 2409 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2410 sizeof (uint64_t), state, NULL)) < 0) { 2411 dtrace_buffer_drop(dest); 2412 goto out; 2413 } 2414 2415 /* 2416 * We have the space; copy the buffer across. (Note that this is a 2417 * highly suboptimal bcopy(); in the unlikely event that this becomes 2418 * a serious performance issue, a high-performance DTrace-specific 2419 * bcopy() should obviously be invented.) 2420 */ 2421 daddr = (uintptr_t)dest->dtb_tomax + offs; 2422 dlimit = daddr + src->dtb_offset; 2423 saddr = (uintptr_t)src->dtb_tomax; 2424 2425 /* 2426 * First, the aligned portion. 2427 */ 2428 while (dlimit - daddr >= sizeof (uint64_t)) { 2429 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2430 2431 daddr += sizeof (uint64_t); 2432 saddr += sizeof (uint64_t); 2433 } 2434 2435 /* 2436 * Now any left-over bit... 2437 */ 2438 while (dlimit - daddr) 2439 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2440 2441 /* 2442 * Finally, commit the reserved space in the destination buffer.
2443 */ 2444 dest->dtb_offset = offs + src->dtb_offset; 2445 2446 out: 2447 /* 2448 * If we're lucky enough to be the only active CPU on this speculation 2449 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2450 */ 2451 if (current == DTRACESPEC_ACTIVE || 2452 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2453 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2454 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2455 2456 ASSERT(rval == DTRACESPEC_COMMITTING); 2457 } 2458 2459 src->dtb_offset = 0; 2460 src->dtb_xamot_drops += src->dtb_drops; 2461 src->dtb_drops = 0; 2462 } 2463 2464 /* 2465 * This routine discards an active speculation. If the specified speculation 2466 * is not in a valid state to perform a discard(), this routine will silently 2467 * do nothing. The state of the specified speculation is transitioned 2468 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2469 */ 2470 static void 2471 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2472 dtrace_specid_t which) 2473 { 2474 dtrace_speculation_t *spec; 2475 dtrace_speculation_state_t current, new = 0; 2476 dtrace_buffer_t *buf; 2477 2478 if (which == 0) 2479 return; 2480 2481 if (which > state->dts_nspeculations) { 2482 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2483 return; 2484 } 2485 2486 spec = &state->dts_speculations[which - 1]; 2487 buf = &spec->dtsp_buffer[cpu]; 2488 2489 do { 2490 current = spec->dtsp_state; 2491 2492 switch (current) { 2493 case DTRACESPEC_INACTIVE: 2494 case DTRACESPEC_COMMITTINGMANY: 2495 case DTRACESPEC_COMMITTING: 2496 case DTRACESPEC_DISCARDING: 2497 return; 2498 2499 case DTRACESPEC_ACTIVE: 2500 case DTRACESPEC_ACTIVEMANY: 2501 new = DTRACESPEC_DISCARDING; 2502 break; 2503 2504 case DTRACESPEC_ACTIVEONE: 2505 if (buf->dtb_offset != 0) { 2506 new = DTRACESPEC_INACTIVE; 2507 } else { 2508 new = DTRACESPEC_DISCARDING; 2509 } 2510 break; 2511 2512 default: 2513 ASSERT(0); 2514 } 2515 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2516 current, new) != current); 2517 2518 buf->dtb_offset = 0; 2519 buf->dtb_drops = 0; 2520 } 2521 2522 /* 2523 * Note: not called from probe context. This function is called 2524 * asynchronously from cross call context to clean any speculations that are 2525 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2526 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2527 * speculation. 2528 */ 2529 static void 2530 dtrace_speculation_clean_here(dtrace_state_t *state) 2531 { 2532 dtrace_icookie_t cookie; 2533 processorid_t cpu = curcpu_id; 2534 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2535 dtrace_specid_t i; 2536 2537 cookie = dtrace_interrupt_disable(); 2538 2539 if (dest->dtb_tomax == NULL) { 2540 dtrace_interrupt_enable(cookie); 2541 return; 2542 } 2543 2544 for (i = 0; i < state->dts_nspeculations; i++) { 2545 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2546 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2547 2548 if (src->dtb_tomax == NULL) 2549 continue; 2550 2551 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2552 src->dtb_offset = 0; 2553 continue; 2554 } 2555 2556 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2557 continue; 2558 2559 if (src->dtb_offset == 0) 2560 continue; 2561 2562 dtrace_speculation_commit(state, cpu, i + 1); 2563 } 2564 2565 dtrace_interrupt_enable(cookie); 2566 } 2567 2568 /* 2569 * Note: not called from probe context. 
This function is called 2570 * asynchronously (and at a regular interval) to clean any speculations that 2571 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2572 * is work to be done, it cross calls all CPUs to perform that work; 2573 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the 2574 * INACTIVE state until they have been cleaned by all CPUs. 2575 */ 2576 static void 2577 dtrace_speculation_clean(dtrace_state_t *state) 2578 { 2579 int work = 0, rv; 2580 dtrace_specid_t i; 2581 2582 for (i = 0; i < state->dts_nspeculations; i++) { 2583 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2584 2585 ASSERT(!spec->dtsp_cleaning); 2586 2587 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2588 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2589 continue; 2590 2591 work++; 2592 spec->dtsp_cleaning = 1; 2593 } 2594 2595 if (!work) 2596 return; 2597 2598 dtrace_xcall(DTRACE_CPUALL, 2599 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2600 2601 /* 2602 * We now know that all CPUs have committed or discarded their 2603 * speculation buffers, as appropriate. We can now set the state 2604 * to inactive. 2605 */ 2606 for (i = 0; i < state->dts_nspeculations; i++) { 2607 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2608 dtrace_speculation_state_t current, new; 2609 2610 if (!spec->dtsp_cleaning) 2611 continue; 2612 2613 current = spec->dtsp_state; 2614 ASSERT(current == DTRACESPEC_DISCARDING || 2615 current == DTRACESPEC_COMMITTINGMANY); 2616 2617 new = DTRACESPEC_INACTIVE; 2618 2619 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2620 ASSERT(rv == current); 2621 spec->dtsp_cleaning = 0; 2622 } 2623 } 2624 2625 /* 2626 * Called as part of a speculate() to get the speculative buffer associated 2627 * with a given speculation. Returns NULL if the specified speculation is not 2628 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2629 * the active CPU is not the specified CPU -- the speculation will be 2630 * atomically transitioned into the ACTIVEMANY state. 2631 */ 2632 static dtrace_buffer_t * 2633 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2634 dtrace_specid_t which) 2635 { 2636 dtrace_speculation_t *spec; 2637 dtrace_speculation_state_t current, new = 0; 2638 dtrace_buffer_t *buf; 2639 2640 if (which == 0) 2641 return (NULL); 2642 2643 if (which > state->dts_nspeculations) { 2644 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2645 return (NULL); 2646 } 2647 2648 spec = &state->dts_speculations[which - 1]; 2649 buf = &spec->dtsp_buffer[cpuid]; 2650 2651 do { 2652 current = spec->dtsp_state; 2653 2654 switch (current) { 2655 case DTRACESPEC_INACTIVE: 2656 case DTRACESPEC_COMMITTINGMANY: 2657 case DTRACESPEC_DISCARDING: 2658 return (NULL); 2659 2660 case DTRACESPEC_COMMITTING: 2661 ASSERT(buf->dtb_offset == 0); 2662 return (NULL); 2663 2664 case DTRACESPEC_ACTIVEONE: 2665 /* 2666 * This speculation is currently active on one CPU. 2667 * Check the offset in the buffer; if it's non-zero, 2668 * that CPU must be us (and we leave the state alone). 2669 * If it's zero, assume that we're starting on a new 2670 * CPU -- and change the state to indicate that the 2671 * speculation is active on more than one CPU.
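 *
 * (Informal sketch of the transitions handled by this switch, added
 * for reference; <sys/dtrace_impl.h> has the complete diagram:
 *
 *	ACTIVE    -> ACTIVEONE   first speculate() on some CPU
 *	ACTIVEONE -> ACTIVEMANY  speculate() on a second CPU
 *
 * ACTIVEONE on the same CPU and ACTIVEMANY leave the state alone
 * and simply return the per-CPU buffer.)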
2672 */ 2673 if (buf->dtb_offset != 0) 2674 return (buf); 2675 2676 new = DTRACESPEC_ACTIVEMANY; 2677 break; 2678 2679 case DTRACESPEC_ACTIVEMANY: 2680 return (buf); 2681 2682 case DTRACESPEC_ACTIVE: 2683 new = DTRACESPEC_ACTIVEONE; 2684 break; 2685 2686 default: 2687 ASSERT(0); 2688 } 2689 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2690 current, new) != current); 2691 2692 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2693 return (buf); 2694 } 2695 2696 /* 2697 * Return a string. In the event that the user lacks the privilege to access 2698 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2699 * don't fail access checking. 2700 * 2701 * dtrace_dif_variable() uses this routine as a helper for various 2702 * builtin values such as 'execname' and 'probefunc.' 2703 */ 2704 uintptr_t 2705 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2706 dtrace_mstate_t *mstate) 2707 { 2708 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2709 uintptr_t ret; 2710 size_t strsz; 2711 2712 /* 2713 * The easy case: this probe is allowed to read all of memory, so 2714 * we can just return this as a vanilla pointer. 2715 */ 2716 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2717 return (addr); 2718 2719 /* 2720 * This is the tougher case: we copy the string in question from 2721 * kernel memory into scratch memory and return it that way: this 2722 * ensures that we won't trip up when access checking tests the 2723 * BYREF return value. 2724 */ 2725 strsz = dtrace_strlen((char *)addr, size) + 1; 2726 2727 if (mstate->dtms_scratch_ptr + strsz > 2728 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2729 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2730 return (0); 2731 } 2732 2733 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2734 strsz); 2735 ret = mstate->dtms_scratch_ptr; 2736 mstate->dtms_scratch_ptr += strsz; 2737 return (ret); 2738 } 2739 2740 #ifdef notyet 2741 /* 2742 * Return a string from a memoy address which is known to have one or 2743 * more concatenated, individually zero terminated, sub-strings. 2744 * In the event that the user lacks the privilege to access 2745 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2746 * don't fail access checking. 2747 * 2748 * dtrace_dif_variable() uses this routine as a helper for various 2749 * builtin values such as 'execargs'. 2750 */ 2751 static uintptr_t 2752 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2753 dtrace_mstate_t *mstate) 2754 { 2755 char *p; 2756 size_t i; 2757 uintptr_t ret; 2758 2759 if (mstate->dtms_scratch_ptr + strsz > 2760 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2761 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2762 return (0); 2763 } 2764 2765 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2766 strsz); 2767 2768 /* Replace sub-string termination characters with a space. */ 2769 for (p = (char *) mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2770 p++, i++) 2771 if (*p == '\0') 2772 *p = ' '; 2773 2774 ret = mstate->dtms_scratch_ptr; 2775 mstate->dtms_scratch_ptr += strsz; 2776 return (ret); 2777 } 2778 #endif 2779 2780 /* 2781 * This function implements the DIF emulator's variable lookups. The emulator 2782 * passes a reserved variable identifier and optional built-in array index. 
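 *
 * (For illustration: a D reference to execname compiles to a DIF
 * load of the built-in variable DIF_VAR_EXECNAME and arrives here
 * with v == DIF_VAR_EXECNAME and ndx unused, while a reference to
 * arg3 arrives as v == DIF_VAR_ARG3 and is folded into an args[]
 * lookup by the code immediately below.)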
2783 */ 2784 static uint64_t 2785 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2786 uint64_t ndx) 2787 { 2788 /* 2789 * If we're accessing one of the uncached arguments, we'll turn this 2790 * into a reference in the args array. 2791 */ 2792 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2793 ndx = v - DIF_VAR_ARG0; 2794 v = DIF_VAR_ARGS; 2795 } 2796 2797 switch (v) { 2798 case DIF_VAR_ARGS: 2799 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2800 if (ndx >= sizeof (mstate->dtms_arg) / 2801 sizeof (mstate->dtms_arg[0])) { 2802 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2803 dtrace_provider_t *pv; 2804 uint64_t val; 2805 2806 pv = mstate->dtms_probe->dtpr_provider; 2807 if (pv->dtpv_pops.dtps_getargval != NULL) 2808 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2809 mstate->dtms_probe->dtpr_id, 2810 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2811 else 2812 val = dtrace_getarg(ndx, aframes); 2813 2814 /* 2815 * This is regrettably required to keep the compiler 2816 * from tail-optimizing the call to dtrace_getarg(). 2817 * The condition always evaluates to true, but the 2818 * compiler has no way of figuring that out a priori. 2819 * (None of this would be necessary if the compiler 2820 * could be relied upon to _always_ tail-optimize 2821 * the call to dtrace_getarg() -- but it can't.) 2822 */ 2823 if (mstate->dtms_probe != NULL) 2824 return (val); 2825 2826 ASSERT(0); 2827 } 2828 2829 return (mstate->dtms_arg[ndx]); 2830 2831 #if defined(sun) 2832 case DIF_VAR_UREGS: { 2833 klwp_t *lwp; 2834 2835 if (!dtrace_priv_proc(state)) 2836 return (0); 2837 2838 if ((lwp = curthread->t_lwp) == NULL) { 2839 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2840 cpu_core[curcpu_id].cpuc_dtrace_illval = NULL; 2841 return (0); 2842 } 2843 2844 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2845 return (0); 2846 } 2847 #endif 2848 2849 case DIF_VAR_CURTHREAD: 2850 if (!dtrace_priv_kernel(state)) 2851 return (0); 2852 return ((uint64_t)(uintptr_t)curthread); 2853 2854 case DIF_VAR_TIMESTAMP: 2855 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2856 mstate->dtms_timestamp = dtrace_gethrtime(); 2857 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2858 } 2859 return (mstate->dtms_timestamp); 2860 2861 case DIF_VAR_VTIMESTAMP: 2862 ASSERT(dtrace_vtime_references != 0); 2863 return (curthread->t_dtrace_vtime); 2864 2865 case DIF_VAR_WALLTIMESTAMP: 2866 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2867 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2868 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2869 } 2870 return (mstate->dtms_walltimestamp); 2871 2872 #if defined(sun) 2873 case DIF_VAR_IPL: 2874 if (!dtrace_priv_kernel(state)) 2875 return (0); 2876 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2877 mstate->dtms_ipl = dtrace_getipl(); 2878 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2879 } 2880 return (mstate->dtms_ipl); 2881 #endif 2882 2883 case DIF_VAR_EPID: 2884 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2885 return (mstate->dtms_epid); 2886 2887 case DIF_VAR_ID: 2888 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2889 return (mstate->dtms_probe->dtpr_id); 2890 2891 case DIF_VAR_STACKDEPTH: 2892 if (!dtrace_priv_kernel(state)) 2893 return (0); 2894 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2895 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2896 2897 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes); 2898 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2899 } 2900 return 
(mstate->dtms_stackdepth); 2901 2902 #if defined(sun) 2903 case DIF_VAR_USTACKDEPTH: 2904 if (!dtrace_priv_proc(state)) 2905 return (0); 2906 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2907 /* 2908 * See comment in DIF_VAR_PID. 2909 */ 2910 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2911 CPU_ON_INTR(CPU)) { 2912 mstate->dtms_ustackdepth = 0; 2913 } else { 2914 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2915 mstate->dtms_ustackdepth = 2916 dtrace_getustackdepth(); 2917 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2918 } 2919 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2920 } 2921 return (mstate->dtms_ustackdepth); 2922 #endif 2923 2924 case DIF_VAR_CALLER: 2925 if (!dtrace_priv_kernel(state)) 2926 return (0); 2927 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2928 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2929 2930 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2931 /* 2932 * If this is an unanchored probe, we are 2933 * required to go through the slow path: 2934 * dtrace_caller() only guarantees correct 2935 * results for anchored probes. 2936 */ 2937 pc_t caller[2] = {0, 0}; 2938 2939 dtrace_getpcstack(caller, 2, aframes, 2940 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2941 mstate->dtms_caller = caller[1]; 2942 } else if ((mstate->dtms_caller = 2943 dtrace_caller(aframes)) == -1) { 2944 /* 2945 * We have failed to do this the quick way; 2946 * we must resort to the slower approach of 2947 * calling dtrace_getpcstack(). 2948 */ 2949 pc_t caller = 0; 2950 2951 dtrace_getpcstack(&caller, 1, aframes, NULL); 2952 mstate->dtms_caller = caller; 2953 } 2954 2955 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 2956 } 2957 return (mstate->dtms_caller); 2958 2959 #if defined(sun) 2960 case DIF_VAR_UCALLER: 2961 if (!dtrace_priv_proc(state)) 2962 return (0); 2963 2964 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 2965 uint64_t ustack[3]; 2966 2967 /* 2968 * dtrace_getupcstack() fills in the first uint64_t 2969 * with the current PID. The second uint64_t will 2970 * be the program counter at user-level. The third 2971 * uint64_t will contain the caller, which is what 2972 * we're after. 2973 */ 2974 ustack[2] = 0; 2975 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2976 dtrace_getupcstack(ustack, 3); 2977 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2978 mstate->dtms_ucaller = ustack[2]; 2979 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 2980 } 2981 2982 return (mstate->dtms_ucaller); 2983 #endif 2984 2985 case DIF_VAR_PROBEPROV: 2986 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2987 return (dtrace_dif_varstr( 2988 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 2989 state, mstate)); 2990 2991 case DIF_VAR_PROBEMOD: 2992 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2993 return (dtrace_dif_varstr( 2994 (uintptr_t)mstate->dtms_probe->dtpr_mod, 2995 state, mstate)); 2996 2997 case DIF_VAR_PROBEFUNC: 2998 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2999 return (dtrace_dif_varstr( 3000 (uintptr_t)mstate->dtms_probe->dtpr_func, 3001 state, mstate)); 3002 3003 case DIF_VAR_PROBENAME: 3004 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3005 return (dtrace_dif_varstr( 3006 (uintptr_t)mstate->dtms_probe->dtpr_name, 3007 state, mstate)); 3008 3009 case DIF_VAR_PID: 3010 if (!dtrace_priv_proc(state)) 3011 return (0); 3012 3013 #if defined(sun) 3014 /* 3015 * Note that we are assuming that an unanchored probe is 3016 * always due to a high-level interrupt. (And we're assuming 3017 * that there is only a single high level interrupt.) 
3018 */ 3019 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3020 return (pid0.pid_id); 3021 3022 /* 3023 * It is always safe to dereference one's own t_procp pointer: 3024 * it always points to a valid, allocated proc structure. 3025 * Further, it is always safe to dereference the p_pidp member 3026 * of one's own proc structure. (These are truisms because 3027 * threads and processes don't clean up their own state -- 3028 * they leave that task to whomever reaps them.) 3029 */ 3030 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3031 #else 3032 return ((uint64_t)curproc->p_pid); 3033 #endif 3034 3035 case DIF_VAR_PPID: 3036 if (!dtrace_priv_proc(state)) 3037 return (0); 3038 3039 #if defined(sun) 3040 /* 3041 * See comment in DIF_VAR_PID. 3042 */ 3043 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3044 return (pid0.pid_id); 3045 3046 /* 3047 * It is always safe to dereference one's own t_procp pointer: 3048 * it always points to a valid, allocated proc structure. 3049 * (This is true because threads don't clean up their own 3050 * state -- they leave that task to whomever reaps them.) 3051 */ 3052 return ((uint64_t)curthread->t_procp->p_ppid); 3053 #else 3054 return ((uint64_t)curproc->p_pptr->p_pid); 3055 #endif 3056 3057 case DIF_VAR_TID: 3058 #if defined(sun) 3059 /* 3060 * See comment in DIF_VAR_PID. 3061 */ 3062 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3063 return (0); 3064 #endif 3065 3066 return ((uint64_t)curthread->t_tid); 3067 3068 case DIF_VAR_EXECARGS: { 3069 #if 0 3070 struct pargs *p_args = curthread->td_proc->p_args; 3071 3072 if (p_args == NULL) 3073 return(0); 3074 3075 return (dtrace_dif_varstrz( 3076 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3077 #endif 3078 /* XXX FreeBSD extension */ 3079 return 0; 3080 } 3081 3082 case DIF_VAR_EXECNAME: 3083 #if defined(sun) 3084 if (!dtrace_priv_proc(state)) 3085 return (0); 3086 3087 /* 3088 * See comment in DIF_VAR_PID. 3089 */ 3090 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3091 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3092 3093 /* 3094 * It is always safe to dereference one's own t_procp pointer: 3095 * it always points to a valid, allocated proc structure. 3096 * (This is true because threads don't clean up their own 3097 * state -- they leave that task to whomever reaps them.) 3098 */ 3099 return (dtrace_dif_varstr( 3100 (uintptr_t)curthread->t_procp->p_user.u_comm, 3101 state, mstate)); 3102 #else 3103 return (dtrace_dif_varstr( 3104 (uintptr_t) curthread->l_proc->p_comm, state, mstate)); 3105 #endif 3106 3107 case DIF_VAR_ZONENAME: 3108 #if defined(sun) 3109 if (!dtrace_priv_proc(state)) 3110 return (0); 3111 3112 /* 3113 * See comment in DIF_VAR_PID. 3114 */ 3115 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3116 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3117 3118 /* 3119 * It is always safe to dereference one's own t_procp pointer: 3120 * it always points to a valid, allocated proc structure. 3121 * (This is true because threads don't clean up their own 3122 * state -- they leave that task to whomever reaps them.) 3123 */ 3124 return (dtrace_dif_varstr( 3125 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3126 state, mstate)); 3127 #else 3128 return (0); 3129 #endif 3130 3131 case DIF_VAR_UID: 3132 if (!dtrace_priv_proc(state)) 3133 return (0); 3134 3135 #if defined(sun) 3136 /* 3137 * See comment in DIF_VAR_PID.
3138 */ 3139 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3140 return ((uint64_t)p0.p_cred->cr_uid); 3141 3142 /* 3143 * It is always safe to dereference one's own t_procp pointer: 3144 * it always points to a valid, allocated proc structure. 3145 * (This is true because threads don't clean up their own 3146 * state -- they leave that task to whomever reaps them.) 3147 * 3148 * Additionally, it is safe to dereference one's own process 3149 * credential, since this is never NULL after process birth. 3150 */ 3151 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3152 #else 3153 return (uint64_t)kauth_cred_getuid(curthread->t_procp->p_cred); 3154 #endif 3155 3156 case DIF_VAR_GID: 3157 if (!dtrace_priv_proc(state)) 3158 return (0); 3159 3160 #if defined(sun) 3161 /* 3162 * See comment in DIF_VAR_PID. 3163 */ 3164 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3165 return ((uint64_t)p0.p_cred->cr_gid); 3166 3167 /* 3168 * It is always safe to dereference one's own t_procp pointer: 3169 * it always points to a valid, allocated proc structure. 3170 * (This is true because threads don't clean up their own 3171 * state -- they leave that task to whomever reaps them.) 3172 * 3173 * Additionally, it is safe to dereference one's own process 3174 * credential, since this is never NULL after process birth. 3175 */ 3176 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3177 #else 3178 return (uint64_t)kauth_cred_getgid(curthread->t_procp->p_cred); 3179 #endif 3180 3181 case DIF_VAR_ERRNO: { 3182 #if defined(sun) 3183 klwp_t *lwp; 3184 if (!dtrace_priv_proc(state)) 3185 return (0); 3186 3187 /* 3188 * See comment in DIF_VAR_PID. 3189 */ 3190 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3191 return (0); 3192 3193 /* 3194 * It is always safe to dereference one's own t_lwp pointer in 3195 * the event that this pointer is non-NULL. (This is true 3196 * because threads and lwps don't clean up their own state -- 3197 * they leave that task to whomever reaps them.) 3198 */ 3199 if ((lwp = curthread->t_lwp) == NULL) 3200 return (0); 3201 3202 return ((uint64_t)lwp->lwp_errno); 3203 #else 3204 #if 0 3205 return (curthread->l_errno); 3206 #else 3207 return 0; /* XXX TBD errno support at lwp level? */ 3208 #endif 3209 #endif 3210 } 3211 default: 3212 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3213 return (0); 3214 } 3215 } 3216 3217 /* 3218 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3219 * Notice that we don't bother validating the proper number of arguments or 3220 * their types in the tuple stack. This isn't needed because all argument 3221 * interpretation is safe because of our load safety -- the worst that can 3222 * happen is that a bogus program can obtain bogus results. 
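 *
 * (For illustration: a D call such as strlen(execname) compiles to
 * a DIF call instruction naming DIF_SUBR_STRLEN; by the time we get
 * here, subr identifies the subroutine, its arguments sit in
 * tupregs[0 .. nargs - 1], and the result is to be written to
 * regs[rd].)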
3223 */ 3224 static void 3225 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3226 dtrace_key_t *tupregs, int nargs, 3227 dtrace_mstate_t *mstate, dtrace_state_t *state) 3228 { 3229 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags; 3230 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval; 3231 dtrace_vstate_t *vstate = &state->dts_vstate; 3232 3233 #if defined(sun) 3234 union { 3235 mutex_impl_t mi; 3236 uint64_t mx; 3237 } m; 3238 3239 union { 3240 krwlock_t ri; 3241 uintptr_t rw; 3242 } r; 3243 #else 3244 union { 3245 kmutex_t mi; 3246 uint64_t mx; 3247 } m; 3248 3249 union { 3250 krwlock_t ri; 3251 uintptr_t rw; 3252 } r; 3253 #endif 3254 3255 switch (subr) { 3256 case DIF_SUBR_RAND: 3257 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3258 break; 3259 3260 #if defined(sun) 3261 case DIF_SUBR_MUTEX_OWNED: 3262 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3263 mstate, vstate)) { 3264 regs[rd] = 0; 3265 break; 3266 } 3267 3268 m.mx = dtrace_load64(tupregs[0].dttk_value); 3269 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3270 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3271 else 3272 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3273 break; 3274 3275 case DIF_SUBR_MUTEX_OWNER: 3276 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3277 mstate, vstate)) { 3278 regs[rd] = 0; 3279 break; 3280 } 3281 3282 m.mx = dtrace_load64(tupregs[0].dttk_value); 3283 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3284 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3285 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3286 else 3287 regs[rd] = 0; 3288 break; 3289 3290 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3291 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3292 mstate, vstate)) { 3293 regs[rd] = 0; 3294 break; 3295 } 3296 3297 m.mx = dtrace_load64(tupregs[0].dttk_value); 3298 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3299 break; 3300 3301 case DIF_SUBR_MUTEX_TYPE_SPIN: 3302 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3303 mstate, vstate)) { 3304 regs[rd] = 0; 3305 break; 3306 } 3307 3308 m.mx = dtrace_load64(tupregs[0].dttk_value); 3309 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3310 break; 3311 3312 case DIF_SUBR_RW_READ_HELD: { 3313 uintptr_t tmp; 3314 3315 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3316 mstate, vstate)) { 3317 regs[rd] = 0; 3318 break; 3319 } 3320 3321 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3322 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3323 break; 3324 } 3325 3326 case DIF_SUBR_RW_WRITE_HELD: 3327 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3328 mstate, vstate)) { 3329 regs[rd] = 0; 3330 break; 3331 } 3332 3333 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3334 regs[rd] = _RW_WRITE_HELD(&r.ri); 3335 break; 3336 3337 case DIF_SUBR_RW_ISWRITER: 3338 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3339 mstate, vstate)) { 3340 regs[rd] = 0; 3341 break; 3342 } 3343 3344 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3345 regs[rd] = _RW_ISWRITER(&r.ri); 3346 break; 3347 3348 #else 3349 case DIF_SUBR_MUTEX_OWNED: 3350 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3351 mstate, vstate)) { 3352 regs[rd] = 0; 3353 break; 3354 } 3355 3356 m.mx = dtrace_load64(tupregs[0].dttk_value); 3357 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3358 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3359 else 3360 regs[rd] = __SIMPLELOCK_LOCKED_P(&m.mi.mtx_lock); 3361 break; 3362 3363 case DIF_SUBR_MUTEX_OWNER: 3364 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3365 mstate, 
vstate)) { 3366 regs[rd] = 0; 3367 break; 3368 } 3369 3370 m.mx = dtrace_load64(tupregs[0].dttk_value); 3371 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3372 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3373 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3374 else 3375 regs[rd] = 0; 3376 break; 3377 3378 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3379 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3380 mstate, vstate)) { 3381 regs[rd] = 0; 3382 break; 3383 } 3384 3385 m.mx = dtrace_load64(tupregs[0].dttk_value); 3386 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3387 break; 3388 3389 case DIF_SUBR_MUTEX_TYPE_SPIN: 3390 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3391 mstate, vstate)) { 3392 regs[rd] = 0; 3393 break; 3394 } 3395 3396 m.mx = dtrace_load64(tupregs[0].dttk_value); 3397 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3398 break; 3399 3400 case DIF_SUBR_RW_READ_HELD: { 3401 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3402 mstate, vstate)) { 3403 regs[rd] = 0; 3404 break; 3405 } 3406 3407 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3408 regs[rd] = _RW_READ_HELD(&r.ri); 3409 break; 3410 } 3411 3412 case DIF_SUBR_RW_WRITE_HELD: 3413 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3414 mstate, vstate)) { 3415 regs[rd] = 0; 3416 break; 3417 } 3418 3419 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3420 regs[rd] = _RW_WRITE_HELD(&r.ri); 3421 break; 3422 3423 case DIF_SUBR_RW_ISWRITER: 3424 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3425 mstate, vstate)) { 3426 regs[rd] = 0; 3427 break; 3428 } 3429 3430 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3431 regs[rd] = _RW_ISWRITER(&r.ri); 3432 break; 3433 3434 #endif /* ! defined(sun) */ 3435 3436 case DIF_SUBR_BCOPY: { 3437 /* 3438 * We need to be sure that the destination is in the scratch 3439 * region -- no other region is allowed. 3440 */ 3441 uintptr_t src = tupregs[0].dttk_value; 3442 uintptr_t dest = tupregs[1].dttk_value; 3443 size_t size = tupregs[2].dttk_value; 3444 3445 if (!dtrace_inscratch(dest, size, mstate)) { 3446 *flags |= CPU_DTRACE_BADADDR; 3447 *illval = regs[rd]; 3448 break; 3449 } 3450 3451 if (!dtrace_canload(src, size, mstate, vstate)) { 3452 regs[rd] = 0; 3453 break; 3454 } 3455 3456 dtrace_bcopy((void *)src, (void *)dest, size); 3457 break; 3458 } 3459 3460 case DIF_SUBR_ALLOCA: 3461 case DIF_SUBR_COPYIN: { 3462 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3463 uint64_t size = 3464 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3465 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3466 3467 /* 3468 * This action doesn't require any credential checks since 3469 * probes will not activate in user contexts to which the 3470 * enabling user does not have permissions. 3471 */ 3472 3473 /* 3474 * Rounding up the user allocation size could have overflowed 3475 * a large, bogus allocation (like -1ULL) to 0. 
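 *
 * (Worked example, added for illustration: if dtms_scratch_ptr
 * needs, say, 4 bytes of padding to reach 8-byte alignment and a
 * hostile program passes size == -1ULL, scratch_size wraps around
 * to 3 -- which is less than size, so the check below refuses to
 * hand out a tiny allocation for an enormous request.)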
3476 */ 3477 if (scratch_size < size || 3478 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3479 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3480 regs[rd] = 0; 3481 break; 3482 } 3483 3484 if (subr == DIF_SUBR_COPYIN) { 3485 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3486 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3487 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3488 } 3489 3490 mstate->dtms_scratch_ptr += scratch_size; 3491 regs[rd] = dest; 3492 break; 3493 } 3494 3495 case DIF_SUBR_COPYINTO: { 3496 uint64_t size = tupregs[1].dttk_value; 3497 uintptr_t dest = tupregs[2].dttk_value; 3498 3499 /* 3500 * This action doesn't require any credential checks since 3501 * probes will not activate in user contexts to which the 3502 * enabling user does not have permissions. 3503 */ 3504 if (!dtrace_inscratch(dest, size, mstate)) { 3505 *flags |= CPU_DTRACE_BADADDR; 3506 *illval = regs[rd]; 3507 break; 3508 } 3509 3510 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3511 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3512 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3513 break; 3514 } 3515 3516 case DIF_SUBR_COPYINSTR: { 3517 uintptr_t dest = mstate->dtms_scratch_ptr; 3518 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3519 3520 if (nargs > 1 && tupregs[1].dttk_value < size) 3521 size = tupregs[1].dttk_value + 1; 3522 3523 /* 3524 * This action doesn't require any credential checks since 3525 * probes will not activate in user contexts to which the 3526 * enabling user does not have permissions. 3527 */ 3528 if (!DTRACE_INSCRATCH(mstate, size)) { 3529 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3530 regs[rd] = 0; 3531 break; 3532 } 3533 3534 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3535 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3536 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3537 3538 ((char *)dest)[size - 1] = '\0'; 3539 mstate->dtms_scratch_ptr += size; 3540 regs[rd] = dest; 3541 break; 3542 } 3543 3544 #if defined(sun) 3545 case DIF_SUBR_MSGSIZE: 3546 case DIF_SUBR_MSGDSIZE: { 3547 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3548 uintptr_t wptr, rptr; 3549 size_t count = 0; 3550 int cont = 0; 3551 3552 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3553 3554 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3555 vstate)) { 3556 regs[rd] = 0; 3557 break; 3558 } 3559 3560 wptr = dtrace_loadptr(baddr + 3561 offsetof(mblk_t, b_wptr)); 3562 3563 rptr = dtrace_loadptr(baddr + 3564 offsetof(mblk_t, b_rptr)); 3565 3566 if (wptr < rptr) { 3567 *flags |= CPU_DTRACE_BADADDR; 3568 *illval = tupregs[0].dttk_value; 3569 break; 3570 } 3571 3572 daddr = dtrace_loadptr(baddr + 3573 offsetof(mblk_t, b_datap)); 3574 3575 baddr = dtrace_loadptr(baddr + 3576 offsetof(mblk_t, b_cont)); 3577 3578 /* 3579 * We want to prevent against denial-of-service here, 3580 * so we're only going to search the list for 3581 * dtrace_msgdsize_max mblks. 
3582 */ 3583 if (cont++ > dtrace_msgdsize_max) { 3584 *flags |= CPU_DTRACE_ILLOP; 3585 break; 3586 } 3587 3588 if (subr == DIF_SUBR_MSGDSIZE) { 3589 if (dtrace_load8(daddr + 3590 offsetof(dblk_t, db_type)) != M_DATA) 3591 continue; 3592 } 3593 3594 count += wptr - rptr; 3595 } 3596 3597 if (!(*flags & CPU_DTRACE_FAULT)) 3598 regs[rd] = count; 3599 3600 break; 3601 } 3602 #endif 3603 3604 case DIF_SUBR_PROGENYOF: { 3605 pid_t pid = tupregs[0].dttk_value; 3606 proc_t *p; 3607 int rval = 0; 3608 3609 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3610 3611 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3612 #if defined(sun) 3613 if (p->p_pidp->pid_id == pid) { 3614 #else 3615 if (p->p_pid == pid) { 3616 #endif 3617 rval = 1; 3618 break; 3619 } 3620 } 3621 3622 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3623 3624 regs[rd] = rval; 3625 break; 3626 } 3627 3628 case DIF_SUBR_SPECULATION: 3629 regs[rd] = dtrace_speculation(state); 3630 break; 3631 3632 case DIF_SUBR_COPYOUT: { 3633 uintptr_t kaddr = tupregs[0].dttk_value; 3634 uintptr_t uaddr = tupregs[1].dttk_value; 3635 uint64_t size = tupregs[2].dttk_value; 3636 3637 if (!dtrace_destructive_disallow && 3638 dtrace_priv_proc_control(state) && 3639 !dtrace_istoxic(kaddr, size)) { 3640 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3641 dtrace_copyout(kaddr, uaddr, size, flags); 3642 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3643 } 3644 break; 3645 } 3646 3647 case DIF_SUBR_COPYOUTSTR: { 3648 uintptr_t kaddr = tupregs[0].dttk_value; 3649 uintptr_t uaddr = tupregs[1].dttk_value; 3650 uint64_t size = tupregs[2].dttk_value; 3651 3652 if (!dtrace_destructive_disallow && 3653 dtrace_priv_proc_control(state) && 3654 !dtrace_istoxic(kaddr, size)) { 3655 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3656 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3657 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3658 } 3659 break; 3660 } 3661 3662 case DIF_SUBR_STRLEN: { 3663 size_t sz; 3664 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3665 sz = dtrace_strlen((char *)addr, 3666 state->dts_options[DTRACEOPT_STRSIZE]); 3667 3668 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3669 regs[rd] = 0; 3670 break; 3671 } 3672 3673 regs[rd] = sz; 3674 3675 break; 3676 } 3677 3678 case DIF_SUBR_STRCHR: 3679 case DIF_SUBR_STRRCHR: { 3680 /* 3681 * We're going to iterate over the string looking for the 3682 * specified character. We will iterate until we have reached 3683 * the string length or we have found the character. If this 3684 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3685 * of the specified character instead of the first. 3686 */ 3687 uintptr_t saddr = tupregs[0].dttk_value; 3688 uintptr_t addr = tupregs[0].dttk_value; 3689 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3690 char c, target = (char)tupregs[1].dttk_value; 3691 3692 for (regs[rd] = 0; addr < limit; addr++) { 3693 if ((c = dtrace_load8(addr)) == target) { 3694 regs[rd] = addr; 3695 3696 if (subr == DIF_SUBR_STRCHR) 3697 break; 3698 } 3699 3700 if (c == '\0') 3701 break; 3702 } 3703 3704 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3705 regs[rd] = 0; 3706 break; 3707 } 3708 3709 break; 3710 } 3711 3712 case DIF_SUBR_STRSTR: 3713 case DIF_SUBR_INDEX: 3714 case DIF_SUBR_RINDEX: { 3715 /* 3716 * We're going to iterate over the string looking for the 3717 * specified string. We will iterate until we have reached 3718 * the string length or we have found the string. 
(Yes, this 3719 * is done in the most naive way possible -- but considering 3720 * that the string we're searching for is likely to be 3721 * relatively short, the complexity of Rabin-Karp or similar 3722 * hardly seems merited.) 3723 */ 3724 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3725 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3726 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3727 size_t len = dtrace_strlen(addr, size); 3728 size_t sublen = dtrace_strlen(substr, size); 3729 char *limit = addr + len, *orig = addr; 3730 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3731 int inc = 1; 3732 3733 regs[rd] = notfound; 3734 3735 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3736 regs[rd] = 0; 3737 break; 3738 } 3739 3740 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3741 vstate)) { 3742 regs[rd] = 0; 3743 break; 3744 } 3745 3746 /* 3747 * strstr() and index()/rindex() have similar semantics if 3748 * both strings are the empty string: strstr() returns a 3749 * pointer to the (empty) string, and index() and rindex() 3750 * both return index 0 (regardless of any position argument). 3751 */ 3752 if (sublen == 0 && len == 0) { 3753 if (subr == DIF_SUBR_STRSTR) 3754 regs[rd] = (uintptr_t)addr; 3755 else 3756 regs[rd] = 0; 3757 break; 3758 } 3759 3760 if (subr != DIF_SUBR_STRSTR) { 3761 if (subr == DIF_SUBR_RINDEX) { 3762 limit = orig - 1; 3763 addr += len; 3764 inc = -1; 3765 } 3766 3767 /* 3768 * Both index() and rindex() take an optional position 3769 * argument that denotes the starting position. 3770 */ 3771 if (nargs == 3) { 3772 int64_t pos = (int64_t)tupregs[2].dttk_value; 3773 3774 /* 3775 * If the position argument to index() is 3776 * negative, Perl implicitly clamps it at 3777 * zero. This semantic is a little surprising 3778 * given the special meaning of negative 3779 * positions to similar Perl functions like 3780 * substr(), but it appears to reflect a 3781 * notion that index() can start from a 3782 * negative index and increment its way up to 3783 * the string. Given this notion, Perl's 3784 * rindex() is at least self-consistent in 3785 * that it implicitly clamps positions greater 3786 * than the string length to be the string 3787 * length. Where Perl completely loses 3788 * coherence, however, is when the specified 3789 * substring is the empty string (""). In 3790 * this case, even if the position is 3791 * negative, rindex() returns 0 -- and even if 3792 * the position is greater than the length, 3793 * index() returns the string length. These 3794 * semantics violate the notion that index() 3795 * should never return a value less than the 3796 * specified position and that rindex() should 3797 * never return a value greater than the 3798 * specified position. (One assumes that 3799 * these semantics are artifacts of Perl's 3800 * implementation and not the results of 3801 * deliberate design -- it beggars belief that 3802 * even Larry Wall could desire such oddness.) 3803 * While in the abstract one would wish for 3804 * consistent position semantics across 3805 * substr(), index() and rindex() -- or at the 3806 * very least self-consistent position 3807 * semantics for index() and rindex() -- we 3808 * instead opt to keep with the extant Perl 3809 * semantics, in all their broken glory. (Do 3810 * we have more desire to maintain Perl's 3811 * semantics than Perl does? Probably.) 
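 *
 * (A few worked examples of the resulting behavior, added for
 * illustration: index("dtrace", "race", -5) clamps the position to
 * 0 and returns 2; index("dtrace", "race", 10) returns -1;
 * index("dtrace", "", 10) returns the string length, 6;
 * rindex("dtrace", "race", -1) returns -1; and
 * rindex("dtrace", "", -1) returns 0.)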
3812 */ 3813 if (subr == DIF_SUBR_RINDEX) { 3814 if (pos < 0) { 3815 if (sublen == 0) 3816 regs[rd] = 0; 3817 break; 3818 } 3819 3820 if (pos > len) 3821 pos = len; 3822 } else { 3823 if (pos < 0) 3824 pos = 0; 3825 3826 if (pos >= len) { 3827 if (sublen == 0) 3828 regs[rd] = len; 3829 break; 3830 } 3831 } 3832 3833 addr = orig + pos; 3834 } 3835 } 3836 3837 for (regs[rd] = notfound; addr != limit; addr += inc) { 3838 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3839 if (subr != DIF_SUBR_STRSTR) { 3840 /* 3841 * As D index() and rindex() are 3842 * modeled on Perl (and not on awk), 3843 * we return a zero-based (and not a 3844 * one-based) index. (For you Perl 3845 * weenies: no, we're not going to add 3846 * $[ -- and shouldn't you be at a con 3847 * or something?) 3848 */ 3849 regs[rd] = (uintptr_t)(addr - orig); 3850 break; 3851 } 3852 3853 ASSERT(subr == DIF_SUBR_STRSTR); 3854 regs[rd] = (uintptr_t)addr; 3855 break; 3856 } 3857 } 3858 3859 break; 3860 } 3861 3862 case DIF_SUBR_STRTOK: { 3863 uintptr_t addr = tupregs[0].dttk_value; 3864 uintptr_t tokaddr = tupregs[1].dttk_value; 3865 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3866 uintptr_t limit, toklimit = tokaddr + size; 3867 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3868 char *dest = (char *)mstate->dtms_scratch_ptr; 3869 int i; 3870 3871 /* 3872 * Check both the token buffer and (later) the input buffer, 3873 * since both could be non-scratch addresses. 3874 */ 3875 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3876 regs[rd] = 0; 3877 break; 3878 } 3879 3880 if (!DTRACE_INSCRATCH(mstate, size)) { 3881 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3882 regs[rd] = 0; 3883 break; 3884 } 3885 3886 if (addr == 0) { 3887 /* 3888 * If the address specified is NULL, we use our saved 3889 * strtok pointer from the mstate. Note that this 3890 * means that the saved strtok pointer is _only_ 3891 * valid within multiple enablings of the same probe -- 3892 * it behaves like an implicit clause-local variable. 3893 */ 3894 addr = mstate->dtms_strtok; 3895 } else { 3896 /* 3897 * If the user-specified address is non-NULL we must 3898 * access check it. This is the only time we have 3899 * a chance to do so, since this address may reside 3900 * in the string table of this clause-- future calls 3901 * (when we fetch addr from mstate->dtms_strtok) 3902 * would fail this access check. 3903 */ 3904 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3905 regs[rd] = 0; 3906 break; 3907 } 3908 } 3909 3910 /* 3911 * First, zero the token map, and then process the token 3912 * string -- setting a bit in the map for every character 3913 * found in the token string. 3914 */ 3915 for (i = 0; i < sizeof (tokmap); i++) 3916 tokmap[i] = 0; 3917 3918 for (; tokaddr < toklimit; tokaddr++) { 3919 if ((c = dtrace_load8(tokaddr)) == '\0') 3920 break; 3921 3922 ASSERT((c >> 3) < sizeof (tokmap)); 3923 tokmap[c >> 3] |= (1 << (c & 0x7)); 3924 } 3925 3926 for (limit = addr + size; addr < limit; addr++) { 3927 /* 3928 * We're looking for a character that is _not_ contained 3929 * in the token string. 3930 */ 3931 if ((c = dtrace_load8(addr)) == '\0') 3932 break; 3933 3934 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3935 break; 3936 } 3937 3938 if (c == '\0') { 3939 /* 3940 * We reached the end of the string without finding 3941 * any character that was not in the token string. 3942 * We return NULL in this case, and we set the saved 3943 * address to NULL as well. 
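 *
 * For example, given the input "//a/b" and the token string "/":
 * the first call returns "a" (leading delimiters are skipped), a
 * subsequent strtok(NULL, "/") returns "b", and the next call
 * returns NULL, clearing the saved pointer as described above.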
3944 */ 3945 regs[rd] = 0; 3946 mstate->dtms_strtok = 0; 3947 break; 3948 } 3949 3950 /* 3951 * From here on, we're copying into the destination string. 3952 */ 3953 for (i = 0; addr < limit && i < size - 1; addr++) { 3954 if ((c = dtrace_load8(addr)) == '\0') 3955 break; 3956 3957 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3958 break; 3959 3960 ASSERT(i < size); 3961 dest[i++] = c; 3962 } 3963 3964 ASSERT(i < size); 3965 dest[i] = '\0'; 3966 regs[rd] = (uintptr_t)dest; 3967 mstate->dtms_scratch_ptr += size; 3968 mstate->dtms_strtok = addr; 3969 break; 3970 } 3971 3972 case DIF_SUBR_SUBSTR: { 3973 uintptr_t s = tupregs[0].dttk_value; 3974 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3975 char *d = (char *)mstate->dtms_scratch_ptr; 3976 int64_t index = (int64_t)tupregs[1].dttk_value; 3977 int64_t remaining = (int64_t)tupregs[2].dttk_value; 3978 size_t len = dtrace_strlen((char *)s, size); 3979 int64_t i = 0; 3980 3981 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 3982 regs[rd] = 0; 3983 break; 3984 } 3985 3986 if (!DTRACE_INSCRATCH(mstate, size)) { 3987 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3988 regs[rd] = 0; 3989 break; 3990 } 3991 3992 if (nargs <= 2) 3993 remaining = (int64_t)size; 3994 3995 if (index < 0) { 3996 index += len; 3997 3998 if (index < 0 && index + remaining > 0) { 3999 remaining += index; 4000 index = 0; 4001 } 4002 } 4003 4004 if (index >= len || index < 0) { 4005 remaining = 0; 4006 } else if (remaining < 0) { 4007 remaining += len - index; 4008 } else if (index + remaining > size) { 4009 remaining = size - index; 4010 } 4011 4012 for (i = 0; i < remaining; i++) { 4013 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4014 break; 4015 } 4016 4017 d[i] = '\0'; 4018 4019 mstate->dtms_scratch_ptr += size; 4020 regs[rd] = (uintptr_t)d; 4021 break; 4022 } 4023 4024 #if defined(sun) 4025 case DIF_SUBR_GETMAJOR: 4026 #ifdef _LP64 4027 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4028 #else 4029 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4030 #endif 4031 break; 4032 4033 case DIF_SUBR_GETMINOR: 4034 #ifdef _LP64 4035 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4036 #else 4037 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4038 #endif 4039 break; 4040 4041 case DIF_SUBR_DDI_PATHNAME: { 4042 /* 4043 * This one is a galactic mess. We are going to roughly 4044 * emulate ddi_pathname(), but it's made more complicated 4045 * by the fact that we (a) want to include the minor name and 4046 * (b) must proceed iteratively instead of recursively. 4047 */ 4048 uintptr_t dest = mstate->dtms_scratch_ptr; 4049 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4050 char *start = (char *)dest, *end = start + size - 1; 4051 uintptr_t daddr = tupregs[0].dttk_value; 4052 int64_t minor = (int64_t)tupregs[1].dttk_value; 4053 char *s; 4054 int i, len, depth = 0; 4055 4056 /* 4057 * Due to all the pointer jumping we do and context we must 4058 * rely upon, we just mandate that the user must have kernel 4059 * read privileges to use this routine. 4060 */ 4061 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4062 *flags |= CPU_DTRACE_KPRIV; 4063 *illval = daddr; 4064 regs[rd] = 0; 4065 } 4066 4067 if (!DTRACE_INSCRATCH(mstate, size)) { 4068 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4069 regs[rd] = 0; 4070 break; 4071 } 4072 4073 *end = '\0'; 4074 4075 /* 4076 * We want to have a name for the minor. In order to do this, 4077 * we need to walk the minor list from the devinfo. 
We want 4078 * to be sure that we don't infinitely walk a circular list, 4079 * so we check for circularity by sending a scout pointer 4080 * ahead two elements for every element that we iterate over; 4081 * if the list is circular, these will ultimately point to the 4082 * same element. You may recognize this little trick as the 4083 * answer to a stupid interview question -- one that always 4084 * seems to be asked by those who had to have it laboriously 4085 * explained to them, and who can't even concisely describe 4086 * the conditions under which one would be forced to resort to 4087 * this technique. Needless to say, those conditions are 4088 * found here -- and probably only here. Is this the only use 4089 * of this infamous trick in shipping, production code? If it 4090 * isn't, it probably should be... 4091 */ 4092 if (minor != -1) { 4093 uintptr_t maddr = dtrace_loadptr(daddr + 4094 offsetof(struct dev_info, devi_minor)); 4095 4096 uintptr_t next = offsetof(struct ddi_minor_data, next); 4097 uintptr_t name = offsetof(struct ddi_minor_data, 4098 d_minor) + offsetof(struct ddi_minor, name); 4099 uintptr_t dev = offsetof(struct ddi_minor_data, 4100 d_minor) + offsetof(struct ddi_minor, dev); 4101 uintptr_t scout; 4102 4103 if (maddr != NULL) 4104 scout = dtrace_loadptr(maddr + next); 4105 4106 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4107 uint64_t m; 4108 #ifdef _LP64 4109 m = dtrace_load64(maddr + dev) & MAXMIN64; 4110 #else 4111 m = dtrace_load32(maddr + dev) & MAXMIN; 4112 #endif 4113 if (m != minor) { 4114 maddr = dtrace_loadptr(maddr + next); 4115 4116 if (scout == NULL) 4117 continue; 4118 4119 scout = dtrace_loadptr(scout + next); 4120 4121 if (scout == NULL) 4122 continue; 4123 4124 scout = dtrace_loadptr(scout + next); 4125 4126 if (scout == NULL) 4127 continue; 4128 4129 if (scout == maddr) { 4130 *flags |= CPU_DTRACE_ILLOP; 4131 break; 4132 } 4133 4134 continue; 4135 } 4136 4137 /* 4138 * We have the minor data. Now we need to 4139 * copy the minor's name into the end of the 4140 * pathname. 4141 */ 4142 s = (char *)dtrace_loadptr(maddr + name); 4143 len = dtrace_strlen(s, size); 4144 4145 if (*flags & CPU_DTRACE_FAULT) 4146 break; 4147 4148 if (len != 0) { 4149 if ((end -= (len + 1)) < start) 4150 break; 4151 4152 *end = ':'; 4153 } 4154 4155 for (i = 1; i <= len; i++) 4156 end[i] = dtrace_load8((uintptr_t)s++); 4157 break; 4158 } 4159 } 4160 4161 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4162 ddi_node_state_t devi_state; 4163 4164 devi_state = dtrace_load32(daddr + 4165 offsetof(struct dev_info, devi_node_state)); 4166 4167 if (*flags & CPU_DTRACE_FAULT) 4168 break; 4169 4170 if (devi_state >= DS_INITIALIZED) { 4171 s = (char *)dtrace_loadptr(daddr + 4172 offsetof(struct dev_info, devi_addr)); 4173 len = dtrace_strlen(s, size); 4174 4175 if (*flags & CPU_DTRACE_FAULT) 4176 break; 4177 4178 if (len != 0) { 4179 if ((end -= (len + 1)) < start) 4180 break; 4181 4182 *end = '@'; 4183 } 4184 4185 for (i = 1; i <= len; i++) 4186 end[i] = dtrace_load8((uintptr_t)s++); 4187 } 4188 4189 /* 4190 * Now for the node name... 4191 */ 4192 s = (char *)dtrace_loadptr(daddr + 4193 offsetof(struct dev_info, devi_node_name)); 4194 4195 daddr = dtrace_loadptr(daddr + 4196 offsetof(struct dev_info, devi_parent)); 4197 4198 /* 4199 * If our parent is NULL (that is, if we're the root 4200 * node), we're going to use the special path 4201 * "devices". 
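 *
 * The completed string is therefore of the form
 * "/devices/<node>@<unit-address>/.../<node>@<unit-address>:<minor>",
 * assembled back-to-front in scratch space (the "@<unit-address>"
 * and ":<minor>" pieces appearing only when present).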
4202 */ 4203 if (daddr == 0) 4204 s = "devices"; 4205 4206 len = dtrace_strlen(s, size); 4207 if (*flags & CPU_DTRACE_FAULT) 4208 break; 4209 4210 if ((end -= (len + 1)) < start) 4211 break; 4212 4213 for (i = 1; i <= len; i++) 4214 end[i] = dtrace_load8((uintptr_t)s++); 4215 *end = '/'; 4216 4217 if (depth++ > dtrace_devdepth_max) { 4218 *flags |= CPU_DTRACE_ILLOP; 4219 break; 4220 } 4221 } 4222 4223 if (end < start) 4224 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4225 4226 if (daddr == 0) { 4227 regs[rd] = (uintptr_t)end; 4228 mstate->dtms_scratch_ptr += size; 4229 } 4230 4231 break; 4232 } 4233 #endif 4234 4235 case DIF_SUBR_STRJOIN: { 4236 char *d = (char *)mstate->dtms_scratch_ptr; 4237 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4238 uintptr_t s1 = tupregs[0].dttk_value; 4239 uintptr_t s2 = tupregs[1].dttk_value; 4240 int i = 0; 4241 4242 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4243 !dtrace_strcanload(s2, size, mstate, vstate)) { 4244 regs[rd] = 0; 4245 break; 4246 } 4247 4248 if (!DTRACE_INSCRATCH(mstate, size)) { 4249 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4250 regs[rd] = 0; 4251 break; 4252 } 4253 4254 for (;;) { 4255 if (i >= size) { 4256 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4257 regs[rd] = 0; 4258 break; 4259 } 4260 4261 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4262 i--; 4263 break; 4264 } 4265 } 4266 4267 for (;;) { 4268 if (i >= size) { 4269 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4270 regs[rd] = 0; 4271 break; 4272 } 4273 4274 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4275 break; 4276 } 4277 4278 if (i < size) { 4279 mstate->dtms_scratch_ptr += i; 4280 regs[rd] = (uintptr_t)d; 4281 } 4282 4283 break; 4284 } 4285 4286 case DIF_SUBR_LLTOSTR: { 4287 int64_t i = (int64_t)tupregs[0].dttk_value; 4288 int64_t val = i < 0 ? 
i * -1 : i; 4289 uint64_t size = 22; /* enough room for 2^64 in decimal */ 4290 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4291 4292 if (!DTRACE_INSCRATCH(mstate, size)) { 4293 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4294 regs[rd] = 0; 4295 break; 4296 } 4297 4298 for (*end-- = '\0'; val; val /= 10) 4299 *end-- = '0' + (val % 10); 4300 4301 if (i == 0) 4302 *end-- = '0'; 4303 4304 if (i < 0) 4305 *end-- = '-'; 4306 4307 regs[rd] = (uintptr_t)end + 1; 4308 mstate->dtms_scratch_ptr += size; 4309 break; 4310 } 4311 4312 case DIF_SUBR_HTONS: 4313 case DIF_SUBR_NTOHS: 4314 #if BYTE_ORDER == BIG_ENDIAN 4315 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4316 #else 4317 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4318 #endif 4319 break; 4320 4321 4322 case DIF_SUBR_HTONL: 4323 case DIF_SUBR_NTOHL: 4324 #if BYTE_ORDER == BIG_ENDIAN 4325 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4326 #else 4327 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4328 #endif 4329 break; 4330 4331 4332 case DIF_SUBR_HTONLL: 4333 case DIF_SUBR_NTOHLL: 4334 #if BYTE_ORDER == BIG_ENDIAN 4335 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4336 #else 4337 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4338 #endif 4339 break; 4340 4341 4342 case DIF_SUBR_DIRNAME: 4343 case DIF_SUBR_BASENAME: { 4344 char *dest = (char *)mstate->dtms_scratch_ptr; 4345 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4346 uintptr_t src = tupregs[0].dttk_value; 4347 int i, j, len = dtrace_strlen((char *)src, size); 4348 int lastbase = -1, firstbase = -1, lastdir = -1; 4349 int start, end; 4350 4351 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4352 regs[rd] = 0; 4353 break; 4354 } 4355 4356 if (!DTRACE_INSCRATCH(mstate, size)) { 4357 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4358 regs[rd] = 0; 4359 break; 4360 } 4361 4362 /* 4363 * The basename and dirname for a zero-length string is 4364 * defined to be "." 4365 */ 4366 if (len == 0) { 4367 len = 1; 4368 src = (uintptr_t)"."; 4369 } 4370 4371 /* 4372 * Start from the back of the string, moving back toward the 4373 * front until we see a character that isn't a slash. That 4374 * character is the last character in the basename. 4375 */ 4376 for (i = len - 1; i >= 0; i--) { 4377 if (dtrace_load8(src + i) != '/') 4378 break; 4379 } 4380 4381 if (i >= 0) 4382 lastbase = i; 4383 4384 /* 4385 * Starting from the last character in the basename, move 4386 * towards the front until we find a slash. The character 4387 * that we processed immediately before that is the first 4388 * character in the basename. 4389 */ 4390 for (; i >= 0; i--) { 4391 if (dtrace_load8(src + i) == '/') 4392 break; 4393 } 4394 4395 if (i >= 0) 4396 firstbase = i + 1; 4397 4398 /* 4399 * Now keep going until we find a non-slash character. That 4400 * character is the last character in the dirname. 4401 */ 4402 for (; i >= 0; i--) { 4403 if (dtrace_load8(src + i) != '/') 4404 break; 4405 } 4406 4407 if (i >= 0) 4408 lastdir = i; 4409 4410 ASSERT(!(lastbase == -1 && firstbase != -1)); 4411 ASSERT(!(firstbase == -1 && lastdir != -1)); 4412 4413 if (lastbase == -1) { 4414 /* 4415 * We didn't find a non-slash character. We know that 4416 * the length is non-zero, so the whole string must be 4417 * slashes. In either the dirname or the basename 4418 * case, we return '/'. 4419 */ 4420 ASSERT(firstbase == -1); 4421 firstbase = lastbase = lastdir = 0; 4422 } 4423 4424 if (firstbase == -1) { 4425 /* 4426 * The entire string consists only of a basename 4427 * component. 
If we're looking for dirname, we need 4428 * to change our string to be just "."; if we're 4429 * looking for a basename, we'll just set the first 4430 * character of the basename to be 0. 4431 */ 4432 if (subr == DIF_SUBR_DIRNAME) { 4433 ASSERT(lastdir == -1); 4434 src = (uintptr_t)"."; 4435 lastdir = 0; 4436 } else { 4437 firstbase = 0; 4438 } 4439 } 4440 4441 if (subr == DIF_SUBR_DIRNAME) { 4442 if (lastdir == -1) { 4443 /* 4444 * We know that we have a slash in the name -- 4445 * or lastdir would be set to 0, above. And 4446 * because lastdir is -1, we know that this 4447 * slash must be the first character. (That 4448 * is, the full string must be of the form 4449 * "/basename".) In this case, the last 4450 * character of the directory name is 0. 4451 */ 4452 lastdir = 0; 4453 } 4454 4455 start = 0; 4456 end = lastdir; 4457 } else { 4458 ASSERT(subr == DIF_SUBR_BASENAME); 4459 ASSERT(firstbase != -1 && lastbase != -1); 4460 start = firstbase; 4461 end = lastbase; 4462 } 4463 4464 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4465 dest[j] = dtrace_load8(src + i); 4466 4467 dest[j] = '\0'; 4468 regs[rd] = (uintptr_t)dest; 4469 mstate->dtms_scratch_ptr += size; 4470 break; 4471 } 4472 4473 case DIF_SUBR_CLEANPATH: { 4474 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4475 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4476 uintptr_t src = tupregs[0].dttk_value; 4477 int i = 0, j = 0; 4478 4479 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4480 regs[rd] = 0; 4481 break; 4482 } 4483 4484 if (!DTRACE_INSCRATCH(mstate, size)) { 4485 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4486 regs[rd] = 0; 4487 break; 4488 } 4489 4490 /* 4491 * Move forward, loading each character. 4492 */ 4493 do { 4494 c = dtrace_load8(src + i++); 4495 next: 4496 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4497 break; 4498 4499 if (c != '/') { 4500 dest[j++] = c; 4501 continue; 4502 } 4503 4504 c = dtrace_load8(src + i++); 4505 4506 if (c == '/') { 4507 /* 4508 * We have two slashes -- we can just advance 4509 * to the next character. 4510 */ 4511 goto next; 4512 } 4513 4514 if (c != '.') { 4515 /* 4516 * This is not "." and it's not ".." -- we can 4517 * just store the "/" and this character and 4518 * drive on. 4519 */ 4520 dest[j++] = '/'; 4521 dest[j++] = c; 4522 continue; 4523 } 4524 4525 c = dtrace_load8(src + i++); 4526 4527 if (c == '/') { 4528 /* 4529 * This is a "/./" component. We're not going 4530 * to store anything in the destination buffer; 4531 * we're just going to go to the next component. 4532 */ 4533 goto next; 4534 } 4535 4536 if (c != '.') { 4537 /* 4538 * This is not ".." -- we can just store the 4539 * "/." and this character and continue 4540 * processing. 4541 */ 4542 dest[j++] = '/'; 4543 dest[j++] = '.'; 4544 dest[j++] = c; 4545 continue; 4546 } 4547 4548 c = dtrace_load8(src + i++); 4549 4550 if (c != '/' && c != '\0') { 4551 /* 4552 * This is not ".." -- it's "..[mumble]". 4553 * We'll store the "/.." and this character 4554 * and continue processing. 4555 */ 4556 dest[j++] = '/'; 4557 dest[j++] = '.'; 4558 dest[j++] = '.'; 4559 dest[j++] = c; 4560 continue; 4561 } 4562 4563 /* 4564 * This is "/../" or "/..\0". We need to back up 4565 * our destination pointer until we find a "/". 
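 *
 * Taken together, these rules mean that a path such as
 * "/foo//bar/./baz/../qux" is emitted as "/foo/bar/qux".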
4566 */ 4567 i--; 4568 while (j != 0 && dest[--j] != '/') 4569 continue; 4570 4571 if (c == '\0') 4572 dest[++j] = '/'; 4573 } while (c != '\0'); 4574 4575 dest[j] = '\0'; 4576 regs[rd] = (uintptr_t)dest; 4577 mstate->dtms_scratch_ptr += size; 4578 break; 4579 } 4580 4581 case DIF_SUBR_INET_NTOA: 4582 case DIF_SUBR_INET_NTOA6: 4583 case DIF_SUBR_INET_NTOP: { 4584 size_t size; 4585 int af, argi, i; 4586 char *base, *end; 4587 4588 if (subr == DIF_SUBR_INET_NTOP) { 4589 af = (int)tupregs[0].dttk_value; 4590 argi = 1; 4591 } else { 4592 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4593 argi = 0; 4594 } 4595 4596 if (af == AF_INET) { 4597 ipaddr_t ip4; 4598 uint8_t *ptr8, val; 4599 4600 /* 4601 * Safely load the IPv4 address. 4602 */ 4603 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4604 4605 /* 4606 * Check an IPv4 string will fit in scratch. 4607 */ 4608 size = INET_ADDRSTRLEN; 4609 if (!DTRACE_INSCRATCH(mstate, size)) { 4610 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4611 regs[rd] = 0; 4612 break; 4613 } 4614 base = (char *)mstate->dtms_scratch_ptr; 4615 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4616 4617 /* 4618 * Stringify as a dotted decimal quad. 4619 */ 4620 *end-- = '\0'; 4621 ptr8 = (uint8_t *)&ip4; 4622 for (i = 3; i >= 0; i--) { 4623 val = ptr8[i]; 4624 4625 if (val == 0) { 4626 *end-- = '0'; 4627 } else { 4628 for (; val; val /= 10) { 4629 *end-- = '0' + (val % 10); 4630 } 4631 } 4632 4633 if (i > 0) 4634 *end-- = '.'; 4635 } 4636 ASSERT(end + 1 >= base); 4637 4638 } else if (af == AF_INET6) { 4639 struct in6_addr ip6; 4640 int firstzero, tryzero, numzero, v6end; 4641 uint16_t val; 4642 const char digits[] = "0123456789abcdef"; 4643 4644 /* 4645 * Stringify using RFC 1884 convention 2 - 16 bit 4646 * hexadecimal values with a zero-run compression. 4647 * Lower case hexadecimal digits are used. 4648 * eg, fe80::214:4fff:fe0b:76c8. 4649 * The IPv4 embedded form is returned for inet_ntop, 4650 * just the IPv4 string is returned for inet_ntoa6. 4651 */ 4652 4653 /* 4654 * Safely load the IPv6 address. 4655 */ 4656 dtrace_bcopy( 4657 (void *)(uintptr_t)tupregs[argi].dttk_value, 4658 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4659 4660 /* 4661 * Check an IPv6 string will fit in scratch. 4662 */ 4663 size = INET6_ADDRSTRLEN; 4664 if (!DTRACE_INSCRATCH(mstate, size)) { 4665 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4666 regs[rd] = 0; 4667 break; 4668 } 4669 base = (char *)mstate->dtms_scratch_ptr; 4670 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4671 *end-- = '\0'; 4672 4673 /* 4674 * Find the longest run of 16 bit zero values 4675 * for the single allowed zero compression - "::". 
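 *
 * For example, fe80:0:0:0:214:4fff:fe0b:76c8 is rendered as
 * fe80::214:4fff:fe0b:76c8; if several zero runs tie for the
 * longest, the leftmost run is the one that gets compressed.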
4676 */ 4677 firstzero = -1; 4678 tryzero = -1; 4679 numzero = 1; 4680 for (i = 0; i < sizeof (struct in6_addr); i++) { 4681 #if defined(sun) 4682 if (ip6._S6_un._S6_u8[i] == 0 && 4683 #else 4684 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4685 #endif 4686 tryzero == -1 && i % 2 == 0) { 4687 tryzero = i; 4688 continue; 4689 } 4690 4691 if (tryzero != -1 && 4692 #if defined(sun) 4693 (ip6._S6_un._S6_u8[i] != 0 || 4694 #else 4695 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4696 #endif 4697 i == sizeof (struct in6_addr) - 1)) { 4698 4699 if (i - tryzero <= numzero) { 4700 tryzero = -1; 4701 continue; 4702 } 4703 4704 firstzero = tryzero; 4705 numzero = i - i % 2 - tryzero; 4706 tryzero = -1; 4707 4708 #if defined(sun) 4709 if (ip6._S6_un._S6_u8[i] == 0 && 4710 #else 4711 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4712 #endif 4713 i == sizeof (struct in6_addr) - 1) 4714 numzero += 2; 4715 } 4716 } 4717 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4718 4719 /* 4720 * Check for an IPv4 embedded address. 4721 */ 4722 v6end = sizeof (struct in6_addr) - 2; 4723 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4724 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4725 for (i = sizeof (struct in6_addr) - 1; 4726 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4727 ASSERT(end >= base); 4728 4729 #if defined(sun) 4730 val = ip6._S6_un._S6_u8[i]; 4731 #else 4732 val = ip6.__u6_addr.__u6_addr8[i]; 4733 #endif 4734 4735 if (val == 0) { 4736 *end-- = '0'; 4737 } else { 4738 for (; val; val /= 10) { 4739 *end-- = '0' + val % 10; 4740 } 4741 } 4742 4743 if (i > DTRACE_V4MAPPED_OFFSET) 4744 *end-- = '.'; 4745 } 4746 4747 if (subr == DIF_SUBR_INET_NTOA6) 4748 goto inetout; 4749 4750 /* 4751 * Set v6end to skip the IPv4 address that 4752 * we have already stringified. 4753 */ 4754 v6end = 10; 4755 } 4756 4757 /* 4758 * Build the IPv6 string by working through the 4759 * address in reverse. 4760 */ 4761 for (i = v6end; i >= 0; i -= 2) { 4762 ASSERT(end >= base); 4763 4764 if (i == firstzero + numzero - 2) { 4765 *end-- = ':'; 4766 *end-- = ':'; 4767 i -= numzero - 2; 4768 continue; 4769 } 4770 4771 if (i < 14 && i != firstzero - 2) 4772 *end-- = ':'; 4773 4774 #if defined(sun) 4775 val = (ip6._S6_un._S6_u8[i] << 8) + 4776 ip6._S6_un._S6_u8[i + 1]; 4777 #else 4778 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4779 ip6.__u6_addr.__u6_addr8[i + 1]; 4780 #endif 4781 4782 if (val == 0) { 4783 *end-- = '0'; 4784 } else { 4785 for (; val; val /= 16) { 4786 *end-- = digits[val % 16]; 4787 } 4788 } 4789 } 4790 ASSERT(end + 1 >= base); 4791 4792 } else { 4793 /* 4794 * The user didn't use AH_INET or AH_INET6. 
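 * That is, the address family was neither AF_INET nor AF_INET6,
 * so the operation is flagged as illegal.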
4795 */ 4796 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4797 regs[rd] = 0; 4798 break; 4799 } 4800 4801 inetout: regs[rd] = (uintptr_t)end + 1; 4802 mstate->dtms_scratch_ptr += size; 4803 break; 4804 } 4805 4806 case DIF_SUBR_MEMREF: { 4807 uintptr_t size = 2 * sizeof(uintptr_t); 4808 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4809 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4810 4811 /* address and length */ 4812 memref[0] = tupregs[0].dttk_value; 4813 memref[1] = tupregs[1].dttk_value; 4814 4815 regs[rd] = (uintptr_t) memref; 4816 mstate->dtms_scratch_ptr += scratch_size; 4817 break; 4818 } 4819 4820 case DIF_SUBR_TYPEREF: { 4821 uintptr_t size = 4 * sizeof(uintptr_t); 4822 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4823 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4824 4825 /* address, num_elements, type_str, type_len */ 4826 typeref[0] = tupregs[0].dttk_value; 4827 typeref[1] = tupregs[1].dttk_value; 4828 typeref[2] = tupregs[2].dttk_value; 4829 typeref[3] = tupregs[3].dttk_value; 4830 4831 regs[rd] = (uintptr_t) typeref; 4832 mstate->dtms_scratch_ptr += scratch_size; 4833 break; 4834 } 4835 } 4836 } 4837 4838 /* 4839 * Emulate the execution of DTrace IR instructions specified by the given 4840 * DIF object. This function is deliberately void of assertions as all of 4841 * the necessary checks are handled by a call to dtrace_difo_validate(). 4842 */ 4843 static uint64_t 4844 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4845 dtrace_vstate_t *vstate, dtrace_state_t *state) 4846 { 4847 const dif_instr_t *text = difo->dtdo_buf; 4848 const uint_t textlen = difo->dtdo_len; 4849 const char *strtab = difo->dtdo_strtab; 4850 const uint64_t *inttab = difo->dtdo_inttab; 4851 4852 uint64_t rval = 0; 4853 dtrace_statvar_t *svar; 4854 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4855 dtrace_difv_t *v; 4856 volatile uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags; 4857 volatile uintptr_t *illval = &cpu_core[curcpu_id].cpuc_dtrace_illval; 4858 4859 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4860 uint64_t regs[DIF_DIR_NREGS]; 4861 uint64_t *tmp; 4862 4863 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4864 int64_t cc_r; 4865 uint_t pc = 0, id, opc = 0; 4866 uint8_t ttop = 0; 4867 dif_instr_t instr; 4868 uint_t r1, r2, rd; 4869 4870 /* 4871 * We stash the current DIF object into the machine state: we need it 4872 * for subsequent access checking. 
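 *
 * As an orientation aid -- a loose, illustrative sketch rather than
 * the output of any particular D compiler -- an expression such as
 * "self->x + 1" might arrive here as DIF along the lines of:
 *
 *	setx	inttab[0] -> %r1	(the constant 1)
 *	ldts	self->x   -> %r2	(thread-local load)
 *	add	%r1, %r2  -> %r3
 *	ret	%r3
 *
 * Each such opcode is handled by one arm of the switch on
 * DIF_INSTR_OP(instr) below.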
4873 */ 4874 mstate->dtms_difo = difo; 4875 4876 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4877 4878 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4879 opc = pc; 4880 4881 instr = text[pc++]; 4882 r1 = DIF_INSTR_R1(instr); 4883 r2 = DIF_INSTR_R2(instr); 4884 rd = DIF_INSTR_RD(instr); 4885 4886 switch (DIF_INSTR_OP(instr)) { 4887 case DIF_OP_OR: 4888 regs[rd] = regs[r1] | regs[r2]; 4889 break; 4890 case DIF_OP_XOR: 4891 regs[rd] = regs[r1] ^ regs[r2]; 4892 break; 4893 case DIF_OP_AND: 4894 regs[rd] = regs[r1] & regs[r2]; 4895 break; 4896 case DIF_OP_SLL: 4897 regs[rd] = regs[r1] << regs[r2]; 4898 break; 4899 case DIF_OP_SRL: 4900 regs[rd] = regs[r1] >> regs[r2]; 4901 break; 4902 case DIF_OP_SUB: 4903 regs[rd] = regs[r1] - regs[r2]; 4904 break; 4905 case DIF_OP_ADD: 4906 regs[rd] = regs[r1] + regs[r2]; 4907 break; 4908 case DIF_OP_MUL: 4909 regs[rd] = regs[r1] * regs[r2]; 4910 break; 4911 case DIF_OP_SDIV: 4912 if (regs[r2] == 0) { 4913 regs[rd] = 0; 4914 *flags |= CPU_DTRACE_DIVZERO; 4915 } else { 4916 regs[rd] = (int64_t)regs[r1] / 4917 (int64_t)regs[r2]; 4918 } 4919 break; 4920 4921 case DIF_OP_UDIV: 4922 if (regs[r2] == 0) { 4923 regs[rd] = 0; 4924 *flags |= CPU_DTRACE_DIVZERO; 4925 } else { 4926 regs[rd] = regs[r1] / regs[r2]; 4927 } 4928 break; 4929 4930 case DIF_OP_SREM: 4931 if (regs[r2] == 0) { 4932 regs[rd] = 0; 4933 *flags |= CPU_DTRACE_DIVZERO; 4934 } else { 4935 regs[rd] = (int64_t)regs[r1] % 4936 (int64_t)regs[r2]; 4937 } 4938 break; 4939 4940 case DIF_OP_UREM: 4941 if (regs[r2] == 0) { 4942 regs[rd] = 0; 4943 *flags |= CPU_DTRACE_DIVZERO; 4944 } else { 4945 regs[rd] = regs[r1] % regs[r2]; 4946 } 4947 break; 4948 4949 case DIF_OP_NOT: 4950 regs[rd] = ~regs[r1]; 4951 break; 4952 case DIF_OP_MOV: 4953 regs[rd] = regs[r1]; 4954 break; 4955 case DIF_OP_CMP: 4956 cc_r = regs[r1] - regs[r2]; 4957 cc_n = cc_r < 0; 4958 cc_z = cc_r == 0; 4959 cc_v = 0; 4960 cc_c = regs[r1] < regs[r2]; 4961 break; 4962 case DIF_OP_TST: 4963 cc_n = cc_v = cc_c = 0; 4964 cc_z = regs[r1] == 0; 4965 break; 4966 case DIF_OP_BA: 4967 pc = DIF_INSTR_LABEL(instr); 4968 break; 4969 case DIF_OP_BE: 4970 if (cc_z) 4971 pc = DIF_INSTR_LABEL(instr); 4972 break; 4973 case DIF_OP_BNE: 4974 if (cc_z == 0) 4975 pc = DIF_INSTR_LABEL(instr); 4976 break; 4977 case DIF_OP_BG: 4978 if ((cc_z | (cc_n ^ cc_v)) == 0) 4979 pc = DIF_INSTR_LABEL(instr); 4980 break; 4981 case DIF_OP_BGU: 4982 if ((cc_c | cc_z) == 0) 4983 pc = DIF_INSTR_LABEL(instr); 4984 break; 4985 case DIF_OP_BGE: 4986 if ((cc_n ^ cc_v) == 0) 4987 pc = DIF_INSTR_LABEL(instr); 4988 break; 4989 case DIF_OP_BGEU: 4990 if (cc_c == 0) 4991 pc = DIF_INSTR_LABEL(instr); 4992 break; 4993 case DIF_OP_BL: 4994 if (cc_n ^ cc_v) 4995 pc = DIF_INSTR_LABEL(instr); 4996 break; 4997 case DIF_OP_BLU: 4998 if (cc_c) 4999 pc = DIF_INSTR_LABEL(instr); 5000 break; 5001 case DIF_OP_BLE: 5002 if (cc_z | (cc_n ^ cc_v)) 5003 pc = DIF_INSTR_LABEL(instr); 5004 break; 5005 case DIF_OP_BLEU: 5006 if (cc_c | cc_z) 5007 pc = DIF_INSTR_LABEL(instr); 5008 break; 5009 case DIF_OP_RLDSB: 5010 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5011 *flags |= CPU_DTRACE_KPRIV; 5012 *illval = regs[r1]; 5013 break; 5014 } 5015 /*FALLTHROUGH*/ 5016 case DIF_OP_LDSB: 5017 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5018 break; 5019 case DIF_OP_RLDSH: 5020 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5021 *flags |= CPU_DTRACE_KPRIV; 5022 *illval = regs[r1]; 5023 break; 5024 } 5025 /*FALLTHROUGH*/ 5026 case DIF_OP_LDSH: 5027 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5028 break; 
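/*
 * The R-prefixed loads that follow mirror RLDSB and RLDSH above:
 * each first vets the address with dtrace_canstore() (raising
 * CPU_DTRACE_KPRIV on failure) and then falls through to its
 * unchecked LD* counterpart.
 */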
5029 case DIF_OP_RLDSW: 5030 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5031 *flags |= CPU_DTRACE_KPRIV; 5032 *illval = regs[r1]; 5033 break; 5034 } 5035 /*FALLTHROUGH*/ 5036 case DIF_OP_LDSW: 5037 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5038 break; 5039 case DIF_OP_RLDUB: 5040 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5041 *flags |= CPU_DTRACE_KPRIV; 5042 *illval = regs[r1]; 5043 break; 5044 } 5045 /*FALLTHROUGH*/ 5046 case DIF_OP_LDUB: 5047 regs[rd] = dtrace_load8(regs[r1]); 5048 break; 5049 case DIF_OP_RLDUH: 5050 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5051 *flags |= CPU_DTRACE_KPRIV; 5052 *illval = regs[r1]; 5053 break; 5054 } 5055 /*FALLTHROUGH*/ 5056 case DIF_OP_LDUH: 5057 regs[rd] = dtrace_load16(regs[r1]); 5058 break; 5059 case DIF_OP_RLDUW: 5060 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5061 *flags |= CPU_DTRACE_KPRIV; 5062 *illval = regs[r1]; 5063 break; 5064 } 5065 /*FALLTHROUGH*/ 5066 case DIF_OP_LDUW: 5067 regs[rd] = dtrace_load32(regs[r1]); 5068 break; 5069 case DIF_OP_RLDX: 5070 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5071 *flags |= CPU_DTRACE_KPRIV; 5072 *illval = regs[r1]; 5073 break; 5074 } 5075 /*FALLTHROUGH*/ 5076 case DIF_OP_LDX: 5077 regs[rd] = dtrace_load64(regs[r1]); 5078 break; 5079 case DIF_OP_ULDSB: 5080 regs[rd] = (int8_t) 5081 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5082 break; 5083 case DIF_OP_ULDSH: 5084 regs[rd] = (int16_t) 5085 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5086 break; 5087 case DIF_OP_ULDSW: 5088 regs[rd] = (int32_t) 5089 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5090 break; 5091 case DIF_OP_ULDUB: 5092 regs[rd] = 5093 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5094 break; 5095 case DIF_OP_ULDUH: 5096 regs[rd] = 5097 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5098 break; 5099 case DIF_OP_ULDUW: 5100 regs[rd] = 5101 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5102 break; 5103 case DIF_OP_ULDX: 5104 regs[rd] = 5105 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5106 break; 5107 case DIF_OP_RET: 5108 rval = regs[rd]; 5109 pc = textlen; 5110 break; 5111 case DIF_OP_NOP: 5112 break; 5113 case DIF_OP_SETX: 5114 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5115 break; 5116 case DIF_OP_SETS: 5117 regs[rd] = (uint64_t)(uintptr_t) 5118 (strtab + DIF_INSTR_STRING(instr)); 5119 break; 5120 case DIF_OP_SCMP: { 5121 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5122 uintptr_t s1 = regs[r1]; 5123 uintptr_t s2 = regs[r2]; 5124 5125 if (s1 != 0 && 5126 !dtrace_strcanload(s1, sz, mstate, vstate)) 5127 break; 5128 if (s2 != 0 && 5129 !dtrace_strcanload(s2, sz, mstate, vstate)) 5130 break; 5131 5132 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5133 5134 cc_n = cc_r < 0; 5135 cc_z = cc_r == 0; 5136 cc_v = cc_c = 0; 5137 break; 5138 } 5139 case DIF_OP_LDGA: 5140 regs[rd] = dtrace_dif_variable(mstate, state, 5141 r1, regs[r2]); 5142 break; 5143 case DIF_OP_LDGS: 5144 id = DIF_INSTR_VAR(instr); 5145 5146 if (id >= DIF_VAR_OTHER_UBASE) { 5147 uintptr_t a; 5148 5149 id -= DIF_VAR_OTHER_UBASE; 5150 svar = vstate->dtvs_globals[id]; 5151 ASSERT(svar != NULL); 5152 v = &svar->dtsv_var; 5153 5154 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5155 regs[rd] = svar->dtsv_data; 5156 break; 5157 } 5158 5159 a = (uintptr_t)svar->dtsv_data; 5160 5161 if (*(uint8_t *)a == UINT8_MAX) { 5162 /* 5163 * If the 0th byte is set to UINT8_MAX 5164 * then this is to be treated as a 5165 * reference to a NULL variable. 
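 * (By-ref statics are laid out as a uint64_t-sized NULL flag -- only
 * its first byte is consulted -- followed by the data itself, which
 * is why the non-NULL case returns dtsv_data plus sizeof (uint64_t);
 * the store side in DIF_OP_STGS maintains the flag.)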
5166 */ 5167 regs[rd] = 0; 5168 } else { 5169 regs[rd] = a + sizeof (uint64_t); 5170 } 5171 5172 break; 5173 } 5174 5175 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5176 break; 5177 5178 case DIF_OP_STGS: 5179 id = DIF_INSTR_VAR(instr); 5180 5181 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5182 id -= DIF_VAR_OTHER_UBASE; 5183 5184 svar = vstate->dtvs_globals[id]; 5185 ASSERT(svar != NULL); 5186 v = &svar->dtsv_var; 5187 5188 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5189 uintptr_t a = (uintptr_t)svar->dtsv_data; 5190 5191 ASSERT(a != 0); 5192 ASSERT(svar->dtsv_size != 0); 5193 5194 if (regs[rd] == 0) { 5195 *(uint8_t *)a = UINT8_MAX; 5196 break; 5197 } else { 5198 *(uint8_t *)a = 0; 5199 a += sizeof (uint64_t); 5200 } 5201 if (!dtrace_vcanload( 5202 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5203 mstate, vstate)) 5204 break; 5205 5206 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5207 (void *)a, &v->dtdv_type); 5208 break; 5209 } 5210 5211 svar->dtsv_data = regs[rd]; 5212 break; 5213 5214 case DIF_OP_LDTA: 5215 /* 5216 * There are no DTrace built-in thread-local arrays at 5217 * present. This opcode is saved for future work. 5218 */ 5219 *flags |= CPU_DTRACE_ILLOP; 5220 regs[rd] = 0; 5221 break; 5222 5223 case DIF_OP_LDLS: 5224 id = DIF_INSTR_VAR(instr); 5225 5226 if (id < DIF_VAR_OTHER_UBASE) { 5227 /* 5228 * For now, this has no meaning. 5229 */ 5230 regs[rd] = 0; 5231 break; 5232 } 5233 5234 id -= DIF_VAR_OTHER_UBASE; 5235 5236 ASSERT(id < vstate->dtvs_nlocals); 5237 ASSERT(vstate->dtvs_locals != NULL); 5238 5239 svar = vstate->dtvs_locals[id]; 5240 ASSERT(svar != NULL); 5241 v = &svar->dtsv_var; 5242 5243 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5244 uintptr_t a = (uintptr_t)svar->dtsv_data; 5245 size_t sz = v->dtdv_type.dtdt_size; 5246 5247 sz += sizeof (uint64_t); 5248 ASSERT(svar->dtsv_size == NCPU * sz); 5249 a += curcpu_id * sz; 5250 5251 if (*(uint8_t *)a == UINT8_MAX) { 5252 /* 5253 * If the 0th byte is set to UINT8_MAX 5254 * then this is to be treated as a 5255 * reference to a NULL variable. 
5256 */ 5257 regs[rd] = 0; 5258 } else { 5259 regs[rd] = a + sizeof (uint64_t); 5260 } 5261 5262 break; 5263 } 5264 5265 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5266 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5267 regs[rd] = tmp[curcpu_id]; 5268 break; 5269 5270 case DIF_OP_STLS: 5271 id = DIF_INSTR_VAR(instr); 5272 5273 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5274 id -= DIF_VAR_OTHER_UBASE; 5275 ASSERT(id < vstate->dtvs_nlocals); 5276 5277 ASSERT(vstate->dtvs_locals != NULL); 5278 svar = vstate->dtvs_locals[id]; 5279 ASSERT(svar != NULL); 5280 v = &svar->dtsv_var; 5281 5282 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5283 uintptr_t a = (uintptr_t)svar->dtsv_data; 5284 size_t sz = v->dtdv_type.dtdt_size; 5285 5286 sz += sizeof (uint64_t); 5287 ASSERT(svar->dtsv_size == NCPU * sz); 5288 a += curcpu_id * sz; 5289 5290 if (regs[rd] == 0) { 5291 *(uint8_t *)a = UINT8_MAX; 5292 break; 5293 } else { 5294 *(uint8_t *)a = 0; 5295 a += sizeof (uint64_t); 5296 } 5297 5298 if (!dtrace_vcanload( 5299 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5300 mstate, vstate)) 5301 break; 5302 5303 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5304 (void *)a, &v->dtdv_type); 5305 break; 5306 } 5307 5308 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5309 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5310 tmp[curcpu_id] = regs[rd]; 5311 break; 5312 5313 case DIF_OP_LDTS: { 5314 dtrace_dynvar_t *dvar; 5315 dtrace_key_t *key; 5316 5317 id = DIF_INSTR_VAR(instr); 5318 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5319 id -= DIF_VAR_OTHER_UBASE; 5320 v = &vstate->dtvs_tlocals[id]; 5321 5322 key = &tupregs[DIF_DTR_NREGS]; 5323 key[0].dttk_value = (uint64_t)id; 5324 key[0].dttk_size = 0; 5325 DTRACE_TLS_THRKEY(key[1].dttk_value); 5326 key[1].dttk_size = 0; 5327 5328 dvar = dtrace_dynvar(dstate, 2, key, 5329 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5330 mstate, vstate); 5331 5332 if (dvar == NULL) { 5333 regs[rd] = 0; 5334 break; 5335 } 5336 5337 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5338 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5339 } else { 5340 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5341 } 5342 5343 break; 5344 } 5345 5346 case DIF_OP_STTS: { 5347 dtrace_dynvar_t *dvar; 5348 dtrace_key_t *key; 5349 5350 id = DIF_INSTR_VAR(instr); 5351 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5352 id -= DIF_VAR_OTHER_UBASE; 5353 5354 key = &tupregs[DIF_DTR_NREGS]; 5355 key[0].dttk_value = (uint64_t)id; 5356 key[0].dttk_size = 0; 5357 DTRACE_TLS_THRKEY(key[1].dttk_value); 5358 key[1].dttk_size = 0; 5359 v = &vstate->dtvs_tlocals[id]; 5360 5361 dvar = dtrace_dynvar(dstate, 2, key, 5362 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5363 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5364 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5365 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5366 5367 /* 5368 * Given that we're storing to thread-local data, 5369 * we need to flush our predicate cache. 
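 * (A cached predicate result records that a predicate evaluated to
 * false for this thread; since a predicate may examine thread-local
 * variables, any store to one could change that outcome, so the
 * cache is cleared unconditionally.)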
5370 */ 5371 curthread->t_predcache = 0; 5372 5373 if (dvar == NULL) 5374 break; 5375 5376 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5377 if (!dtrace_vcanload( 5378 (void *)(uintptr_t)regs[rd], 5379 &v->dtdv_type, mstate, vstate)) 5380 break; 5381 5382 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5383 dvar->dtdv_data, &v->dtdv_type); 5384 } else { 5385 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5386 } 5387 5388 break; 5389 } 5390 5391 case DIF_OP_SRA: 5392 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5393 break; 5394 5395 case DIF_OP_CALL: 5396 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5397 regs, tupregs, ttop, mstate, state); 5398 break; 5399 5400 case DIF_OP_PUSHTR: 5401 if (ttop == DIF_DTR_NREGS) { 5402 *flags |= CPU_DTRACE_TUPOFLOW; 5403 break; 5404 } 5405 5406 if (r1 == DIF_TYPE_STRING) { 5407 /* 5408 * If this is a string type and the size is 0, 5409 * we'll use the system-wide default string 5410 * size. Note that we are _not_ looking at 5411 * the value of the DTRACEOPT_STRSIZE option; 5412 * had this been set, we would expect to have 5413 * a non-zero size value in the "pushtr". 5414 */ 5415 tupregs[ttop].dttk_size = 5416 dtrace_strlen((char *)(uintptr_t)regs[rd], 5417 regs[r2] ? regs[r2] : 5418 dtrace_strsize_default) + 1; 5419 } else { 5420 tupregs[ttop].dttk_size = regs[r2]; 5421 } 5422 5423 tupregs[ttop++].dttk_value = regs[rd]; 5424 break; 5425 5426 case DIF_OP_PUSHTV: 5427 if (ttop == DIF_DTR_NREGS) { 5428 *flags |= CPU_DTRACE_TUPOFLOW; 5429 break; 5430 } 5431 5432 tupregs[ttop].dttk_value = regs[rd]; 5433 tupregs[ttop++].dttk_size = 0; 5434 break; 5435 5436 case DIF_OP_POPTS: 5437 if (ttop != 0) 5438 ttop--; 5439 break; 5440 5441 case DIF_OP_FLUSHTS: 5442 ttop = 0; 5443 break; 5444 5445 case DIF_OP_LDGAA: 5446 case DIF_OP_LDTAA: { 5447 dtrace_dynvar_t *dvar; 5448 dtrace_key_t *key = tupregs; 5449 uint_t nkeys = ttop; 5450 5451 id = DIF_INSTR_VAR(instr); 5452 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5453 id -= DIF_VAR_OTHER_UBASE; 5454 5455 key[nkeys].dttk_value = (uint64_t)id; 5456 key[nkeys++].dttk_size = 0; 5457 5458 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5459 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5460 key[nkeys++].dttk_size = 0; 5461 v = &vstate->dtvs_tlocals[id]; 5462 } else { 5463 v = &vstate->dtvs_globals[id]->dtsv_var; 5464 } 5465 5466 dvar = dtrace_dynvar(dstate, nkeys, key, 5467 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5468 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5469 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5470 5471 if (dvar == NULL) { 5472 regs[rd] = 0; 5473 break; 5474 } 5475 5476 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5477 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5478 } else { 5479 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5480 } 5481 5482 break; 5483 } 5484 5485 case DIF_OP_STGAA: 5486 case DIF_OP_STTAA: { 5487 dtrace_dynvar_t *dvar; 5488 dtrace_key_t *key = tupregs; 5489 uint_t nkeys = ttop; 5490 5491 id = DIF_INSTR_VAR(instr); 5492 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5493 id -= DIF_VAR_OTHER_UBASE; 5494 5495 key[nkeys].dttk_value = (uint64_t)id; 5496 key[nkeys++].dttk_size = 0; 5497 5498 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5499 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5500 key[nkeys++].dttk_size = 0; 5501 v = &vstate->dtvs_tlocals[id]; 5502 } else { 5503 v = &vstate->dtvs_globals[id]->dtsv_var; 5504 } 5505 5506 dvar = dtrace_dynvar(dstate, nkeys, key, 5507 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5508 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5509 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5510 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5511 5512 if (dvar == NULL) 5513 break; 5514 5515 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5516 if (!dtrace_vcanload( 5517 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5518 mstate, vstate)) 5519 break; 5520 5521 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5522 dvar->dtdv_data, &v->dtdv_type); 5523 } else { 5524 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5525 } 5526 5527 break; 5528 } 5529 5530 case DIF_OP_ALLOCS: { 5531 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5532 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5533 5534 /* 5535 * Rounding up the user allocation size could have 5536 * overflowed large, bogus allocations (like -1ULL) to 5537 * 0. 5538 */ 5539 if (size < regs[r1] || 5540 !DTRACE_INSCRATCH(mstate, size)) { 5541 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5542 regs[rd] = 0; 5543 break; 5544 } 5545 5546 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5547 mstate->dtms_scratch_ptr += size; 5548 regs[rd] = ptr; 5549 break; 5550 } 5551 5552 case DIF_OP_COPYS: 5553 if (!dtrace_canstore(regs[rd], regs[r2], 5554 mstate, vstate)) { 5555 *flags |= CPU_DTRACE_BADADDR; 5556 *illval = regs[rd]; 5557 break; 5558 } 5559 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5560 break; 5561 5562 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5563 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5564 break; 5565 5566 case DIF_OP_STB: 5567 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5568 *flags |= CPU_DTRACE_BADADDR; 5569 *illval = regs[rd]; 5570 break; 5571 } 5572 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5573 break; 5574 5575 case DIF_OP_STH: 5576 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5577 *flags |= CPU_DTRACE_BADADDR; 5578 *illval = regs[rd]; 5579 break; 5580 } 5581 if (regs[rd] & 1) { 5582 *flags |= CPU_DTRACE_BADALIGN; 5583 *illval = regs[rd]; 5584 break; 5585 } 5586 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5587 break; 5588 5589 case DIF_OP_STW: 5590 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5591 *flags |= CPU_DTRACE_BADADDR; 5592 *illval = regs[rd]; 5593 break; 5594 } 5595 if (regs[rd] & 3) { 5596 *flags |= CPU_DTRACE_BADALIGN; 5597 *illval = regs[rd]; 5598 break; 5599 } 5600 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5601 break; 5602 5603 case DIF_OP_STX: 5604 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5605 *flags |= CPU_DTRACE_BADADDR; 5606 *illval = regs[rd]; 5607 break; 5608 } 5609 if (regs[rd] & 7) { 5610 *flags |= CPU_DTRACE_BADALIGN; 5611 *illval = regs[rd]; 5612 break; 5613 } 5614 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5615 break; 5616 } 5617 } 5618 5619 if (!(*flags & CPU_DTRACE_FAULT)) 5620 return (rval); 5621 5622 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5623 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5624 5625 return (0); 5626 } 5627 5628 static void 5629 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5630 { 5631 dtrace_probe_t *probe = ecb->dte_probe; 5632 dtrace_provider_t *prov = probe->dtpr_provider; 5633 char c[DTRACE_FULLNAMELEN + 80], *str; 5634 char *msg = "dtrace: breakpoint action at probe "; 5635 char *ecbmsg = " (ecb "; 5636 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5637 uintptr_t val = (uintptr_t)ecb; 5638 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5639 5640 if (dtrace_destructive_disallow) 5641 return; 5642 5643 /* 5644 * It's impossible to be taking action on the NULL probe. 
5645 */ 5646 ASSERT(probe != NULL); 5647 5648 /* 5649 * This is a poor man's (destitute man's?) sprintf(): we want to 5650 * print the provider name, module name, function name and name of 5651 * the probe, along with the hex address of the ECB with the breakpoint 5652 * action -- all of which we must place in the character buffer by 5653 * hand. 5654 */ 5655 while (*msg != '\0') 5656 c[i++] = *msg++; 5657 5658 for (str = prov->dtpv_name; *str != '\0'; str++) 5659 c[i++] = *str; 5660 c[i++] = ':'; 5661 5662 for (str = probe->dtpr_mod; *str != '\0'; str++) 5663 c[i++] = *str; 5664 c[i++] = ':'; 5665 5666 for (str = probe->dtpr_func; *str != '\0'; str++) 5667 c[i++] = *str; 5668 c[i++] = ':'; 5669 5670 for (str = probe->dtpr_name; *str != '\0'; str++) 5671 c[i++] = *str; 5672 5673 while (*ecbmsg != '\0') 5674 c[i++] = *ecbmsg++; 5675 5676 while (shift >= 0) { 5677 mask = (uintptr_t)0xf << shift; 5678 5679 if (val >= ((uintptr_t)1 << shift)) 5680 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5681 shift -= 4; 5682 } 5683 5684 c[i++] = ')'; 5685 c[i] = '\0'; 5686 5687 #if defined(sun) 5688 debug_enter(c); 5689 #else 5690 #ifdef DDB 5691 db_printf("%s\n", c); 5692 Debugger(); 5693 #else 5694 printf("%s ignored\n", c); 5695 #endif /* DDB */ 5696 #endif 5697 } 5698 5699 static void 5700 dtrace_action_panic(dtrace_ecb_t *ecb) 5701 { 5702 dtrace_probe_t *probe = ecb->dte_probe; 5703 5704 /* 5705 * It's impossible to be taking action on the NULL probe. 5706 */ 5707 ASSERT(probe != NULL); 5708 5709 if (dtrace_destructive_disallow) 5710 return; 5711 5712 if (dtrace_panicked != NULL) 5713 return; 5714 5715 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5716 return; 5717 5718 /* 5719 * We won the right to panic. (We want to be sure that only one 5720 * thread calls panic() from dtrace_probe(), and that panic() is 5721 * called exactly once.) 5722 */ 5723 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5724 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5725 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5726 } 5727 5728 static void 5729 dtrace_action_raise(uint64_t sig) 5730 { 5731 if (dtrace_destructive_disallow) 5732 return; 5733 5734 if (sig >= NSIG) { 5735 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5736 return; 5737 } 5738 5739 #if defined(sun) 5740 /* 5741 * raise() has a queue depth of 1 -- we ignore all subsequent 5742 * invocations of the raise() action. 
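 * (The recorded signal is delivered when the thread next returns to
 * user level; setting t_sig_check and calling aston() merely
 * arranges for that return path to notice it.)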
5743 */ 5744 if (curthread->t_dtrace_sig == 0) 5745 curthread->t_dtrace_sig = (uint8_t)sig; 5746 5747 curthread->t_sig_check = 1; 5748 aston(curthread); 5749 #else 5750 struct proc *p = curproc; 5751 mutex_enter(proc_lock); 5752 psignal(p, sig); 5753 mutex_exit(proc_lock); 5754 #endif 5755 } 5756 5757 static void 5758 dtrace_action_stop(void) 5759 { 5760 if (dtrace_destructive_disallow) 5761 return; 5762 5763 #if defined(sun) 5764 if (!curthread->t_dtrace_stop) { 5765 curthread->t_dtrace_stop = 1; 5766 curthread->t_sig_check = 1; 5767 aston(curthread); 5768 } 5769 #else 5770 struct proc *p = curproc; 5771 mutex_enter(proc_lock); 5772 psignal(p, SIGSTOP); 5773 mutex_exit(proc_lock); 5774 #endif 5775 } 5776 5777 static void 5778 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5779 { 5780 #if 0 /* XXX TBD - needs solaris_cpu */ 5781 hrtime_t now; 5782 volatile uint16_t *flags; 5783 #if defined(sun) 5784 cpu_t *cpu = CPU; 5785 #else 5786 cpu_t *cpu = &solaris_cpu[curcpu_id]; 5787 #endif 5788 5789 if (dtrace_destructive_disallow) 5790 return; 5791 5792 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5793 5794 now = dtrace_gethrtime(); 5795 5796 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5797 /* 5798 * We need to advance the mark to the current time. 5799 */ 5800 cpu->cpu_dtrace_chillmark = now; 5801 cpu->cpu_dtrace_chilled = 0; 5802 } 5803 5804 /* 5805 * Now check to see if the requested chill time would take us over 5806 * the maximum amount of time allowed in the chill interval. (Or 5807 * worse, if the calculation itself induces overflow.) 5808 */ 5809 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5810 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5811 *flags |= CPU_DTRACE_ILLOP; 5812 return; 5813 } 5814 5815 while (dtrace_gethrtime() - now < val) 5816 continue; 5817 5818 /* 5819 * Normally, we assure that the value of the variable "timestamp" does 5820 * not change within an ECB. The presence of chill() represents an 5821 * exception to this rule, however. 5822 */ 5823 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5824 cpu->cpu_dtrace_chilled += val; 5825 #endif 5826 } 5827 5828 #if defined(sun) 5829 static void 5830 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5831 uint64_t *buf, uint64_t arg) 5832 { 5833 int nframes = DTRACE_USTACK_NFRAMES(arg); 5834 int strsize = DTRACE_USTACK_STRSIZE(arg); 5835 uint64_t *pcs = &buf[1], *fps; 5836 char *str = (char *)&pcs[nframes]; 5837 int size, offs = 0, i, j; 5838 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5839 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags; 5840 char *sym; 5841 5842 /* 5843 * Should be taking a faster path if string space has not been 5844 * allocated. 5845 */ 5846 ASSERT(strsize != 0); 5847 5848 /* 5849 * We will first allocate some temporary space for the frame pointers. 5850 */ 5851 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5852 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5853 (nframes * sizeof (uint64_t)); 5854 5855 if (!DTRACE_INSCRATCH(mstate, size)) { 5856 /* 5857 * Not enough room for our frame pointers -- need to indicate 5858 * that we ran out of scratch space. 5859 */ 5860 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5861 return; 5862 } 5863 5864 mstate->dtms_scratch_ptr += size; 5865 saved = mstate->dtms_scratch_ptr; 5866 5867 /* 5868 * Now get a stack with both program counters and frame pointers. 
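 * (The finished record in buf consists of the program counters
 * beginning at &buf[1], followed by strsize bytes of string space;
 * the frame pointers gathered here live only in scratch memory and
 * are consumed by the helper calls below.)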
5869 */ 5870 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5871 dtrace_getufpstack(buf, fps, nframes + 1); 5872 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5873 5874 /* 5875 * If that faulted, we're cooked. 5876 */ 5877 if (*flags & CPU_DTRACE_FAULT) 5878 goto out; 5879 5880 /* 5881 * Now we want to walk up the stack, calling the USTACK helper. For 5882 * each iteration, we restore the scratch pointer. 5883 */ 5884 for (i = 0; i < nframes; i++) { 5885 mstate->dtms_scratch_ptr = saved; 5886 5887 if (offs >= strsize) 5888 break; 5889 5890 sym = (char *)(uintptr_t)dtrace_helper( 5891 DTRACE_HELPER_ACTION_USTACK, 5892 mstate, state, pcs[i], fps[i]); 5893 5894 /* 5895 * If we faulted while running the helper, we're going to 5896 * clear the fault and null out the corresponding string. 5897 */ 5898 if (*flags & CPU_DTRACE_FAULT) { 5899 *flags &= ~CPU_DTRACE_FAULT; 5900 str[offs++] = '\0'; 5901 continue; 5902 } 5903 5904 if (sym == NULL) { 5905 str[offs++] = '\0'; 5906 continue; 5907 } 5908 5909 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5910 5911 /* 5912 * Now copy in the string that the helper returned to us. 5913 */ 5914 for (j = 0; offs + j < strsize; j++) { 5915 if ((str[offs + j] = sym[j]) == '\0') 5916 break; 5917 } 5918 5919 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5920 5921 offs += j + 1; 5922 } 5923 5924 if (offs >= strsize) { 5925 /* 5926 * If we didn't have room for all of the strings, we don't 5927 * abort processing -- this needn't be a fatal error -- but we 5928 * still want to increment a counter (dts_stkstroverflows) to 5929 * allow this condition to be warned about. (If this is from 5930 * a jstack() action, it is easily tuned via jstackstrsize.) 5931 */ 5932 dtrace_error(&state->dts_stkstroverflows); 5933 } 5934 5935 while (offs < strsize) 5936 str[offs++] = '\0'; 5937 5938 out: 5939 mstate->dtms_scratch_ptr = old; 5940 } 5941 #endif 5942 5943 /* 5944 * If you're looking for the epicenter of DTrace, you just found it. This 5945 * is the function called by the provider to fire a probe -- from which all 5946 * subsequent probe-context DTrace activity emanates. 5947 */ 5948 void 5949 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 5950 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 5951 { 5952 processorid_t cpuid; 5953 dtrace_icookie_t cookie; 5954 dtrace_probe_t *probe; 5955 dtrace_mstate_t mstate; 5956 dtrace_ecb_t *ecb; 5957 dtrace_action_t *act; 5958 intptr_t offs; 5959 size_t size; 5960 int vtime, onintr; 5961 volatile uint16_t *flags; 5962 hrtime_t now; 5963 5964 #if defined(sun) 5965 /* 5966 * Kick out immediately if this CPU is still being born (in which case 5967 * curthread will be set to -1) or the current thread can't allow 5968 * probes in its current context. 5969 */ 5970 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 5971 return; 5972 #endif 5973 5974 cookie = dtrace_interrupt_disable(); 5975 probe = dtrace_probes[id - 1]; 5976 cpuid = curcpu_id; 5977 onintr = CPU_ON_INTR(CPU); 5978 5979 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 5980 probe->dtpr_predcache == curthread->t_predcache) { 5981 /* 5982 * We have hit in the predicate cache; we know that 5983 * this predicate would evaluate to be false. 5984 */ 5985 dtrace_interrupt_enable(cookie); 5986 return; 5987 } 5988 5989 #if defined(sun) 5990 if (panic_quiesce) { 5991 #else 5992 if (panicstr != NULL) { 5993 #endif 5994 /* 5995 * We don't trace anything if we're panicking. 
5996 */ 5997 dtrace_interrupt_enable(cookie); 5998 return; 5999 } 6000 6001 now = dtrace_gethrtime(); 6002 vtime = dtrace_vtime_references != 0; 6003 6004 if (vtime && curthread->t_dtrace_start) 6005 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6006 6007 mstate.dtms_difo = NULL; 6008 mstate.dtms_probe = probe; 6009 mstate.dtms_strtok = 0; 6010 mstate.dtms_arg[0] = arg0; 6011 mstate.dtms_arg[1] = arg1; 6012 mstate.dtms_arg[2] = arg2; 6013 mstate.dtms_arg[3] = arg3; 6014 mstate.dtms_arg[4] = arg4; 6015 6016 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6017 6018 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6019 dtrace_predicate_t *pred = ecb->dte_predicate; 6020 dtrace_state_t *state = ecb->dte_state; 6021 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6022 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6023 dtrace_vstate_t *vstate = &state->dts_vstate; 6024 dtrace_provider_t *prov = probe->dtpr_provider; 6025 int committed = 0; 6026 caddr_t tomax; 6027 6028 /* 6029 * A little subtlety with the following (seemingly innocuous) 6030 * declaration of the automatic 'val': by looking at the 6031 * code, you might think that it could be declared in the 6032 * action processing loop, below. (That is, it's only used in 6033 * the action processing loop.) However, it must be declared 6034 * out of that scope because in the case of DIF expression 6035 * arguments to aggregating actions, one iteration of the 6036 * action loop will use the last iteration's value. 6037 */ 6038 uint64_t val = 0; 6039 6040 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6041 *flags &= ~CPU_DTRACE_ERROR; 6042 6043 if (prov == dtrace_provider) { 6044 /* 6045 * If dtrace itself is the provider of this probe, 6046 * we're only going to continue processing the ECB if 6047 * arg0 (the dtrace_state_t) is equal to the ECB's 6048 * creating state. (This prevents disjoint consumers 6049 * from seeing one another's metaprobes.) 6050 */ 6051 if (arg0 != (uint64_t)(uintptr_t)state) 6052 continue; 6053 } 6054 6055 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6056 /* 6057 * We're not currently active. If our provider isn't 6058 * the dtrace pseudo provider, we're not interested. 6059 */ 6060 if (prov != dtrace_provider) 6061 continue; 6062 6063 /* 6064 * Now we must further check if we are in the BEGIN 6065 * probe. If we are, we will only continue processing 6066 * if we're still in WARMUP -- if one BEGIN enabling 6067 * has invoked the exit() action, we don't want to 6068 * evaluate subsequent BEGIN enablings. 6069 */ 6070 if (probe->dtpr_id == dtrace_probeid_begin && 6071 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6072 ASSERT(state->dts_activity == 6073 DTRACE_ACTIVITY_DRAINING); 6074 continue; 6075 } 6076 } 6077 6078 if (ecb->dte_cond) { 6079 /* 6080 * If the dte_cond bits indicate that this 6081 * consumer is only allowed to see user-mode firings 6082 * of this probe, call the provider's dtps_usermode() 6083 * entry point to check that the probe was fired 6084 * while in a user context. Skip this ECB if that's 6085 * not the case. 6086 */ 6087 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6088 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6089 probe->dtpr_id, probe->dtpr_arg) == 0) 6090 continue; 6091 6092 #if defined(sun) 6093 /* 6094 * This is more subtle than it looks. 
We have to be 6095 * absolutely certain that CRED() isn't going to 6096 * change out from under us so it's only legit to 6097 * examine that structure if we're in constrained 6098 * situations. Currently, the only times we'll perform this 6099 * check are when a non-super-user has enabled the 6100 * profile or syscall providers -- providers that 6101 * allow visibility of all processes. For the 6102 * profile case, the check above will ensure that 6103 * we're examining a user context. 6104 */ 6105 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6106 cred_t *cr; 6107 cred_t *s_cr = 6108 ecb->dte_state->dts_cred.dcr_cred; 6109 proc_t *proc; 6110 6111 ASSERT(s_cr != NULL); 6112 6113 if ((cr = CRED()) == NULL || 6114 s_cr->cr_uid != cr->cr_uid || 6115 s_cr->cr_uid != cr->cr_ruid || 6116 s_cr->cr_uid != cr->cr_suid || 6117 s_cr->cr_gid != cr->cr_gid || 6118 s_cr->cr_gid != cr->cr_rgid || 6119 s_cr->cr_gid != cr->cr_sgid || 6120 (proc = ttoproc(curthread)) == NULL || 6121 (proc->p_flag & SNOCD)) 6122 continue; 6123 } 6124 6125 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6126 cred_t *cr; 6127 cred_t *s_cr = 6128 ecb->dte_state->dts_cred.dcr_cred; 6129 6130 ASSERT(s_cr != NULL); 6131 6132 if ((cr = CRED()) == NULL || 6133 s_cr->cr_zone->zone_id != 6134 cr->cr_zone->zone_id) 6135 continue; 6136 } 6137 #endif 6138 } 6139 6140 if (now - state->dts_alive > dtrace_deadman_timeout) { 6141 /* 6142 * We seem to be dead. Unless we (a) have kernel 6143 * destructive permissions (b) have explicitly enabled 6144 * destructive actions and (c) destructive actions have 6145 * not been disabled, we're going to transition into 6146 * the KILLED state, from which no further processing 6147 * on this state will be performed. 6148 */ 6149 if (!dtrace_priv_kernel_destructive(state) || 6150 !state->dts_cred.dcr_destructive || 6151 dtrace_destructive_disallow) { 6152 void *activity = &state->dts_activity; 6153 dtrace_activity_t current; 6154 6155 do { 6156 current = state->dts_activity; 6157 } while (dtrace_cas32(activity, current, 6158 DTRACE_ACTIVITY_KILLED) != current); 6159 6160 continue; 6161 } 6162 } 6163 6164 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6165 ecb->dte_alignment, state, &mstate)) < 0) 6166 continue; 6167 6168 tomax = buf->dtb_tomax; 6169 ASSERT(tomax != NULL); 6170 6171 if (ecb->dte_size != 0) 6172 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid); 6173 6174 mstate.dtms_epid = ecb->dte_epid; 6175 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6176 6177 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6178 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6179 else 6180 mstate.dtms_access = 0; 6181 6182 if (pred != NULL) { 6183 dtrace_difo_t *dp = pred->dtp_difo; 6184 int rval; 6185 6186 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6187 6188 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6189 dtrace_cacheid_t cid = probe->dtpr_predcache; 6190 6191 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6192 /* 6193 * Update the predicate cache...
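 * (A hit on this cache lets the check at the top of dtrace_probe(),
 * performed before the ECB loop, return immediately the next time this
 * thread fires this probe, since the predicate is then known to evaluate
 * to false for this thread.)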
6194 */ 6195 ASSERT(cid == pred->dtp_cacheid); 6196 curthread->t_predcache = cid; 6197 } 6198 6199 continue; 6200 } 6201 } 6202 6203 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6204 act != NULL; act = act->dta_next) { 6205 size_t valoffs; 6206 dtrace_difo_t *dp; 6207 dtrace_recdesc_t *rec = &act->dta_rec; 6208 6209 size = rec->dtrd_size; 6210 valoffs = offs + rec->dtrd_offset; 6211 6212 if (DTRACEACT_ISAGG(act->dta_kind)) { 6213 uint64_t v = 0xbad; 6214 dtrace_aggregation_t *agg; 6215 6216 agg = (dtrace_aggregation_t *)act; 6217 6218 if ((dp = act->dta_difo) != NULL) 6219 v = dtrace_dif_emulate(dp, 6220 &mstate, vstate, state); 6221 6222 if (*flags & CPU_DTRACE_ERROR) 6223 continue; 6224 6225 /* 6226 * Note that we always pass the expression 6227 * value from the previous iteration of the 6228 * action loop. This value will only be used 6229 * if there is an expression argument to the 6230 * aggregating action, denoted by the 6231 * dtag_hasarg field. 6232 */ 6233 dtrace_aggregate(agg, buf, 6234 offs, aggbuf, v, val); 6235 continue; 6236 } 6237 6238 switch (act->dta_kind) { 6239 case DTRACEACT_STOP: 6240 if (dtrace_priv_proc_destructive(state)) 6241 dtrace_action_stop(); 6242 continue; 6243 6244 case DTRACEACT_BREAKPOINT: 6245 if (dtrace_priv_kernel_destructive(state)) 6246 dtrace_action_breakpoint(ecb); 6247 continue; 6248 6249 case DTRACEACT_PANIC: 6250 if (dtrace_priv_kernel_destructive(state)) 6251 dtrace_action_panic(ecb); 6252 continue; 6253 6254 case DTRACEACT_STACK: 6255 if (!dtrace_priv_kernel(state)) 6256 continue; 6257 6258 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6259 size / sizeof (pc_t), probe->dtpr_aframes, 6260 DTRACE_ANCHORED(probe) ? NULL : 6261 (uint32_t *)arg0); 6262 continue; 6263 6264 #if defined(sun) 6265 case DTRACEACT_JSTACK: 6266 case DTRACEACT_USTACK: 6267 if (!dtrace_priv_proc(state)) 6268 continue; 6269 6270 /* 6271 * See comment in DIF_VAR_PID. 6272 */ 6273 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6274 CPU_ON_INTR(CPU)) { 6275 int depth = DTRACE_USTACK_NFRAMES( 6276 rec->dtrd_arg) + 1; 6277 6278 dtrace_bzero((void *)(tomax + valoffs), 6279 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6280 + depth * sizeof (uint64_t)); 6281 6282 continue; 6283 } 6284 6285 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6286 curproc->p_dtrace_helpers != NULL) { 6287 /* 6288 * This is the slow path -- we have 6289 * allocated string space, and we're 6290 * getting the stack of a process that 6291 * has helpers. Call into a separate 6292 * routine to perform this processing. 
6293 */ 6294 dtrace_action_ustack(&mstate, state, 6295 (uint64_t *)(tomax + valoffs), 6296 rec->dtrd_arg); 6297 continue; 6298 } 6299 6300 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6301 dtrace_getupcstack((uint64_t *) 6302 (tomax + valoffs), 6303 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6304 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6305 continue; 6306 #endif 6307 6308 default: 6309 break; 6310 } 6311 6312 dp = act->dta_difo; 6313 ASSERT(dp != NULL); 6314 6315 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6316 6317 if (*flags & CPU_DTRACE_ERROR) 6318 continue; 6319 6320 switch (act->dta_kind) { 6321 case DTRACEACT_SPECULATE: 6322 ASSERT(buf == &state->dts_buffer[cpuid]); 6323 buf = dtrace_speculation_buffer(state, 6324 cpuid, val); 6325 6326 if (buf == NULL) { 6327 *flags |= CPU_DTRACE_DROP; 6328 continue; 6329 } 6330 6331 offs = dtrace_buffer_reserve(buf, 6332 ecb->dte_needed, ecb->dte_alignment, 6333 state, NULL); 6334 6335 if (offs < 0) { 6336 *flags |= CPU_DTRACE_DROP; 6337 continue; 6338 } 6339 6340 tomax = buf->dtb_tomax; 6341 ASSERT(tomax != NULL); 6342 6343 if (ecb->dte_size != 0) 6344 DTRACE_STORE(uint32_t, tomax, offs, 6345 ecb->dte_epid); 6346 continue; 6347 6348 case DTRACEACT_PRINTM: { 6349 /* The DIF returns a 'memref'. */ 6350 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6351 6352 /* Get the size from the memref. */ 6353 size = memref[1]; 6354 6355 /* 6356 * Check if the size exceeds the allocated 6357 * buffer size. 6358 */ 6359 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6360 /* Flag a drop! */ 6361 *flags |= CPU_DTRACE_DROP; 6362 continue; 6363 } 6364 6365 /* Store the size in the buffer first. */ 6366 DTRACE_STORE(uintptr_t, tomax, 6367 valoffs, size); 6368 6369 /* 6370 * Offset the buffer address to the start 6371 * of the data. 6372 */ 6373 valoffs += sizeof(uintptr_t); 6374 6375 /* 6376 * Reset to the memory address rather than 6377 * the memref array, then let the BYREF 6378 * code below do the work to store the 6379 * memory data in the buffer. 6380 */ 6381 val = memref[0]; 6382 break; 6383 } 6384 6385 case DTRACEACT_PRINTT: { 6386 /* The DIF returns a 'typeref'. */ 6387 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6388 char c = '\0' + 1; 6389 size_t s; 6390 6391 /* 6392 * Get the type string length and round it 6393 * up so that the data that follows is 6394 * aligned for easy access. 6395 */ 6396 size_t typs = strlen((char *) typeref[2]) + 1; 6397 typs = roundup(typs, sizeof(uintptr_t)); 6398 6399 /* 6400 * Get the size from the typeref using the 6401 * number of elements and the type size. 6402 */ 6403 size = typeref[1] * typeref[3]; 6404 6405 /* 6406 * Check if the size exceeds the allocated 6407 * buffer size. 6408 */ 6409 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6410 /* Flag a drop! */ 6411 *flags |= CPU_DTRACE_DROP; 6412 continue; 6413 } 6414 6415 /* Store the size in the buffer first. */ 6416 DTRACE_STORE(uintptr_t, tomax, 6417 valoffs, size); 6418 valoffs += sizeof(uintptr_t); 6419 6420 /* Store the type size in the buffer. */ 6421 DTRACE_STORE(uintptr_t, tomax, 6422 valoffs, typeref[3]); 6423 valoffs += sizeof(uintptr_t); 6424 6425 val = typeref[2]; 6426 6427 for (s = 0; s < typs; s++) { 6428 if (c != '\0') 6429 c = dtrace_load8(val++); 6430 6431 DTRACE_STORE(uint8_t, tomax, 6432 valoffs++, c); 6433 } 6434 6435 /* 6436 * Reset to the memory address rather than 6437 * the typeref array, then let the BYREF 6438 * code below do the work to store the 6439 * memory data in the buffer.
6440 */ 6441 val = typeref[0]; 6442 break; 6443 } 6444 6445 case DTRACEACT_CHILL: 6446 if (dtrace_priv_kernel_destructive(state)) 6447 dtrace_action_chill(&mstate, val); 6448 continue; 6449 6450 case DTRACEACT_RAISE: 6451 if (dtrace_priv_proc_destructive(state)) 6452 dtrace_action_raise(val); 6453 continue; 6454 6455 case DTRACEACT_COMMIT: 6456 ASSERT(!committed); 6457 6458 /* 6459 * We need to commit our buffer state. 6460 */ 6461 if (ecb->dte_size) 6462 buf->dtb_offset = offs + ecb->dte_size; 6463 buf = &state->dts_buffer[cpuid]; 6464 dtrace_speculation_commit(state, cpuid, val); 6465 committed = 1; 6466 continue; 6467 6468 case DTRACEACT_DISCARD: 6469 dtrace_speculation_discard(state, cpuid, val); 6470 continue; 6471 6472 case DTRACEACT_DIFEXPR: 6473 case DTRACEACT_LIBACT: 6474 case DTRACEACT_PRINTF: 6475 case DTRACEACT_PRINTA: 6476 case DTRACEACT_SYSTEM: 6477 case DTRACEACT_FREOPEN: 6478 break; 6479 6480 case DTRACEACT_SYM: 6481 case DTRACEACT_MOD: 6482 if (!dtrace_priv_kernel(state)) 6483 continue; 6484 break; 6485 6486 case DTRACEACT_USYM: 6487 case DTRACEACT_UMOD: 6488 case DTRACEACT_UADDR: { 6489 #if defined(sun) 6490 struct pid *pid = curthread->t_procp->p_pidp; 6491 #endif 6492 if (!dtrace_priv_proc(state)) 6493 continue; 6494 6495 DTRACE_STORE(uint64_t, tomax, 6496 #if defined(sun) 6497 valoffs, (uint64_t)pid->pid_id); 6498 #else 6499 valoffs, (uint64_t) curproc->p_pid); 6500 #endif 6501 DTRACE_STORE(uint64_t, tomax, 6502 valoffs + sizeof (uint64_t), val); 6503 6504 continue; 6505 } 6506 6507 case DTRACEACT_EXIT: { 6508 /* 6509 * For the exit action, we are going to attempt 6510 * to atomically set our activity to be 6511 * draining. If this fails (either because 6512 * another CPU has beat us to the exit action, 6513 * or because our current activity is something 6514 * other than ACTIVE or WARMUP), we will 6515 * continue. This assures that the exit action 6516 * can be successfully recorded at most once 6517 * when we're in the ACTIVE state. If we're 6518 * encountering the exit() action while in 6519 * COOLDOWN, however, we want to honor the new 6520 * status code. (We know that we're the only 6521 * thread in COOLDOWN, so there is no race.) 6522 */ 6523 void *activity = &state->dts_activity; 6524 dtrace_activity_t current = state->dts_activity; 6525 6526 if (current == DTRACE_ACTIVITY_COOLDOWN) 6527 break; 6528 6529 if (current != DTRACE_ACTIVITY_WARMUP) 6530 current = DTRACE_ACTIVITY_ACTIVE; 6531 6532 if (dtrace_cas32(activity, current, 6533 DTRACE_ACTIVITY_DRAINING) != current) { 6534 *flags |= CPU_DTRACE_DROP; 6535 continue; 6536 } 6537 6538 break; 6539 } 6540 6541 default: 6542 ASSERT(0); 6543 } 6544 6545 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6546 uintptr_t end = valoffs + size; 6547 6548 if (!dtrace_vcanload((void *)(uintptr_t)val, 6549 &dp->dtdo_rtype, &mstate, vstate)) 6550 continue; 6551 6552 /* 6553 * If this is a string, we're going to only 6554 * load until we find the zero byte -- after 6555 * which we'll store zero bytes. 
6556 */ 6557 if (dp->dtdo_rtype.dtdt_kind == 6558 DIF_TYPE_STRING) { 6559 char c = '\0' + 1; 6560 int intuple = act->dta_intuple; 6561 size_t s; 6562 6563 for (s = 0; s < size; s++) { 6564 if (c != '\0') 6565 c = dtrace_load8(val++); 6566 6567 DTRACE_STORE(uint8_t, tomax, 6568 valoffs++, c); 6569 6570 if (c == '\0' && intuple) 6571 break; 6572 } 6573 6574 continue; 6575 } 6576 6577 while (valoffs < end) { 6578 DTRACE_STORE(uint8_t, tomax, valoffs++, 6579 dtrace_load8(val++)); 6580 } 6581 6582 continue; 6583 } 6584 6585 switch (size) { 6586 case 0: 6587 break; 6588 6589 case sizeof (uint8_t): 6590 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6591 break; 6592 case sizeof (uint16_t): 6593 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6594 break; 6595 case sizeof (uint32_t): 6596 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6597 break; 6598 case sizeof (uint64_t): 6599 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6600 break; 6601 default: 6602 /* 6603 * Any other size should have been returned by 6604 * reference, not by value. 6605 */ 6606 ASSERT(0); 6607 break; 6608 } 6609 } 6610 6611 if (*flags & CPU_DTRACE_DROP) 6612 continue; 6613 6614 if (*flags & CPU_DTRACE_FAULT) { 6615 int ndx; 6616 dtrace_action_t *err; 6617 6618 buf->dtb_errors++; 6619 6620 if (probe->dtpr_id == dtrace_probeid_error) { 6621 /* 6622 * There's nothing we can do -- we had an 6623 * error on the error probe. We bump an 6624 * error counter to at least indicate that 6625 * this condition happened. 6626 */ 6627 dtrace_error(&state->dts_dblerrors); 6628 continue; 6629 } 6630 6631 if (vtime) { 6632 /* 6633 * Before recursing on dtrace_probe(), we 6634 * need to explicitly clear out our start 6635 * time to prevent it from being accumulated 6636 * into t_dtrace_vtime. 6637 */ 6638 curthread->t_dtrace_start = 0; 6639 } 6640 6641 /* 6642 * Iterate over the actions to figure out which action 6643 * we were processing when we experienced the error. 6644 * Note that act points _past_ the faulting action; if 6645 * act is ecb->dte_action, the fault was in the 6646 * predicate, if it's ecb->dte_action->dta_next it's 6647 * in action #1, and so on. 6648 */ 6649 for (err = ecb->dte_action, ndx = 0; 6650 err != act; err = err->dta_next, ndx++) 6651 continue; 6652 6653 dtrace_probe_error(state, ecb->dte_epid, ndx, 6654 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 6655 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6656 cpu_core[cpuid].cpuc_dtrace_illval); 6657 6658 continue; 6659 } 6660 6661 if (!committed) 6662 buf->dtb_offset = offs + ecb->dte_size; 6663 } 6664 6665 if (vtime) 6666 curthread->t_dtrace_start = dtrace_gethrtime(); 6667 6668 dtrace_interrupt_enable(cookie); 6669 } 6670 6671 /* 6672 * DTrace Probe Hashing Functions 6673 * 6674 * The functions in this section (and indeed, the functions in remaining 6675 * sections) are not _called_ from probe context. (Any exceptions to this are 6676 * marked with a "Note:".) Rather, they are called from elsewhere in the 6677 * DTrace framework to look-up probes in, add probes to and remove probes from 6678 * the DTrace probe hashes. (Each probe is hashed by each element of the 6679 * probe tuple -- allowing for fast lookups, regardless of what was 6680 * specified.) 
6681 */ 6682 static uint_t 6683 dtrace_hash_str(const char *p) 6684 { 6685 unsigned int g; 6686 uint_t hval = 0; 6687 6688 while (*p) { 6689 hval = (hval << 4) + *p++; 6690 if ((g = (hval & 0xf0000000)) != 0) 6691 hval ^= g >> 24; 6692 hval &= ~g; 6693 } 6694 return (hval); 6695 } 6696 6697 static dtrace_hash_t * 6698 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6699 { 6700 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6701 6702 hash->dth_stroffs = stroffs; 6703 hash->dth_nextoffs = nextoffs; 6704 hash->dth_prevoffs = prevoffs; 6705 6706 hash->dth_size = 1; 6707 hash->dth_mask = hash->dth_size - 1; 6708 6709 hash->dth_tab = kmem_zalloc(hash->dth_size * 6710 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6711 6712 return (hash); 6713 } 6714 6715 static void 6716 dtrace_hash_destroy(dtrace_hash_t *hash) 6717 { 6718 #ifdef DEBUG 6719 int i; 6720 6721 for (i = 0; i < hash->dth_size; i++) 6722 ASSERT(hash->dth_tab[i] == NULL); 6723 #endif 6724 6725 kmem_free(hash->dth_tab, 6726 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6727 kmem_free(hash, sizeof (dtrace_hash_t)); 6728 } 6729 6730 static void 6731 dtrace_hash_resize(dtrace_hash_t *hash) 6732 { 6733 int size = hash->dth_size, i, ndx; 6734 int new_size = hash->dth_size << 1; 6735 int new_mask = new_size - 1; 6736 dtrace_hashbucket_t **new_tab, *bucket, *next; 6737 6738 ASSERT((new_size & new_mask) == 0); 6739 6740 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6741 6742 for (i = 0; i < size; i++) { 6743 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6744 dtrace_probe_t *probe = bucket->dthb_chain; 6745 6746 ASSERT(probe != NULL); 6747 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6748 6749 next = bucket->dthb_next; 6750 bucket->dthb_next = new_tab[ndx]; 6751 new_tab[ndx] = bucket; 6752 } 6753 } 6754 6755 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6756 hash->dth_tab = new_tab; 6757 hash->dth_size = new_size; 6758 hash->dth_mask = new_mask; 6759 } 6760 6761 static void 6762 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6763 { 6764 int hashval = DTRACE_HASHSTR(hash, new); 6765 int ndx = hashval & hash->dth_mask; 6766 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6767 dtrace_probe_t **nextp, **prevp; 6768 6769 for (; bucket != NULL; bucket = bucket->dthb_next) { 6770 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6771 goto add; 6772 } 6773 6774 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6775 dtrace_hash_resize(hash); 6776 dtrace_hash_add(hash, new); 6777 return; 6778 } 6779 6780 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6781 bucket->dthb_next = hash->dth_tab[ndx]; 6782 hash->dth_tab[ndx] = bucket; 6783 hash->dth_nbuckets++; 6784 6785 add: 6786 nextp = DTRACE_HASHNEXT(hash, new); 6787 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6788 *nextp = bucket->dthb_chain; 6789 6790 if (bucket->dthb_chain != NULL) { 6791 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6792 ASSERT(*prevp == NULL); 6793 *prevp = new; 6794 } 6795 6796 bucket->dthb_chain = new; 6797 bucket->dthb_len++; 6798 } 6799 6800 static dtrace_probe_t * 6801 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6802 { 6803 int hashval = DTRACE_HASHSTR(hash, template); 6804 int ndx = hashval & hash->dth_mask; 6805 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6806 6807 for (; bucket != NULL; bucket = bucket->dthb_next) { 6808 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6809 return 
(bucket->dthb_chain); 6810 } 6811 6812 return (NULL); 6813 } 6814 6815 static int 6816 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6817 { 6818 int hashval = DTRACE_HASHSTR(hash, template); 6819 int ndx = hashval & hash->dth_mask; 6820 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6821 6822 for (; bucket != NULL; bucket = bucket->dthb_next) { 6823 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6824 return (bucket->dthb_len); 6825 } 6826 6827 return (0); 6828 } 6829 6830 static void 6831 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6832 { 6833 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6834 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6835 6836 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6837 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6838 6839 /* 6840 * Find the bucket that we're removing this probe from. 6841 */ 6842 for (; bucket != NULL; bucket = bucket->dthb_next) { 6843 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6844 break; 6845 } 6846 6847 ASSERT(bucket != NULL); 6848 6849 if (*prevp == NULL) { 6850 if (*nextp == NULL) { 6851 /* 6852 * The removed probe was the only probe on this 6853 * bucket; we need to remove the bucket. 6854 */ 6855 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6856 6857 ASSERT(bucket->dthb_chain == probe); 6858 ASSERT(b != NULL); 6859 6860 if (b == bucket) { 6861 hash->dth_tab[ndx] = bucket->dthb_next; 6862 } else { 6863 while (b->dthb_next != bucket) 6864 b = b->dthb_next; 6865 b->dthb_next = bucket->dthb_next; 6866 } 6867 6868 ASSERT(hash->dth_nbuckets > 0); 6869 hash->dth_nbuckets--; 6870 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 6871 return; 6872 } 6873 6874 bucket->dthb_chain = *nextp; 6875 } else { 6876 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 6877 } 6878 6879 if (*nextp != NULL) 6880 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 6881 } 6882 6883 /* 6884 * DTrace Utility Functions 6885 * 6886 * These are random utility functions that are _not_ called from probe context. 6887 */ 6888 static int 6889 dtrace_badattr(const dtrace_attribute_t *a) 6890 { 6891 return (a->dtat_name > DTRACE_STABILITY_MAX || 6892 a->dtat_data > DTRACE_STABILITY_MAX || 6893 a->dtat_class > DTRACE_CLASS_MAX); 6894 } 6895 6896 /* 6897 * Return a duplicate copy of a string. If the specified string is NULL, 6898 * this function returns a zero-length string. 6899 */ 6900 static char * 6901 dtrace_strdup(const char *str) 6902 { 6903 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP); 6904 6905 if (str != NULL) 6906 (void) strcpy(new, str); 6907 6908 return (new); 6909 } 6910 6911 #define DTRACE_ISALPHA(c) \ 6912 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 6913 6914 static int 6915 dtrace_badname(const char *s) 6916 { 6917 char c; 6918 6919 if (s == NULL || (c = *s++) == '\0') 6920 return (0); 6921 6922 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 6923 return (1); 6924 6925 while ((c = *s++) != '\0') { 6926 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 6927 c != '-' && c != '_' && c != '.' && c != '`') 6928 return (1); 6929 } 6930 6931 return (0); 6932 } 6933 6934 static void 6935 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 6936 { 6937 uint32_t priv; 6938 6939 #if defined(sun) 6940 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 6941 /* 6942 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 
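 * Otherwise the credential's fine-grained privileges are translated
 * bit-for-bit below: dtrace_kernel grants both the KERNEL and USER
 * privileges, dtrace_user grants only USER, and dtrace_proc, proc_owner
 * and proc_zone map to the PROC, OWNER and ZONEOWNER bits, respectively.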
6943 */ 6944 priv = DTRACE_PRIV_ALL; 6945 } else { 6946 *uidp = crgetuid(cr); 6947 *zoneidp = crgetzoneid(cr); 6948 6949 priv = 0; 6950 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 6951 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 6952 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 6953 priv |= DTRACE_PRIV_USER; 6954 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 6955 priv |= DTRACE_PRIV_PROC; 6956 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 6957 priv |= DTRACE_PRIV_OWNER; 6958 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 6959 priv |= DTRACE_PRIV_ZONEOWNER; 6960 } 6961 #else 6962 priv = DTRACE_PRIV_ALL; 6963 #endif 6964 6965 *privp = priv; 6966 } 6967 6968 #ifdef DTRACE_ERRDEBUG 6969 static void 6970 dtrace_errdebug(const char *str) 6971 { 6972 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 6973 int occupied = 0; 6974 6975 mutex_enter(&dtrace_errlock); 6976 dtrace_errlast = str; 6977 dtrace_errthread = curthread; 6978 6979 while (occupied++ < DTRACE_ERRHASHSZ) { 6980 if (dtrace_errhash[hval].dter_msg == str) { 6981 dtrace_errhash[hval].dter_count++; 6982 goto out; 6983 } 6984 6985 if (dtrace_errhash[hval].dter_msg != NULL) { 6986 hval = (hval + 1) % DTRACE_ERRHASHSZ; 6987 continue; 6988 } 6989 6990 dtrace_errhash[hval].dter_msg = str; 6991 dtrace_errhash[hval].dter_count = 1; 6992 goto out; 6993 } 6994 6995 panic("dtrace: undersized error hash"); 6996 out: 6997 mutex_exit(&dtrace_errlock); 6998 } 6999 #endif 7000 7001 /* 7002 * DTrace Matching Functions 7003 * 7004 * These functions are used to match groups of probes, given some elements of 7005 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7006 */ 7007 static int 7008 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7009 zoneid_t zoneid) 7010 { 7011 if (priv != DTRACE_PRIV_ALL) { 7012 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7013 uint32_t match = priv & ppriv; 7014 7015 /* 7016 * No PRIV_DTRACE_* privileges... 7017 */ 7018 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7019 DTRACE_PRIV_KERNEL)) == 0) 7020 return (0); 7021 7022 /* 7023 * No matching bits, but there were bits to match... 7024 */ 7025 if (match == 0 && ppriv != 0) 7026 return (0); 7027 7028 /* 7029 * Need to have permissions to the process, but don't... 7030 */ 7031 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7032 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7033 return (0); 7034 } 7035 7036 /* 7037 * Need to be in the same zone unless we possess the 7038 * privilege to examine all zones. 7039 */ 7040 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7041 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7042 return (0); 7043 } 7044 } 7045 7046 return (1); 7047 } 7048 7049 /* 7050 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7051 * consists of input pattern strings and an ops-vector to evaluate them. 7052 * This function returns >0 for match, 0 for no match, and <0 for error. 
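 *
 * For example, for the description syscall::read:entry, the compiled key
 * (see dtrace_probekey(), below) carries dtpk_prov = "syscall",
 * dtpk_func = "read" and dtpk_name = "entry", each paired with
 * dtrace_match_string(), while the empty module element is paired with
 * dtrace_match_nul(); a pattern such as "read*" would instead be paired
 * with dtrace_match_glob().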
7053 */ 7054 static int 7055 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7056 uint32_t priv, uid_t uid, zoneid_t zoneid) 7057 { 7058 dtrace_provider_t *pvp = prp->dtpr_provider; 7059 int rv; 7060 7061 if (pvp->dtpv_defunct) 7062 return (0); 7063 7064 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7065 return (rv); 7066 7067 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7068 return (rv); 7069 7070 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7071 return (rv); 7072 7073 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7074 return (rv); 7075 7076 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7077 return (0); 7078 7079 return (rv); 7080 } 7081 7082 /* 7083 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7084 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7085 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7086 * In addition, all of the recursion cases except for '*' matching have been 7087 * unwound. For '*', we still implement recursive evaluation, but a depth 7088 * counter is maintained and matching is aborted if we recurse too deep. 7089 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7090 */ 7091 static int 7092 dtrace_match_glob(const char *s, const char *p, int depth) 7093 { 7094 const char *olds; 7095 char s1, c; 7096 int gs; 7097 7098 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7099 return (-1); 7100 7101 if (s == NULL) 7102 s = ""; /* treat NULL as empty string */ 7103 7104 top: 7105 olds = s; 7106 s1 = *s++; 7107 7108 if (p == NULL) 7109 return (0); 7110 7111 if ((c = *p++) == '\0') 7112 return (s1 == '\0'); 7113 7114 switch (c) { 7115 case '[': { 7116 int ok = 0, notflag = 0; 7117 char lc = '\0'; 7118 7119 if (s1 == '\0') 7120 return (0); 7121 7122 if (*p == '!') { 7123 notflag = 1; 7124 p++; 7125 } 7126 7127 if ((c = *p++) == '\0') 7128 return (0); 7129 7130 do { 7131 if (c == '-' && lc != '\0' && *p != ']') { 7132 if ((c = *p++) == '\0') 7133 return (0); 7134 if (c == '\\' && (c = *p++) == '\0') 7135 return (0); 7136 7137 if (notflag) { 7138 if (s1 < lc || s1 > c) 7139 ok++; 7140 else 7141 return (0); 7142 } else if (lc <= s1 && s1 <= c) 7143 ok++; 7144 7145 } else if (c == '\\' && (c = *p++) == '\0') 7146 return (0); 7147 7148 lc = c; /* save left-hand 'c' for next iteration */ 7149 7150 if (notflag) { 7151 if (s1 != c) 7152 ok++; 7153 else 7154 return (0); 7155 } else if (s1 == c) 7156 ok++; 7157 7158 if ((c = *p++) == '\0') 7159 return (0); 7160 7161 } while (c != ']'); 7162 7163 if (ok) 7164 goto top; 7165 7166 return (0); 7167 } 7168 7169 case '\\': 7170 if ((c = *p++) == '\0') 7171 return (0); 7172 /*FALLTHRU*/ 7173 7174 default: 7175 if (c != s1) 7176 return (0); 7177 /*FALLTHRU*/ 7178 7179 case '?': 7180 if (s1 != '\0') 7181 goto top; 7182 return (0); 7183 7184 case '*': 7185 while (*p == '*') 7186 p++; /* consecutive *'s are identical to a single one */ 7187 7188 if (*p == '\0') 7189 return (1); 7190 7191 for (s = olds; *s != '\0'; s++) { 7192 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7193 return (gs); 7194 } 7195 7196 return (0); 7197 } 7198 } 7199 7200 /*ARGSUSED*/ 7201 static int 7202 dtrace_match_string(const char *s, const char *p, int depth) 7203 { 7204 return (s != NULL && strcmp(s, p) == 0); 7205 } 7206 7207 /*ARGSUSED*/ 7208 static int 7209 dtrace_match_nul(const char *s, const char *p, int depth) 7210 { 7211 return (1); /* 
always match the empty pattern */ 7212 } 7213 7214 /*ARGSUSED*/ 7215 static int 7216 dtrace_match_nonzero(const char *s, const char *p, int depth) 7217 { 7218 return (s != NULL && s[0] != '\0'); 7219 } 7220 7221 static int 7222 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7223 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7224 { 7225 dtrace_probe_t template, *probe; 7226 dtrace_hash_t *hash = NULL; 7227 int len, rc, best = INT_MAX, nmatched = 0; 7228 dtrace_id_t i; 7229 7230 ASSERT(MUTEX_HELD(&dtrace_lock)); 7231 7232 /* 7233 * If the probe ID is specified in the key, just lookup by ID and 7234 * invoke the match callback once if a matching probe is found. 7235 */ 7236 if (pkp->dtpk_id != DTRACE_IDNONE) { 7237 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7238 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7239 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL) 7240 return (DTRACE_MATCH_FAIL); 7241 nmatched++; 7242 } 7243 return (nmatched); 7244 } 7245 7246 template.dtpr_mod = (char *)pkp->dtpk_mod; 7247 template.dtpr_func = (char *)pkp->dtpk_func; 7248 template.dtpr_name = (char *)pkp->dtpk_name; 7249 7250 /* 7251 * We want to find the most distinct of the module name, function 7252 * name, and name. So for each one that is not a glob pattern or 7253 * empty string, we perform a lookup in the corresponding hash and 7254 * use the hash table with the fewest collisions to do our search. 7255 */ 7256 if (pkp->dtpk_mmatch == &dtrace_match_string && 7257 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7258 best = len; 7259 hash = dtrace_bymod; 7260 } 7261 7262 if (pkp->dtpk_fmatch == &dtrace_match_string && 7263 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7264 best = len; 7265 hash = dtrace_byfunc; 7266 } 7267 7268 if (pkp->dtpk_nmatch == &dtrace_match_string && 7269 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7270 best = len; 7271 hash = dtrace_byname; 7272 } 7273 7274 /* 7275 * If we did not select a hash table, iterate over every probe and 7276 * invoke our callback for each one that matches our input probe key. 7277 */ 7278 if (hash == NULL) { 7279 for (i = 0; i < dtrace_nprobes; i++) { 7280 if ((probe = dtrace_probes[i]) == NULL || 7281 dtrace_match_probe(probe, pkp, priv, uid, 7282 zoneid) <= 0) 7283 continue; 7284 7285 nmatched++; 7286 7287 if ((rc = (*matched)(probe, arg)) != 7288 DTRACE_MATCH_NEXT) { 7289 if (rc == DTRACE_MATCH_FAIL) 7290 return (DTRACE_MATCH_FAIL); 7291 break; 7292 } 7293 } 7294 7295 return (nmatched); 7296 } 7297 7298 /* 7299 * If we selected a hash table, iterate over each probe of the same key 7300 * name and invoke the callback for every probe that matches the other 7301 * attributes of our input probe key. 7302 */ 7303 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7304 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7305 7306 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7307 continue; 7308 7309 nmatched++; 7310 7311 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) { 7312 if (rc == DTRACE_MATCH_FAIL) 7313 return (DTRACE_MATCH_FAIL); 7314 break; 7315 } 7316 } 7317 7318 return (nmatched); 7319 } 7320 7321 /* 7322 * Return the function pointer dtrace_probecmp() should use to compare the 7323 * specified pattern with a string. For NULL or empty patterns, we select 7324 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 
7325 * For non-empty non-glob strings, we use dtrace_match_string(). 7326 */ 7327 static dtrace_probekey_f * 7328 dtrace_probekey_func(const char *p) 7329 { 7330 char c; 7331 7332 if (p == NULL || *p == '\0') 7333 return (&dtrace_match_nul); 7334 7335 while ((c = *p++) != '\0') { 7336 if (c == '[' || c == '?' || c == '*' || c == '\\') 7337 return (&dtrace_match_glob); 7338 } 7339 7340 return (&dtrace_match_string); 7341 } 7342 7343 /* 7344 * Build a probe comparison key for use with dtrace_match_probe() from the 7345 * given probe description. By convention, a null key only matches anchored 7346 * probes: if each field is the empty string, reset dtpk_fmatch to 7347 * dtrace_match_nonzero(). 7348 */ 7349 static void 7350 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7351 { 7352 pkp->dtpk_prov = pdp->dtpd_provider; 7353 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7354 7355 pkp->dtpk_mod = pdp->dtpd_mod; 7356 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7357 7358 pkp->dtpk_func = pdp->dtpd_func; 7359 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7360 7361 pkp->dtpk_name = pdp->dtpd_name; 7362 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7363 7364 pkp->dtpk_id = pdp->dtpd_id; 7365 7366 if (pkp->dtpk_id == DTRACE_IDNONE && 7367 pkp->dtpk_pmatch == &dtrace_match_nul && 7368 pkp->dtpk_mmatch == &dtrace_match_nul && 7369 pkp->dtpk_fmatch == &dtrace_match_nul && 7370 pkp->dtpk_nmatch == &dtrace_match_nul) 7371 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7372 } 7373 7374 /* 7375 * DTrace Provider-to-Framework API Functions 7376 * 7377 * These functions implement much of the Provider-to-Framework API, as 7378 * described in <sys/dtrace.h>. The parts of the API not in this section are 7379 * the functions in the API for probe management (found below), and 7380 * dtrace_probe() itself (found above). 7381 */ 7382 7383 /* 7384 * Register the calling provider with the DTrace framework. This should 7385 * generally be called by DTrace providers in their attach(9E) entry point. 7386 */ 7387 int 7388 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7389 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7390 { 7391 dtrace_provider_t *provider; 7392 7393 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7394 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7395 "arguments", name ? 
name : "<NULL>"); 7396 return (EINVAL); 7397 } 7398 7399 if (name[0] == '\0' || dtrace_badname(name)) { 7400 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7401 "provider name", name); 7402 return (EINVAL); 7403 } 7404 7405 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7406 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7407 pops->dtps_destroy == NULL || 7408 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7409 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7410 "provider ops", name); 7411 return (EINVAL); 7412 } 7413 7414 if (dtrace_badattr(&pap->dtpa_provider) || 7415 dtrace_badattr(&pap->dtpa_mod) || 7416 dtrace_badattr(&pap->dtpa_func) || 7417 dtrace_badattr(&pap->dtpa_name) || 7418 dtrace_badattr(&pap->dtpa_args)) { 7419 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7420 "provider attributes", name); 7421 return (EINVAL); 7422 } 7423 7424 if (priv & ~DTRACE_PRIV_ALL) { 7425 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7426 "privilege attributes", name); 7427 return (EINVAL); 7428 } 7429 7430 if ((priv & DTRACE_PRIV_KERNEL) && 7431 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7432 pops->dtps_usermode == NULL) { 7433 cmn_err(CE_WARN, "failed to register provider '%s': need " 7434 "dtps_usermode() op for given privilege attributes", name); 7435 return (EINVAL); 7436 } 7437 7438 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7439 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7440 (void) strcpy(provider->dtpv_name, name); 7441 7442 provider->dtpv_attr = *pap; 7443 provider->dtpv_priv.dtpp_flags = priv; 7444 if (cr != NULL) { 7445 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7446 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7447 } 7448 provider->dtpv_pops = *pops; 7449 7450 if (pops->dtps_provide == NULL) { 7451 ASSERT(pops->dtps_provide_module != NULL); 7452 provider->dtpv_pops.dtps_provide = 7453 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop; 7454 } 7455 7456 if (pops->dtps_provide_module == NULL) { 7457 ASSERT(pops->dtps_provide != NULL); 7458 #if defined(sun) 7459 provider->dtpv_pops.dtps_provide_module = 7460 (void (*)(void *, modctl_t *))dtrace_nullop; 7461 #else 7462 provider->dtpv_pops.dtps_provide_module = 7463 (void (*)(void *, dtrace_modctl_t *))dtrace_nullop; 7464 #endif 7465 } 7466 7467 if (pops->dtps_suspend == NULL) { 7468 ASSERT(pops->dtps_resume == NULL); 7469 provider->dtpv_pops.dtps_suspend = 7470 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7471 provider->dtpv_pops.dtps_resume = 7472 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7473 } 7474 7475 provider->dtpv_arg = arg; 7476 *idp = (dtrace_provider_id_t)provider; 7477 7478 if (pops == &dtrace_provider_ops) { 7479 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7480 ASSERT(MUTEX_HELD(&dtrace_lock)); 7481 ASSERT(dtrace_anon.dta_enabling == NULL); 7482 7483 /* 7484 * We make sure that the DTrace provider is at the head of 7485 * the provider chain. 7486 */ 7487 provider->dtpv_next = dtrace_provider; 7488 dtrace_provider = provider; 7489 return (0); 7490 } 7491 7492 mutex_enter(&dtrace_provider_lock); 7493 mutex_enter(&dtrace_lock); 7494 7495 /* 7496 * If there is at least one provider registered, we'll add this 7497 * provider after the first provider. 
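 * (The dtrace provider itself is always kept at the head of the chain;
 * that case was handled above, before this function acquired
 * dtrace_provider_lock and dtrace_lock.)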
7498 */ 7499 if (dtrace_provider != NULL) { 7500 provider->dtpv_next = dtrace_provider->dtpv_next; 7501 dtrace_provider->dtpv_next = provider; 7502 } else { 7503 dtrace_provider = provider; 7504 } 7505 7506 if (dtrace_retained != NULL) { 7507 dtrace_enabling_provide(provider); 7508 7509 /* 7510 * Now we need to call dtrace_enabling_matchall() -- which 7511 * will acquire cpu_lock and dtrace_lock. We therefore need 7512 * to drop all of our locks before calling into it... 7513 */ 7514 mutex_exit(&dtrace_lock); 7515 mutex_exit(&dtrace_provider_lock); 7516 dtrace_enabling_matchall(); 7517 7518 return (0); 7519 } 7520 7521 mutex_exit(&dtrace_lock); 7522 mutex_exit(&dtrace_provider_lock); 7523 7524 return (0); 7525 } 7526 7527 /* 7528 * Unregister the specified provider from the DTrace framework. This should 7529 * generally be called by DTrace providers in their detach(9E) entry point. 7530 */ 7531 int 7532 dtrace_unregister(dtrace_provider_id_t id) 7533 { 7534 dtrace_provider_t *old = (dtrace_provider_t *)id; 7535 dtrace_provider_t *prev = NULL; 7536 int i, self = 0; 7537 dtrace_probe_t *probe, *first = NULL; 7538 7539 if (old->dtpv_pops.dtps_enable == 7540 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) { 7541 /* 7542 * If DTrace itself is the provider, we're called with locks 7543 * already held. 7544 */ 7545 ASSERT(old == dtrace_provider); 7546 #if defined(sun) 7547 ASSERT(dtrace_devi != NULL); 7548 #endif 7549 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7550 ASSERT(MUTEX_HELD(&dtrace_lock)); 7551 self = 1; 7552 7553 if (dtrace_provider->dtpv_next != NULL) { 7554 /* 7555 * There's another provider here; return failure. 7556 */ 7557 return (EBUSY); 7558 } 7559 } else { 7560 mutex_enter(&dtrace_provider_lock); 7561 mutex_enter(&mod_lock); 7562 mutex_enter(&dtrace_lock); 7563 } 7564 7565 /* 7566 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7567 * probes, we refuse to let providers slither away, unless this 7568 * provider has already been explicitly invalidated. 7569 */ 7570 if (!old->dtpv_defunct && 7571 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7572 dtrace_anon.dta_state->dts_necbs > 0))) { 7573 if (!self) { 7574 mutex_exit(&dtrace_lock); 7575 mutex_exit(&mod_lock); 7576 mutex_exit(&dtrace_provider_lock); 7577 } 7578 return (EBUSY); 7579 } 7580 7581 /* 7582 * Attempt to destroy the probes associated with this provider. 7583 */ 7584 for (i = 0; i < dtrace_nprobes; i++) { 7585 if ((probe = dtrace_probes[i]) == NULL) 7586 continue; 7587 7588 if (probe->dtpr_provider != old) 7589 continue; 7590 7591 if (probe->dtpr_ecb == NULL) 7592 continue; 7593 7594 /* 7595 * We have at least one ECB; we can't remove this provider. 7596 */ 7597 if (!self) { 7598 mutex_exit(&dtrace_lock); 7599 mutex_exit(&mod_lock); 7600 mutex_exit(&dtrace_provider_lock); 7601 } 7602 return (EBUSY); 7603 } 7604 7605 /* 7606 * All of the probes for this provider are disabled; we can safely 7607 * remove all of them from their hash chains and from the probe array. 
7608 */ 7609 for (i = 0; i < dtrace_nprobes; i++) { 7610 if ((probe = dtrace_probes[i]) == NULL) 7611 continue; 7612 7613 if (probe->dtpr_provider != old) 7614 continue; 7615 7616 dtrace_probes[i] = NULL; 7617 7618 dtrace_hash_remove(dtrace_bymod, probe); 7619 dtrace_hash_remove(dtrace_byfunc, probe); 7620 dtrace_hash_remove(dtrace_byname, probe); 7621 7622 if (first == NULL) { 7623 first = probe; 7624 probe->dtpr_nextmod = NULL; 7625 } else { 7626 probe->dtpr_nextmod = first; 7627 first = probe; 7628 } 7629 } 7630 7631 /* 7632 * The provider's probes have been removed from the hash chains and 7633 * from the probe array. Now issue a dtrace_sync() to be sure that 7634 * everyone has cleared out from any probe array processing. 7635 */ 7636 dtrace_sync(); 7637 7638 for (probe = first; probe != NULL; probe = first) { 7639 first = probe->dtpr_nextmod; 7640 7641 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7642 probe->dtpr_arg); 7643 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7644 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7645 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7646 #if defined(sun) 7647 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7648 #else 7649 vmem_free(dtrace_arena, (uintptr_t)(probe->dtpr_id), 1); 7650 #endif 7651 kmem_free(probe, sizeof (dtrace_probe_t)); 7652 } 7653 7654 if ((prev = dtrace_provider) == old) { 7655 #if defined(sun) 7656 ASSERT(self || dtrace_devi == NULL); 7657 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7658 #endif 7659 dtrace_provider = old->dtpv_next; 7660 } else { 7661 while (prev != NULL && prev->dtpv_next != old) 7662 prev = prev->dtpv_next; 7663 7664 if (prev == NULL) { 7665 panic("attempt to unregister non-existent " 7666 "dtrace provider %p\n", (void *)id); 7667 } 7668 7669 prev->dtpv_next = old->dtpv_next; 7670 } 7671 7672 if (!self) { 7673 mutex_exit(&dtrace_lock); 7674 mutex_exit(&mod_lock); 7675 mutex_exit(&dtrace_provider_lock); 7676 } 7677 7678 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7679 kmem_free(old, sizeof (dtrace_provider_t)); 7680 7681 return (0); 7682 } 7683 7684 /* 7685 * Invalidate the specified provider. All subsequent probe lookups for the 7686 * specified provider will fail, but its probes will not be removed. 7687 */ 7688 void 7689 dtrace_invalidate(dtrace_provider_id_t id) 7690 { 7691 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7692 7693 ASSERT(pvp->dtpv_pops.dtps_enable != 7694 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7695 7696 mutex_enter(&dtrace_provider_lock); 7697 mutex_enter(&dtrace_lock); 7698 7699 pvp->dtpv_defunct = 1; 7700 7701 mutex_exit(&dtrace_lock); 7702 mutex_exit(&dtrace_provider_lock); 7703 } 7704 7705 /* 7706 * Indicate whether or not DTrace has attached. 7707 */ 7708 int 7709 dtrace_attached(void) 7710 { 7711 /* 7712 * dtrace_provider will be non-NULL iff the DTrace driver has 7713 * attached. (It's non-NULL because DTrace is always itself a 7714 * provider.) 7715 */ 7716 return (dtrace_provider != NULL); 7717 } 7718 7719 /* 7720 * Remove all the unenabled probes for the given provider. This function is 7721 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7722 * -- just as many of its associated probes as it can. 7723 */ 7724 int 7725 dtrace_condense(dtrace_provider_id_t id) 7726 { 7727 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7728 int i; 7729 dtrace_probe_t *probe; 7730 7731 /* 7732 * Make sure this isn't the dtrace provider itself. 
7733 */ 7734 ASSERT(prov->dtpv_pops.dtps_enable != 7735 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop); 7736 7737 mutex_enter(&dtrace_provider_lock); 7738 mutex_enter(&dtrace_lock); 7739 7740 /* 7741 * Attempt to destroy the probes associated with this provider. 7742 */ 7743 for (i = 0; i < dtrace_nprobes; i++) { 7744 if ((probe = dtrace_probes[i]) == NULL) 7745 continue; 7746 7747 if (probe->dtpr_provider != prov) 7748 continue; 7749 7750 if (probe->dtpr_ecb != NULL) 7751 continue; 7752 7753 dtrace_probes[i] = NULL; 7754 7755 dtrace_hash_remove(dtrace_bymod, probe); 7756 dtrace_hash_remove(dtrace_byfunc, probe); 7757 dtrace_hash_remove(dtrace_byname, probe); 7758 7759 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7760 probe->dtpr_arg); 7761 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7762 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7763 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7764 kmem_free(probe, sizeof (dtrace_probe_t)); 7765 #if defined(sun) 7766 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7767 #else 7768 vmem_free(dtrace_arena, ((uintptr_t)i + 1), 1); 7769 #endif 7770 } 7771 7772 mutex_exit(&dtrace_lock); 7773 mutex_exit(&dtrace_provider_lock); 7774 7775 return (0); 7776 } 7777 7778 /* 7779 * DTrace Probe Management Functions 7780 * 7781 * The functions in this section perform the DTrace probe management, 7782 * including functions to create probes, look-up probes, and call into the 7783 * providers to request that probes be provided. Some of these functions are 7784 * in the Provider-to-Framework API; these functions can be identified by the 7785 * fact that they are not declared "static". 7786 */ 7787 7788 /* 7789 * Create a probe with the specified module name, function name, and name. 
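 *
 * A provider typically calls this from its dtps_provide() entry point,
 * guarding against duplicates with dtrace_probe_lookup(). An illustrative
 * sketch follows; the provider id, the module/function/name strings and
 * the probe argument are hypothetical:
 *
 *	static void
 *	example_provide(void *arg, const dtrace_probedesc_t *desc)
 *	{
 *		if (dtrace_probe_lookup(example_id, "example",
 *		    "tick", "fire") != 0)
 *			return;
 *
 *		(void) dtrace_probe_create(example_id, "example",
 *		    "tick", "fire", 0, NULL);
 *	}
 *
 * The identifier returned here is handed back to the provider in its
 * dtps_enable(), dtps_disable() and dtps_destroy() operations, along with
 * the probe argument supplied as the final parameter.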
7790 */ 7791 dtrace_id_t 7792 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7793 const char *func, const char *name, int aframes, void *arg) 7794 { 7795 dtrace_probe_t *probe, **probes; 7796 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7797 dtrace_id_t id; 7798 7799 if (provider == dtrace_provider) { 7800 ASSERT(MUTEX_HELD(&dtrace_lock)); 7801 } else { 7802 mutex_enter(&dtrace_lock); 7803 } 7804 7805 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7806 VM_BESTFIT | VM_SLEEP); 7807 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7808 7809 probe->dtpr_id = id; 7810 probe->dtpr_gen = dtrace_probegen++; 7811 probe->dtpr_mod = dtrace_strdup(mod); 7812 probe->dtpr_func = dtrace_strdup(func); 7813 probe->dtpr_name = dtrace_strdup(name); 7814 probe->dtpr_arg = arg; 7815 probe->dtpr_aframes = aframes; 7816 probe->dtpr_provider = provider; 7817 7818 dtrace_hash_add(dtrace_bymod, probe); 7819 dtrace_hash_add(dtrace_byfunc, probe); 7820 dtrace_hash_add(dtrace_byname, probe); 7821 7822 if (id - 1 >= dtrace_nprobes) { 7823 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7824 size_t nsize = osize << 1; 7825 7826 if (nsize == 0) { 7827 ASSERT(osize == 0); 7828 ASSERT(dtrace_probes == NULL); 7829 nsize = sizeof (dtrace_probe_t *); 7830 } 7831 7832 probes = kmem_zalloc(nsize, KM_SLEEP); 7833 dtrace_probes_size = nsize; 7834 7835 if (dtrace_probes == NULL) { 7836 ASSERT(osize == 0); 7837 dtrace_probes = probes; 7838 dtrace_nprobes = 1; 7839 } else { 7840 dtrace_probe_t **oprobes = dtrace_probes; 7841 7842 bcopy(oprobes, probes, osize); 7843 dtrace_membar_producer(); 7844 dtrace_probes = probes; 7845 7846 dtrace_sync(); 7847 7848 /* 7849 * All CPUs are now seeing the new probes array; we can 7850 * safely free the old array. 7851 */ 7852 kmem_free(oprobes, osize); 7853 dtrace_nprobes <<= 1; 7854 } 7855 7856 ASSERT(id - 1 < dtrace_nprobes); 7857 } 7858 7859 ASSERT(dtrace_probes[id - 1] == NULL); 7860 dtrace_probes[id - 1] = probe; 7861 7862 if (provider != dtrace_provider) 7863 mutex_exit(&dtrace_lock); 7864 7865 return (id); 7866 } 7867 7868 static dtrace_probe_t * 7869 dtrace_probe_lookup_id(dtrace_id_t id) 7870 { 7871 ASSERT(MUTEX_HELD(&dtrace_lock)); 7872 7873 if (id == 0 || id > dtrace_nprobes) 7874 return (NULL); 7875 7876 return (dtrace_probes[id - 1]); 7877 } 7878 7879 static int 7880 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 7881 { 7882 *((dtrace_id_t *)arg) = probe->dtpr_id; 7883 7884 return (DTRACE_MATCH_DONE); 7885 } 7886 7887 /* 7888 * Look up a probe based on provider and one or more of module name, function 7889 * name and probe name. 7890 */ 7891 dtrace_id_t 7892 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 7893 char *func, char *name) 7894 { 7895 dtrace_probekey_t pkey; 7896 dtrace_id_t id; 7897 int match; 7898 7899 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 7900 pkey.dtpk_pmatch = &dtrace_match_string; 7901 pkey.dtpk_mod = mod; 7902 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 7903 pkey.dtpk_func = func; 7904 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 7905 pkey.dtpk_name = name; 7906 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul; 7907 pkey.dtpk_id = DTRACE_IDNONE; 7908 7909 mutex_enter(&dtrace_lock); 7910 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 7911 dtrace_probe_lookup_match, &id); 7912 mutex_exit(&dtrace_lock); 7913 7914 ASSERT(match == 1 || match == 0); 7915 return (match ? 
id : 0); 7916 } 7917 7918 /* 7919 * Returns the probe argument associated with the specified probe. 7920 */ 7921 void * 7922 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 7923 { 7924 dtrace_probe_t *probe; 7925 void *rval = NULL; 7926 7927 mutex_enter(&dtrace_lock); 7928 7929 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 7930 probe->dtpr_provider == (dtrace_provider_t *)id) 7931 rval = probe->dtpr_arg; 7932 7933 mutex_exit(&dtrace_lock); 7934 7935 return (rval); 7936 } 7937 7938 /* 7939 * Copy a probe into a probe description. 7940 */ 7941 static void 7942 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 7943 { 7944 bzero(pdp, sizeof (dtrace_probedesc_t)); 7945 pdp->dtpd_id = prp->dtpr_id; 7946 7947 (void) strncpy(pdp->dtpd_provider, 7948 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 7949 7950 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 7951 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 7952 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 7953 } 7954 7955 #ifdef notyet /* XXX TBD */ 7956 #if !defined(sun) 7957 static int 7958 dtrace_probe_provide_cb(linker_file_t lf, void *arg) 7959 { 7960 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 7961 7962 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 7963 7964 return(0); 7965 } 7966 #endif 7967 #endif /* notyet */ 7968 7969 7970 /* 7971 * Called to indicate that a probe -- or probes -- should be provided by a 7972 * specified provider. If the specified description is NULL, the provider will 7973 * be told to provide all of its probes. (This is done whenever a new 7974 * consumer comes along, or whenever a retained enabling is to be matched.) If 7975 * the specified description is non-NULL, the provider is given the 7976 * opportunity to dynamically provide the specified probe, allowing providers 7977 * to support the creation of probes on-the-fly. (So-called _autocreated_ 7978 * probes.) If the provider is NULL, the operations will be applied to all 7979 * providers; if the provider is non-NULL the operations will only be applied 7980 * to the specified provider. The dtrace_provider_lock must be held, and the 7981 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 7982 * will need to grab the dtrace_lock when it reenters the framework through 7983 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 7984 */ 7985 static void 7986 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 7987 { 7988 #if defined(sun) 7989 modctl_t *ctl; 7990 #else 7991 module_t *mod; 7992 #endif 7993 int all = 0; 7994 7995 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7996 7997 if (prv == NULL) { 7998 all = 1; 7999 prv = dtrace_provider; 8000 } 8001 8002 do { 8003 /* 8004 * First, call the blanket provide operation. 8005 */ 8006 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8007 8008 /* 8009 * Now call the per-module provide operation. We will grab 8010 * mod_lock to prevent the list from being modified. Note 8011 * that this also prevents the mod_busy bits from changing. 8012 * (mod_busy can only be changed with mod_lock held.)
8013 */ 8014 mutex_enter(&mod_lock); 8015 8016 #if defined(sun) 8017 ctl = &modules; 8018 do { 8019 if (ctl->mod_busy || ctl->mod_mp == NULL) 8020 continue; 8021 8022 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8023 8024 } while ((ctl = ctl->mod_next) != &modules); 8025 #else 8026 8027 /* Fake netbsd module first */ 8028 if (mod_nbsd == NULL) { 8029 mod_nbsd = kmem_zalloc(sizeof(*mod_nbsd), KM_SLEEP); 8030 mod_nbsd->mod_info = kmem_zalloc(sizeof(modinfo_t), KM_SLEEP); 8031 mod_nbsd->mod_refcnt = 1; 8032 *((char **)&mod_nbsd->mod_info->mi_name) = "netbsd"; 8033 } 8034 8035 kernconfig_lock(); 8036 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, mod_nbsd); 8037 TAILQ_FOREACH(mod, &module_list, mod_chain) { 8038 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, mod); 8039 } 8040 kernconfig_unlock(); 8041 #endif 8042 8043 mutex_exit(&mod_lock); 8044 } while (all && (prv = prv->dtpv_next) != NULL); 8045 } 8046 8047 #if defined(sun) 8048 /* 8049 * Iterate over each probe, and call the Framework-to-Provider API function 8050 * denoted by offs. 8051 */ 8052 static void 8053 dtrace_probe_foreach(uintptr_t offs) 8054 { 8055 dtrace_provider_t *prov; 8056 void (*func)(void *, dtrace_id_t, void *); 8057 dtrace_probe_t *probe; 8058 dtrace_icookie_t cookie; 8059 int i; 8060 8061 /* 8062 * We disable interrupts to walk through the probe array. This is 8063 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8064 * won't see stale data. 8065 */ 8066 cookie = dtrace_interrupt_disable(); 8067 8068 for (i = 0; i < dtrace_nprobes; i++) { 8069 if ((probe = dtrace_probes[i]) == NULL) 8070 continue; 8071 8072 if (probe->dtpr_ecb == NULL) { 8073 /* 8074 * This probe isn't enabled -- don't call the function. 8075 */ 8076 continue; 8077 } 8078 8079 prov = probe->dtpr_provider; 8080 func = *((void(**)(void *, dtrace_id_t, void *)) 8081 ((uintptr_t)&prov->dtpv_pops + offs)); 8082 8083 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8084 } 8085 8086 dtrace_interrupt_enable(cookie); 8087 } 8088 #endif 8089 8090 static int 8091 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8092 { 8093 dtrace_probekey_t pkey; 8094 uint32_t priv; 8095 uid_t uid; 8096 zoneid_t zoneid; 8097 8098 ASSERT(MUTEX_HELD(&dtrace_lock)); 8099 dtrace_ecb_create_cache = NULL; 8100 8101 if (desc == NULL) { 8102 /* 8103 * If we're passed a NULL description, we're being asked to 8104 * create an ECB with a NULL probe. 
8105 */ 8106 (void) dtrace_ecb_create_enable(NULL, enab); 8107 return (0); 8108 } 8109 8110 dtrace_probekey(desc, &pkey); 8111 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8112 &priv, &uid, &zoneid); 8113 8114 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8115 enab)); 8116 } 8117 8118 /* 8119 * DTrace Helper Provider Functions 8120 */ 8121 static void 8122 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8123 { 8124 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8125 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8126 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8127 } 8128 8129 static void 8130 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8131 const dof_provider_t *dofprov, char *strtab) 8132 { 8133 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8134 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8135 dofprov->dofpv_provattr); 8136 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8137 dofprov->dofpv_modattr); 8138 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8139 dofprov->dofpv_funcattr); 8140 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8141 dofprov->dofpv_nameattr); 8142 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8143 dofprov->dofpv_argsattr); 8144 } 8145 8146 static void 8147 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8148 { 8149 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8150 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8151 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8152 dof_provider_t *provider; 8153 dof_probe_t *probe; 8154 uint32_t *off, *enoff; 8155 uint8_t *arg; 8156 char *strtab; 8157 uint_t i, nprobes; 8158 dtrace_helper_provdesc_t dhpv; 8159 dtrace_helper_probedesc_t dhpb; 8160 dtrace_meta_t *meta = dtrace_meta_pid; 8161 dtrace_mops_t *mops = &meta->dtm_mops; 8162 void *parg; 8163 8164 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8165 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8166 provider->dofpv_strtab * dof->dofh_secsize); 8167 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8168 provider->dofpv_probes * dof->dofh_secsize); 8169 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8170 provider->dofpv_prargs * dof->dofh_secsize); 8171 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8172 provider->dofpv_proffs * dof->dofh_secsize); 8173 8174 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8175 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8176 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8177 enoff = NULL; 8178 8179 /* 8180 * See dtrace_helper_provider_validate(). 8181 */ 8182 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8183 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8184 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8185 provider->dofpv_prenoffs * dof->dofh_secsize); 8186 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8187 } 8188 8189 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8190 8191 /* 8192 * Create the provider. 8193 */ 8194 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8195 8196 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8197 return; 8198 8199 meta->dtm_count++; 8200 8201 /* 8202 * Create the probes. 
8203 */ 8204 for (i = 0; i < nprobes; i++) { 8205 probe = (dof_probe_t *)(uintptr_t)(daddr + 8206 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8207 8208 dhpb.dthpb_mod = dhp->dofhp_mod; 8209 dhpb.dthpb_func = strtab + probe->dofpr_func; 8210 dhpb.dthpb_name = strtab + probe->dofpr_name; 8211 dhpb.dthpb_base = probe->dofpr_addr; 8212 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8213 dhpb.dthpb_noffs = probe->dofpr_noffs; 8214 if (enoff != NULL) { 8215 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8216 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8217 } else { 8218 dhpb.dthpb_enoffs = NULL; 8219 dhpb.dthpb_nenoffs = 0; 8220 } 8221 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8222 dhpb.dthpb_nargc = probe->dofpr_nargc; 8223 dhpb.dthpb_xargc = probe->dofpr_xargc; 8224 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8225 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8226 8227 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8228 } 8229 } 8230 8231 static void 8232 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8233 { 8234 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8235 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8236 int i; 8237 8238 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8239 8240 for (i = 0; i < dof->dofh_secnum; i++) { 8241 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8242 dof->dofh_secoff + i * dof->dofh_secsize); 8243 8244 if (sec->dofs_type != DOF_SECT_PROVIDER) 8245 continue; 8246 8247 dtrace_helper_provide_one(dhp, sec, pid); 8248 } 8249 8250 /* 8251 * We may have just created probes, so we must now rematch against 8252 * any retained enablings. Note that this call will acquire both 8253 * cpu_lock and dtrace_lock; the fact that we are holding 8254 * dtrace_meta_lock now is what defines the ordering with respect to 8255 * these three locks. 8256 */ 8257 dtrace_enabling_matchall(); 8258 } 8259 8260 #if defined(sun) 8261 static void 8262 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8263 { 8264 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8265 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8266 dof_sec_t *str_sec; 8267 dof_provider_t *provider; 8268 char *strtab; 8269 dtrace_helper_provdesc_t dhpv; 8270 dtrace_meta_t *meta = dtrace_meta_pid; 8271 dtrace_mops_t *mops = &meta->dtm_mops; 8272 8273 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8274 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8275 provider->dofpv_strtab * dof->dofh_secsize); 8276 8277 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8278 8279 /* 8280 * Create the provider. 8281 */ 8282 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8283 8284 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8285 8286 meta->dtm_count--; 8287 } 8288 8289 static void 8290 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8291 { 8292 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8293 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8294 int i; 8295 8296 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8297 8298 for (i = 0; i < dof->dofh_secnum; i++) { 8299 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8300 dof->dofh_secoff + i * dof->dofh_secsize); 8301 8302 if (sec->dofs_type != DOF_SECT_PROVIDER) 8303 continue; 8304 8305 dtrace_helper_provider_remove_one(dhp, sec, pid); 8306 } 8307 } 8308 #endif 8309 8310 /* 8311 * DTrace Meta Provider-to-Framework API Functions 8312 * 8313 * These functions implement the Meta Provider-to-Framework API, as described 8314 * in <sys/dtrace.h>. 
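 * dtrace_meta_register() installs the single user-land meta-provider
 * (dtrace_meta_pid) and immediately replays any deferred helper providers
 * to it; dtrace_meta_unregister() removes it again, refusing with EBUSY
 * while the meta-provider still has providers outstanding (dtm_count != 0).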
8315 */ 8316 int 8317 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8318 dtrace_meta_provider_id_t *idp) 8319 { 8320 dtrace_meta_t *meta; 8321 dtrace_helpers_t *help, *next; 8322 int i; 8323 8324 *idp = DTRACE_METAPROVNONE; 8325 8326 /* 8327 * We strictly don't need the name, but we hold onto it for 8328 * debuggability. All hail error queues! 8329 */ 8330 if (name == NULL) { 8331 cmn_err(CE_WARN, "failed to register meta-provider: " 8332 "invalid name"); 8333 return (EINVAL); 8334 } 8335 8336 if (mops == NULL || 8337 mops->dtms_create_probe == NULL || 8338 mops->dtms_provide_pid == NULL || 8339 mops->dtms_remove_pid == NULL) { 8340 cmn_err(CE_WARN, "failed to register meta-register %s: " 8341 "invalid ops", name); 8342 return (EINVAL); 8343 } 8344 8345 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8346 meta->dtm_mops = *mops; 8347 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8348 (void) strcpy(meta->dtm_name, name); 8349 meta->dtm_arg = arg; 8350 8351 mutex_enter(&dtrace_meta_lock); 8352 mutex_enter(&dtrace_lock); 8353 8354 if (dtrace_meta_pid != NULL) { 8355 mutex_exit(&dtrace_lock); 8356 mutex_exit(&dtrace_meta_lock); 8357 cmn_err(CE_WARN, "failed to register meta-register %s: " 8358 "user-land meta-provider exists", name); 8359 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8360 kmem_free(meta, sizeof (dtrace_meta_t)); 8361 return (EINVAL); 8362 } 8363 8364 dtrace_meta_pid = meta; 8365 *idp = (dtrace_meta_provider_id_t)meta; 8366 8367 /* 8368 * If there are providers and probes ready to go, pass them 8369 * off to the new meta provider now. 8370 */ 8371 8372 help = dtrace_deferred_pid; 8373 dtrace_deferred_pid = NULL; 8374 8375 mutex_exit(&dtrace_lock); 8376 8377 while (help != NULL) { 8378 for (i = 0; i < help->dthps_nprovs; i++) { 8379 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8380 help->dthps_pid); 8381 } 8382 8383 next = help->dthps_next; 8384 help->dthps_next = NULL; 8385 help->dthps_prev = NULL; 8386 help->dthps_deferred = 0; 8387 help = next; 8388 } 8389 8390 mutex_exit(&dtrace_meta_lock); 8391 8392 return (0); 8393 } 8394 8395 int 8396 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8397 { 8398 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8399 8400 mutex_enter(&dtrace_meta_lock); 8401 mutex_enter(&dtrace_lock); 8402 8403 if (old == dtrace_meta_pid) { 8404 pp = &dtrace_meta_pid; 8405 } else { 8406 panic("attempt to unregister non-existent " 8407 "dtrace meta-provider %p\n", (void *)old); 8408 } 8409 8410 if (old->dtm_count != 0) { 8411 mutex_exit(&dtrace_lock); 8412 mutex_exit(&dtrace_meta_lock); 8413 return (EBUSY); 8414 } 8415 8416 *pp = NULL; 8417 8418 mutex_exit(&dtrace_lock); 8419 mutex_exit(&dtrace_meta_lock); 8420 8421 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8422 kmem_free(old, sizeof (dtrace_meta_t)); 8423 8424 return (0); 8425 } 8426 8427 8428 /* 8429 * DTrace DIF Object Functions 8430 */ 8431 static int 8432 dtrace_difo_err(uint_t pc, const char *format, ...) 8433 { 8434 if (dtrace_err_verbose) { 8435 va_list alist; 8436 8437 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8438 va_start(alist, format); 8439 (void) vuprintf(format, alist); 8440 va_end(alist); 8441 } 8442 8443 #ifdef DTRACE_ERRDEBUG 8444 dtrace_errdebug(format); 8445 #endif 8446 return (1); 8447 } 8448 8449 /* 8450 * Validate a DTrace DIF object by checking the IR instructions. The following 8451 * rules are currently enforced by dtrace_difo_validate(): 8452 * 8453 * 1. 
Each instruction must have a valid opcode 8454 * 2. Each register, string, variable, or subroutine reference must be valid 8455 * 3. No instruction can modify register %r0 (must be zero) 8456 * 4. All instruction reserved bits must be set to zero 8457 * 5. The last instruction must be a "ret" instruction 8458 * 6. All branch targets must reference a valid instruction _after_ the branch 8459 */ 8460 static int 8461 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8462 cred_t *cr) 8463 { 8464 int err = 0, i; 8465 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8466 int kcheckload; 8467 uint_t pc; 8468 8469 kcheckload = cr == NULL || 8470 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8471 8472 dp->dtdo_destructive = 0; 8473 8474 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8475 dif_instr_t instr = dp->dtdo_buf[pc]; 8476 8477 uint_t r1 = DIF_INSTR_R1(instr); 8478 uint_t r2 = DIF_INSTR_R2(instr); 8479 uint_t rd = DIF_INSTR_RD(instr); 8480 uint_t rs = DIF_INSTR_RS(instr); 8481 uint_t label = DIF_INSTR_LABEL(instr); 8482 uint_t v = DIF_INSTR_VAR(instr); 8483 uint_t subr = DIF_INSTR_SUBR(instr); 8484 uint_t type = DIF_INSTR_TYPE(instr); 8485 uint_t op = DIF_INSTR_OP(instr); 8486 8487 switch (op) { 8488 case DIF_OP_OR: 8489 case DIF_OP_XOR: 8490 case DIF_OP_AND: 8491 case DIF_OP_SLL: 8492 case DIF_OP_SRL: 8493 case DIF_OP_SRA: 8494 case DIF_OP_SUB: 8495 case DIF_OP_ADD: 8496 case DIF_OP_MUL: 8497 case DIF_OP_SDIV: 8498 case DIF_OP_UDIV: 8499 case DIF_OP_SREM: 8500 case DIF_OP_UREM: 8501 case DIF_OP_COPYS: 8502 if (r1 >= nregs) 8503 err += efunc(pc, "invalid register %u\n", r1); 8504 if (r2 >= nregs) 8505 err += efunc(pc, "invalid register %u\n", r2); 8506 if (rd >= nregs) 8507 err += efunc(pc, "invalid register %u\n", rd); 8508 if (rd == 0) 8509 err += efunc(pc, "cannot write to %r0\n"); 8510 break; 8511 case DIF_OP_NOT: 8512 case DIF_OP_MOV: 8513 case DIF_OP_ALLOCS: 8514 if (r1 >= nregs) 8515 err += efunc(pc, "invalid register %u\n", r1); 8516 if (r2 != 0) 8517 err += efunc(pc, "non-zero reserved bits\n"); 8518 if (rd >= nregs) 8519 err += efunc(pc, "invalid register %u\n", rd); 8520 if (rd == 0) 8521 err += efunc(pc, "cannot write to %r0\n"); 8522 break; 8523 case DIF_OP_LDSB: 8524 case DIF_OP_LDSH: 8525 case DIF_OP_LDSW: 8526 case DIF_OP_LDUB: 8527 case DIF_OP_LDUH: 8528 case DIF_OP_LDUW: 8529 case DIF_OP_LDX: 8530 if (r1 >= nregs) 8531 err += efunc(pc, "invalid register %u\n", r1); 8532 if (r2 != 0) 8533 err += efunc(pc, "non-zero reserved bits\n"); 8534 if (rd >= nregs) 8535 err += efunc(pc, "invalid register %u\n", rd); 8536 if (rd == 0) 8537 err += efunc(pc, "cannot write to %r0\n"); 8538 if (kcheckload) 8539 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8540 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8541 break; 8542 case DIF_OP_RLDSB: 8543 case DIF_OP_RLDSH: 8544 case DIF_OP_RLDSW: 8545 case DIF_OP_RLDUB: 8546 case DIF_OP_RLDUH: 8547 case DIF_OP_RLDUW: 8548 case DIF_OP_RLDX: 8549 if (r1 >= nregs) 8550 err += efunc(pc, "invalid register %u\n", r1); 8551 if (r2 != 0) 8552 err += efunc(pc, "non-zero reserved bits\n"); 8553 if (rd >= nregs) 8554 err += efunc(pc, "invalid register %u\n", rd); 8555 if (rd == 0) 8556 err += efunc(pc, "cannot write to %r0\n"); 8557 break; 8558 case DIF_OP_ULDSB: 8559 case DIF_OP_ULDSH: 8560 case DIF_OP_ULDSW: 8561 case DIF_OP_ULDUB: 8562 case DIF_OP_ULDUH: 8563 case DIF_OP_ULDUW: 8564 case DIF_OP_ULDX: 8565 if (r1 >= nregs) 8566 err += efunc(pc, "invalid register %u\n", r1); 8567 if (r2 != 0) 8568 err += 
efunc(pc, "non-zero reserved bits\n"); 8569 if (rd >= nregs) 8570 err += efunc(pc, "invalid register %u\n", rd); 8571 if (rd == 0) 8572 err += efunc(pc, "cannot write to %r0\n"); 8573 break; 8574 case DIF_OP_STB: 8575 case DIF_OP_STH: 8576 case DIF_OP_STW: 8577 case DIF_OP_STX: 8578 if (r1 >= nregs) 8579 err += efunc(pc, "invalid register %u\n", r1); 8580 if (r2 != 0) 8581 err += efunc(pc, "non-zero reserved bits\n"); 8582 if (rd >= nregs) 8583 err += efunc(pc, "invalid register %u\n", rd); 8584 if (rd == 0) 8585 err += efunc(pc, "cannot write to 0 address\n"); 8586 break; 8587 case DIF_OP_CMP: 8588 case DIF_OP_SCMP: 8589 if (r1 >= nregs) 8590 err += efunc(pc, "invalid register %u\n", r1); 8591 if (r2 >= nregs) 8592 err += efunc(pc, "invalid register %u\n", r2); 8593 if (rd != 0) 8594 err += efunc(pc, "non-zero reserved bits\n"); 8595 break; 8596 case DIF_OP_TST: 8597 if (r1 >= nregs) 8598 err += efunc(pc, "invalid register %u\n", r1); 8599 if (r2 != 0 || rd != 0) 8600 err += efunc(pc, "non-zero reserved bits\n"); 8601 break; 8602 case DIF_OP_BA: 8603 case DIF_OP_BE: 8604 case DIF_OP_BNE: 8605 case DIF_OP_BG: 8606 case DIF_OP_BGU: 8607 case DIF_OP_BGE: 8608 case DIF_OP_BGEU: 8609 case DIF_OP_BL: 8610 case DIF_OP_BLU: 8611 case DIF_OP_BLE: 8612 case DIF_OP_BLEU: 8613 if (label >= dp->dtdo_len) { 8614 err += efunc(pc, "invalid branch target %u\n", 8615 label); 8616 } 8617 if (label <= pc) { 8618 err += efunc(pc, "backward branch to %u\n", 8619 label); 8620 } 8621 break; 8622 case DIF_OP_RET: 8623 if (r1 != 0 || r2 != 0) 8624 err += efunc(pc, "non-zero reserved bits\n"); 8625 if (rd >= nregs) 8626 err += efunc(pc, "invalid register %u\n", rd); 8627 break; 8628 case DIF_OP_NOP: 8629 case DIF_OP_POPTS: 8630 case DIF_OP_FLUSHTS: 8631 if (r1 != 0 || r2 != 0 || rd != 0) 8632 err += efunc(pc, "non-zero reserved bits\n"); 8633 break; 8634 case DIF_OP_SETX: 8635 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8636 err += efunc(pc, "invalid integer ref %u\n", 8637 DIF_INSTR_INTEGER(instr)); 8638 } 8639 if (rd >= nregs) 8640 err += efunc(pc, "invalid register %u\n", rd); 8641 if (rd == 0) 8642 err += efunc(pc, "cannot write to %r0\n"); 8643 break; 8644 case DIF_OP_SETS: 8645 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8646 err += efunc(pc, "invalid string ref %u\n", 8647 DIF_INSTR_STRING(instr)); 8648 } 8649 if (rd >= nregs) 8650 err += efunc(pc, "invalid register %u\n", rd); 8651 if (rd == 0) 8652 err += efunc(pc, "cannot write to %r0\n"); 8653 break; 8654 case DIF_OP_LDGA: 8655 case DIF_OP_LDTA: 8656 if (r1 > DIF_VAR_ARRAY_MAX) 8657 err += efunc(pc, "invalid array %u\n", r1); 8658 if (r2 >= nregs) 8659 err += efunc(pc, "invalid register %u\n", r2); 8660 if (rd >= nregs) 8661 err += efunc(pc, "invalid register %u\n", rd); 8662 if (rd == 0) 8663 err += efunc(pc, "cannot write to %r0\n"); 8664 break; 8665 case DIF_OP_LDGS: 8666 case DIF_OP_LDTS: 8667 case DIF_OP_LDLS: 8668 case DIF_OP_LDGAA: 8669 case DIF_OP_LDTAA: 8670 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8671 err += efunc(pc, "invalid variable %u\n", v); 8672 if (rd >= nregs) 8673 err += efunc(pc, "invalid register %u\n", rd); 8674 if (rd == 0) 8675 err += efunc(pc, "cannot write to %r0\n"); 8676 break; 8677 case DIF_OP_STGS: 8678 case DIF_OP_STTS: 8679 case DIF_OP_STLS: 8680 case DIF_OP_STGAA: 8681 case DIF_OP_STTAA: 8682 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8683 err += efunc(pc, "invalid variable %u\n", v); 8684 if (rs >= nregs) 8685 err += efunc(pc, "invalid register %u\n", rd); 8686 break; 8687 case 
DIF_OP_CALL: 8688 if (subr > DIF_SUBR_MAX) 8689 err += efunc(pc, "invalid subr %u\n", subr); 8690 if (rd >= nregs) 8691 err += efunc(pc, "invalid register %u\n", rd); 8692 if (rd == 0) 8693 err += efunc(pc, "cannot write to %r0\n"); 8694 8695 if (subr == DIF_SUBR_COPYOUT || 8696 subr == DIF_SUBR_COPYOUTSTR) { 8697 dp->dtdo_destructive = 1; 8698 } 8699 break; 8700 case DIF_OP_PUSHTR: 8701 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8702 err += efunc(pc, "invalid ref type %u\n", type); 8703 if (r2 >= nregs) 8704 err += efunc(pc, "invalid register %u\n", r2); 8705 if (rs >= nregs) 8706 err += efunc(pc, "invalid register %u\n", rs); 8707 break; 8708 case DIF_OP_PUSHTV: 8709 if (type != DIF_TYPE_CTF) 8710 err += efunc(pc, "invalid val type %u\n", type); 8711 if (r2 >= nregs) 8712 err += efunc(pc, "invalid register %u\n", r2); 8713 if (rs >= nregs) 8714 err += efunc(pc, "invalid register %u\n", rs); 8715 break; 8716 default: 8717 err += efunc(pc, "invalid opcode %u\n", 8718 DIF_INSTR_OP(instr)); 8719 } 8720 } 8721 8722 if (dp->dtdo_len != 0 && 8723 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8724 err += efunc(dp->dtdo_len - 1, 8725 "expected 'ret' as last DIF instruction\n"); 8726 } 8727 8728 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8729 /* 8730 * If we're not returning by reference, the size must be either 8731 * 0 or the size of one of the base types. 8732 */ 8733 switch (dp->dtdo_rtype.dtdt_size) { 8734 case 0: 8735 case sizeof (uint8_t): 8736 case sizeof (uint16_t): 8737 case sizeof (uint32_t): 8738 case sizeof (uint64_t): 8739 break; 8740 8741 default: 8742 err += efunc(dp->dtdo_len - 1, "bad return size\n"); 8743 } 8744 } 8745 8746 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8747 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8748 dtrace_diftype_t *vt, *et; 8749 uint_t id, ndx; 8750 8751 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8752 v->dtdv_scope != DIFV_SCOPE_THREAD && 8753 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8754 err += efunc(i, "unrecognized variable scope %d\n", 8755 v->dtdv_scope); 8756 break; 8757 } 8758 8759 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8760 v->dtdv_kind != DIFV_KIND_SCALAR) { 8761 err += efunc(i, "unrecognized variable type %d\n", 8762 v->dtdv_kind); 8763 break; 8764 } 8765 8766 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8767 err += efunc(i, "%d exceeds variable id limit\n", id); 8768 break; 8769 } 8770 8771 if (id < DIF_VAR_OTHER_UBASE) 8772 continue; 8773 8774 /* 8775 * For user-defined variables, we need to check that this 8776 * definition is identical to any previous definition that we 8777 * encountered. 
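 * For example, if one clause uses a global variable with one kind and type
 * and a later clause uses the same variable with a different kind,
 * different type flags, or a different (non-zero) type size, the checks
 * below will flag the DIF object as invalid.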
8778 */ 8779 ndx = id - DIF_VAR_OTHER_UBASE; 8780 8781 switch (v->dtdv_scope) { 8782 case DIFV_SCOPE_GLOBAL: 8783 if (ndx < vstate->dtvs_nglobals) { 8784 dtrace_statvar_t *svar; 8785 8786 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8787 existing = &svar->dtsv_var; 8788 } 8789 8790 break; 8791 8792 case DIFV_SCOPE_THREAD: 8793 if (ndx < vstate->dtvs_ntlocals) 8794 existing = &vstate->dtvs_tlocals[ndx]; 8795 break; 8796 8797 case DIFV_SCOPE_LOCAL: 8798 if (ndx < vstate->dtvs_nlocals) { 8799 dtrace_statvar_t *svar; 8800 8801 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8802 existing = &svar->dtsv_var; 8803 } 8804 8805 break; 8806 } 8807 8808 vt = &v->dtdv_type; 8809 8810 if (vt->dtdt_flags & DIF_TF_BYREF) { 8811 if (vt->dtdt_size == 0) { 8812 err += efunc(i, "zero-sized variable\n"); 8813 break; 8814 } 8815 8816 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8817 vt->dtdt_size > dtrace_global_maxsize) { 8818 err += efunc(i, "oversized by-ref global\n"); 8819 break; 8820 } 8821 } 8822 8823 if (existing == NULL || existing->dtdv_id == 0) 8824 continue; 8825 8826 ASSERT(existing->dtdv_id == v->dtdv_id); 8827 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8828 8829 if (existing->dtdv_kind != v->dtdv_kind) 8830 err += efunc(i, "%d changed variable kind\n", id); 8831 8832 et = &existing->dtdv_type; 8833 8834 if (vt->dtdt_flags != et->dtdt_flags) { 8835 err += efunc(i, "%d changed variable type flags\n", id); 8836 break; 8837 } 8838 8839 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8840 err += efunc(i, "%d changed variable type size\n", id); 8841 break; 8842 } 8843 } 8844 8845 return (err); 8846 } 8847 8848 #if defined(sun) 8849 /* 8850 * Validate a DTrace DIF object that it is to be used as a helper. Helpers 8851 * are much more constrained than normal DIFOs. Specifically, they may 8852 * not: 8853 * 8854 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8855 * miscellaneous string routines 8856 * 2. Access DTrace variables other than the args[] array, and the 8857 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8858 * 3. Have thread-local variables. 8859 * 4. Have dynamic variables. 8860 */ 8861 static int 8862 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8863 { 8864 int (*efunc)(uint_t pc, const char *, ...) 
= dtrace_difo_err; 8865 int err = 0; 8866 uint_t pc; 8867 8868 for (pc = 0; pc < dp->dtdo_len; pc++) { 8869 dif_instr_t instr = dp->dtdo_buf[pc]; 8870 8871 uint_t v = DIF_INSTR_VAR(instr); 8872 uint_t subr = DIF_INSTR_SUBR(instr); 8873 uint_t op = DIF_INSTR_OP(instr); 8874 8875 switch (op) { 8876 case DIF_OP_OR: 8877 case DIF_OP_XOR: 8878 case DIF_OP_AND: 8879 case DIF_OP_SLL: 8880 case DIF_OP_SRL: 8881 case DIF_OP_SRA: 8882 case DIF_OP_SUB: 8883 case DIF_OP_ADD: 8884 case DIF_OP_MUL: 8885 case DIF_OP_SDIV: 8886 case DIF_OP_UDIV: 8887 case DIF_OP_SREM: 8888 case DIF_OP_UREM: 8889 case DIF_OP_COPYS: 8890 case DIF_OP_NOT: 8891 case DIF_OP_MOV: 8892 case DIF_OP_RLDSB: 8893 case DIF_OP_RLDSH: 8894 case DIF_OP_RLDSW: 8895 case DIF_OP_RLDUB: 8896 case DIF_OP_RLDUH: 8897 case DIF_OP_RLDUW: 8898 case DIF_OP_RLDX: 8899 case DIF_OP_ULDSB: 8900 case DIF_OP_ULDSH: 8901 case DIF_OP_ULDSW: 8902 case DIF_OP_ULDUB: 8903 case DIF_OP_ULDUH: 8904 case DIF_OP_ULDUW: 8905 case DIF_OP_ULDX: 8906 case DIF_OP_STB: 8907 case DIF_OP_STH: 8908 case DIF_OP_STW: 8909 case DIF_OP_STX: 8910 case DIF_OP_ALLOCS: 8911 case DIF_OP_CMP: 8912 case DIF_OP_SCMP: 8913 case DIF_OP_TST: 8914 case DIF_OP_BA: 8915 case DIF_OP_BE: 8916 case DIF_OP_BNE: 8917 case DIF_OP_BG: 8918 case DIF_OP_BGU: 8919 case DIF_OP_BGE: 8920 case DIF_OP_BGEU: 8921 case DIF_OP_BL: 8922 case DIF_OP_BLU: 8923 case DIF_OP_BLE: 8924 case DIF_OP_BLEU: 8925 case DIF_OP_RET: 8926 case DIF_OP_NOP: 8927 case DIF_OP_POPTS: 8928 case DIF_OP_FLUSHTS: 8929 case DIF_OP_SETX: 8930 case DIF_OP_SETS: 8931 case DIF_OP_LDGA: 8932 case DIF_OP_LDLS: 8933 case DIF_OP_STGS: 8934 case DIF_OP_STLS: 8935 case DIF_OP_PUSHTR: 8936 case DIF_OP_PUSHTV: 8937 break; 8938 8939 case DIF_OP_LDGS: 8940 if (v >= DIF_VAR_OTHER_UBASE) 8941 break; 8942 8943 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 8944 break; 8945 8946 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 8947 v == DIF_VAR_PPID || v == DIF_VAR_TID || 8948 v == DIF_VAR_EXECARGS || 8949 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 8950 v == DIF_VAR_UID || v == DIF_VAR_GID) 8951 break; 8952 8953 err += efunc(pc, "illegal variable %u\n", v); 8954 break; 8955 8956 case DIF_OP_LDTA: 8957 case DIF_OP_LDTS: 8958 case DIF_OP_LDGAA: 8959 case DIF_OP_LDTAA: 8960 err += efunc(pc, "illegal dynamic variable load\n"); 8961 break; 8962 8963 case DIF_OP_STTS: 8964 case DIF_OP_STGAA: 8965 case DIF_OP_STTAA: 8966 err += efunc(pc, "illegal dynamic variable store\n"); 8967 break; 8968 8969 case DIF_OP_CALL: 8970 if (subr == DIF_SUBR_ALLOCA || 8971 subr == DIF_SUBR_BCOPY || 8972 subr == DIF_SUBR_COPYIN || 8973 subr == DIF_SUBR_COPYINTO || 8974 subr == DIF_SUBR_COPYINSTR || 8975 subr == DIF_SUBR_INDEX || 8976 subr == DIF_SUBR_INET_NTOA || 8977 subr == DIF_SUBR_INET_NTOA6 || 8978 subr == DIF_SUBR_INET_NTOP || 8979 subr == DIF_SUBR_LLTOSTR || 8980 subr == DIF_SUBR_RINDEX || 8981 subr == DIF_SUBR_STRCHR || 8982 subr == DIF_SUBR_STRJOIN || 8983 subr == DIF_SUBR_STRRCHR || 8984 subr == DIF_SUBR_STRSTR || 8985 subr == DIF_SUBR_HTONS || 8986 subr == DIF_SUBR_HTONL || 8987 subr == DIF_SUBR_HTONLL || 8988 subr == DIF_SUBR_NTOHS || 8989 subr == DIF_SUBR_NTOHL || 8990 subr == DIF_SUBR_NTOHLL || 8991 subr == DIF_SUBR_MEMREF || 8992 subr == DIF_SUBR_TYPEREF) 8993 break; 8994 8995 err += efunc(pc, "invalid subr %u\n", subr); 8996 break; 8997 8998 default: 8999 err += efunc(pc, "invalid opcode %u\n", 9000 DIF_INSTR_OP(instr)); 9001 } 9002 } 9003 9004 return (err); 9005 } 9006 #endif 9007 9008 /* 9009 * Returns 1 if the expression in the DIF object can be cached 
on a per-thread 9010 * basis; 0 if not. 9011 */ 9012 static int 9013 dtrace_difo_cacheable(dtrace_difo_t *dp) 9014 { 9015 int i; 9016 9017 if (dp == NULL) 9018 return (0); 9019 9020 for (i = 0; i < dp->dtdo_varlen; i++) { 9021 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9022 9023 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9024 continue; 9025 9026 switch (v->dtdv_id) { 9027 case DIF_VAR_CURTHREAD: 9028 case DIF_VAR_PID: 9029 case DIF_VAR_TID: 9030 case DIF_VAR_EXECARGS: 9031 case DIF_VAR_EXECNAME: 9032 case DIF_VAR_ZONENAME: 9033 break; 9034 9035 default: 9036 return (0); 9037 } 9038 } 9039 9040 /* 9041 * This DIF object may be cacheable. Now we need to look for any 9042 * array loading instructions, any memory loading instructions, or 9043 * any stores to thread-local variables. 9044 */ 9045 for (i = 0; i < dp->dtdo_len; i++) { 9046 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9047 9048 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9049 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9050 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9051 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9052 return (0); 9053 } 9054 9055 return (1); 9056 } 9057 9058 static void 9059 dtrace_difo_hold(dtrace_difo_t *dp) 9060 { 9061 int i; 9062 9063 ASSERT(MUTEX_HELD(&dtrace_lock)); 9064 9065 dp->dtdo_refcnt++; 9066 ASSERT(dp->dtdo_refcnt != 0); 9067 9068 /* 9069 * We need to check this DIF object for references to the variable 9070 * DIF_VAR_VTIMESTAMP. 9071 */ 9072 for (i = 0; i < dp->dtdo_varlen; i++) { 9073 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9074 9075 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9076 continue; 9077 9078 if (dtrace_vtime_references++ == 0) 9079 dtrace_vtime_enable(); 9080 } 9081 } 9082 9083 /* 9084 * This routine calculates the dynamic variable chunksize for a given DIF 9085 * object. The calculation is not fool-proof, and can probably be tricked by 9086 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9087 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9088 * if a dynamic variable size exceeds the chunksize. 
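 * Roughly, for each dynamic variable store the code below sums a fixed
 * dtrace_dynvar_t header, one additional dtrace_key_t per key beyond the
 * first, each key's size rounded up to 8 bytes, and the size of the stored
 * value, then rounds the total up to an 8-byte multiple; the largest such
 * total seen becomes the dynamic variable chunksize for the state.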
9089 */ 9090 static void 9091 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9092 { 9093 uint64_t sval = 0; 9094 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9095 const dif_instr_t *text = dp->dtdo_buf; 9096 uint_t pc, srd = 0; 9097 uint_t ttop = 0; 9098 size_t size, ksize; 9099 uint_t id, i; 9100 9101 for (pc = 0; pc < dp->dtdo_len; pc++) { 9102 dif_instr_t instr = text[pc]; 9103 uint_t op = DIF_INSTR_OP(instr); 9104 uint_t rd = DIF_INSTR_RD(instr); 9105 uint_t r1 = DIF_INSTR_R1(instr); 9106 uint_t nkeys = 0; 9107 uchar_t scope = 0; 9108 9109 dtrace_key_t *key = tupregs; 9110 9111 switch (op) { 9112 case DIF_OP_SETX: 9113 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9114 srd = rd; 9115 continue; 9116 9117 case DIF_OP_STTS: 9118 key = &tupregs[DIF_DTR_NREGS]; 9119 key[0].dttk_size = 0; 9120 key[1].dttk_size = 0; 9121 nkeys = 2; 9122 scope = DIFV_SCOPE_THREAD; 9123 break; 9124 9125 case DIF_OP_STGAA: 9126 case DIF_OP_STTAA: 9127 nkeys = ttop; 9128 9129 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9130 key[nkeys++].dttk_size = 0; 9131 9132 key[nkeys++].dttk_size = 0; 9133 9134 if (op == DIF_OP_STTAA) { 9135 scope = DIFV_SCOPE_THREAD; 9136 } else { 9137 scope = DIFV_SCOPE_GLOBAL; 9138 } 9139 9140 break; 9141 9142 case DIF_OP_PUSHTR: 9143 if (ttop == DIF_DTR_NREGS) 9144 return; 9145 9146 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9147 /* 9148 * If the register for the size of the "pushtr" 9149 * is %r0 (or the value is 0) and the type is 9150 * a string, we'll use the system-wide default 9151 * string size. 9152 */ 9153 tupregs[ttop++].dttk_size = 9154 dtrace_strsize_default; 9155 } else { 9156 if (srd == 0) 9157 return; 9158 9159 tupregs[ttop++].dttk_size = sval; 9160 } 9161 9162 break; 9163 9164 case DIF_OP_PUSHTV: 9165 if (ttop == DIF_DTR_NREGS) 9166 return; 9167 9168 tupregs[ttop++].dttk_size = 0; 9169 break; 9170 9171 case DIF_OP_FLUSHTS: 9172 ttop = 0; 9173 break; 9174 9175 case DIF_OP_POPTS: 9176 if (ttop != 0) 9177 ttop--; 9178 break; 9179 } 9180 9181 sval = 0; 9182 srd = 0; 9183 9184 if (nkeys == 0) 9185 continue; 9186 9187 /* 9188 * We have a dynamic variable allocation; calculate its size. 9189 */ 9190 for (ksize = 0, i = 0; i < nkeys; i++) 9191 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9192 9193 size = sizeof (dtrace_dynvar_t); 9194 size += sizeof (dtrace_key_t) * (nkeys - 1); 9195 size += ksize; 9196 9197 /* 9198 * Now we need to determine the size of the stored data. 9199 */ 9200 id = DIF_INSTR_VAR(instr); 9201 9202 for (i = 0; i < dp->dtdo_varlen; i++) { 9203 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9204 9205 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9206 size += v->dtdv_type.dtdt_size; 9207 break; 9208 } 9209 } 9210 9211 if (i == dp->dtdo_varlen) 9212 return; 9213 9214 /* 9215 * We have the size. If this is larger than the chunk size 9216 * for our dynamic variable state, reset the chunk size. 
9217 */ 9218 size = P2ROUNDUP(size, sizeof (uint64_t)); 9219 9220 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9221 vstate->dtvs_dynvars.dtds_chunksize = size; 9222 } 9223 } 9224 9225 static void 9226 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9227 { 9228 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9229 uint_t id; 9230 9231 ASSERT(MUTEX_HELD(&dtrace_lock)); 9232 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9233 9234 for (i = 0; i < dp->dtdo_varlen; i++) { 9235 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9236 dtrace_statvar_t *svar, ***svarp = NULL; 9237 size_t dsize = 0; 9238 uint8_t scope = v->dtdv_scope; 9239 int *np = NULL; 9240 9241 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9242 continue; 9243 9244 id -= DIF_VAR_OTHER_UBASE; 9245 9246 switch (scope) { 9247 case DIFV_SCOPE_THREAD: 9248 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9249 dtrace_difv_t *tlocals; 9250 9251 if ((ntlocals = (otlocals << 1)) == 0) 9252 ntlocals = 1; 9253 9254 osz = otlocals * sizeof (dtrace_difv_t); 9255 nsz = ntlocals * sizeof (dtrace_difv_t); 9256 9257 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9258 9259 if (osz != 0) { 9260 bcopy(vstate->dtvs_tlocals, 9261 tlocals, osz); 9262 kmem_free(vstate->dtvs_tlocals, osz); 9263 } 9264 9265 vstate->dtvs_tlocals = tlocals; 9266 vstate->dtvs_ntlocals = ntlocals; 9267 } 9268 9269 vstate->dtvs_tlocals[id] = *v; 9270 continue; 9271 9272 case DIFV_SCOPE_LOCAL: 9273 np = &vstate->dtvs_nlocals; 9274 svarp = &vstate->dtvs_locals; 9275 9276 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9277 dsize = NCPU * (v->dtdv_type.dtdt_size + 9278 sizeof (uint64_t)); 9279 else 9280 dsize = NCPU * sizeof (uint64_t); 9281 9282 break; 9283 9284 case DIFV_SCOPE_GLOBAL: 9285 np = &vstate->dtvs_nglobals; 9286 svarp = &vstate->dtvs_globals; 9287 9288 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9289 dsize = v->dtdv_type.dtdt_size + 9290 sizeof (uint64_t); 9291 9292 break; 9293 9294 default: 9295 ASSERT(0); 9296 } 9297 9298 while (id >= (oldsvars = *np)) { 9299 dtrace_statvar_t **statics; 9300 int newsvars, oldsize, newsize; 9301 9302 if ((newsvars = (oldsvars << 1)) == 0) 9303 newsvars = 1; 9304 9305 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9306 newsize = newsvars * sizeof (dtrace_statvar_t *); 9307 9308 statics = kmem_zalloc(newsize, KM_SLEEP); 9309 9310 if (oldsize != 0) { 9311 bcopy(*svarp, statics, oldsize); 9312 kmem_free(*svarp, oldsize); 9313 } 9314 9315 *svarp = statics; 9316 *np = newsvars; 9317 } 9318 9319 if ((svar = (*svarp)[id]) == NULL) { 9320 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9321 svar->dtsv_var = *v; 9322 9323 if ((svar->dtsv_size = dsize) != 0) { 9324 svar->dtsv_data = (uint64_t)(uintptr_t) 9325 kmem_zalloc(dsize, KM_SLEEP); 9326 } 9327 9328 (*svarp)[id] = svar; 9329 } 9330 9331 svar->dtsv_refcnt++; 9332 } 9333 9334 dtrace_difo_chunksize(dp, vstate); 9335 dtrace_difo_hold(dp); 9336 } 9337 9338 #if defined(sun) 9339 static dtrace_difo_t * 9340 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9341 { 9342 dtrace_difo_t *new; 9343 size_t sz; 9344 9345 ASSERT(dp->dtdo_buf != NULL); 9346 ASSERT(dp->dtdo_refcnt != 0); 9347 9348 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9349 9350 ASSERT(dp->dtdo_buf != NULL); 9351 sz = dp->dtdo_len * sizeof (dif_instr_t); 9352 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9353 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9354 new->dtdo_len = dp->dtdo_len; 9355 9356 if (dp->dtdo_strtab != NULL) { 9357 ASSERT(dp->dtdo_strlen != 0); 9358 new->dtdo_strtab = 
kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9359 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9360 new->dtdo_strlen = dp->dtdo_strlen; 9361 } 9362 9363 if (dp->dtdo_inttab != NULL) { 9364 ASSERT(dp->dtdo_intlen != 0); 9365 sz = dp->dtdo_intlen * sizeof (uint64_t); 9366 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9367 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9368 new->dtdo_intlen = dp->dtdo_intlen; 9369 } 9370 9371 if (dp->dtdo_vartab != NULL) { 9372 ASSERT(dp->dtdo_varlen != 0); 9373 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9374 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9375 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9376 new->dtdo_varlen = dp->dtdo_varlen; 9377 } 9378 9379 dtrace_difo_init(new, vstate); 9380 return (new); 9381 } 9382 #endif 9383 9384 static void 9385 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9386 { 9387 int i; 9388 9389 ASSERT(dp->dtdo_refcnt == 0); 9390 9391 for (i = 0; i < dp->dtdo_varlen; i++) { 9392 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9393 dtrace_statvar_t *svar, **svarp = NULL; 9394 uint_t id; 9395 uint8_t scope = v->dtdv_scope; 9396 int *np = NULL; 9397 9398 switch (scope) { 9399 case DIFV_SCOPE_THREAD: 9400 continue; 9401 9402 case DIFV_SCOPE_LOCAL: 9403 np = &vstate->dtvs_nlocals; 9404 svarp = vstate->dtvs_locals; 9405 break; 9406 9407 case DIFV_SCOPE_GLOBAL: 9408 np = &vstate->dtvs_nglobals; 9409 svarp = vstate->dtvs_globals; 9410 break; 9411 9412 default: 9413 ASSERT(0); 9414 } 9415 9416 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9417 continue; 9418 9419 id -= DIF_VAR_OTHER_UBASE; 9420 ASSERT(id < *np); 9421 9422 svar = svarp[id]; 9423 ASSERT(svar != NULL); 9424 ASSERT(svar->dtsv_refcnt > 0); 9425 9426 if (--svar->dtsv_refcnt > 0) 9427 continue; 9428 9429 if (svar->dtsv_size != 0) { 9430 ASSERT(svar->dtsv_data != 0); 9431 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9432 svar->dtsv_size); 9433 } 9434 9435 kmem_free(svar, sizeof (dtrace_statvar_t)); 9436 svarp[id] = NULL; 9437 } 9438 9439 if (dp->dtdo_buf != NULL) 9440 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9441 if (dp->dtdo_inttab != NULL) 9442 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9443 if (dp->dtdo_strtab != NULL) 9444 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9445 if (dp->dtdo_vartab != NULL) 9446 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9447 9448 kmem_free(dp, sizeof (dtrace_difo_t)); 9449 } 9450 9451 static void 9452 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9453 { 9454 int i; 9455 9456 ASSERT(MUTEX_HELD(&dtrace_lock)); 9457 ASSERT(dp->dtdo_refcnt != 0); 9458 9459 for (i = 0; i < dp->dtdo_varlen; i++) { 9460 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9461 9462 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9463 continue; 9464 9465 ASSERT(dtrace_vtime_references > 0); 9466 if (--dtrace_vtime_references == 0) 9467 dtrace_vtime_disable(); 9468 } 9469 9470 if (--dp->dtdo_refcnt == 0) 9471 dtrace_difo_destroy(dp, vstate); 9472 } 9473 9474 /* 9475 * DTrace Format Functions 9476 */ 9477 static uint16_t 9478 dtrace_format_add(dtrace_state_t *state, char *str) 9479 { 9480 char *fmt, **new; 9481 uint16_t ndx, len = strlen(str) + 1; 9482 9483 fmt = kmem_zalloc(len, KM_SLEEP); 9484 bcopy(str, fmt, len); 9485 9486 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9487 if (state->dts_formats[ndx] == NULL) { 9488 state->dts_formats[ndx] = fmt; 9489 return (ndx + 1); 9490 } 9491 } 9492 9493 if (state->dts_nformats == USHRT_MAX) { 9494 /* 9495 * This is only likely if a 
denial-of-service attack is being 9496 * attempted. As such, it's okay to fail silently here. 9497 */ 9498 kmem_free(fmt, len); 9499 return (0); 9500 } 9501 9502 /* 9503 * For simplicity, we always resize the formats array to be exactly the 9504 * number of formats. 9505 */ 9506 ndx = state->dts_nformats++; 9507 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9508 9509 if (state->dts_formats != NULL) { 9510 ASSERT(ndx != 0); 9511 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9512 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9513 } 9514 9515 state->dts_formats = new; 9516 state->dts_formats[ndx] = fmt; 9517 9518 return (ndx + 1); 9519 } 9520 9521 static void 9522 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9523 { 9524 char *fmt; 9525 9526 ASSERT(state->dts_formats != NULL); 9527 ASSERT(format <= state->dts_nformats); 9528 ASSERT(state->dts_formats[format - 1] != NULL); 9529 9530 fmt = state->dts_formats[format - 1]; 9531 kmem_free(fmt, strlen(fmt) + 1); 9532 state->dts_formats[format - 1] = NULL; 9533 } 9534 9535 static void 9536 dtrace_format_destroy(dtrace_state_t *state) 9537 { 9538 int i; 9539 9540 if (state->dts_nformats == 0) { 9541 ASSERT(state->dts_formats == NULL); 9542 return; 9543 } 9544 9545 ASSERT(state->dts_formats != NULL); 9546 9547 for (i = 0; i < state->dts_nformats; i++) { 9548 char *fmt = state->dts_formats[i]; 9549 9550 if (fmt == NULL) 9551 continue; 9552 9553 kmem_free(fmt, strlen(fmt) + 1); 9554 } 9555 9556 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9557 state->dts_nformats = 0; 9558 state->dts_formats = NULL; 9559 } 9560 9561 /* 9562 * DTrace Predicate Functions 9563 */ 9564 static dtrace_predicate_t * 9565 dtrace_predicate_create(dtrace_difo_t *dp) 9566 { 9567 dtrace_predicate_t *pred; 9568 9569 ASSERT(MUTEX_HELD(&dtrace_lock)); 9570 ASSERT(dp->dtdo_refcnt != 0); 9571 9572 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9573 pred->dtp_difo = dp; 9574 pred->dtp_refcnt = 1; 9575 9576 if (!dtrace_difo_cacheable(dp)) 9577 return (pred); 9578 9579 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9580 /* 9581 * This is only theoretically possible -- we have had 2^32 9582 * cacheable predicates on this machine. We cannot allow any 9583 * more predicates to become cacheable: as unlikely as it is, 9584 * there may be a thread caching a (now stale) predicate cache 9585 * ID. 
(N.B.: the temptation is being successfully resisted to 9586 * have this cmn_err() "Holy shit -- we executed this code!") 9587 */ 9588 return (pred); 9589 } 9590 9591 pred->dtp_cacheid = dtrace_predcache_id++; 9592 9593 return (pred); 9594 } 9595 9596 static void 9597 dtrace_predicate_hold(dtrace_predicate_t *pred) 9598 { 9599 ASSERT(MUTEX_HELD(&dtrace_lock)); 9600 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9601 ASSERT(pred->dtp_refcnt > 0); 9602 9603 pred->dtp_refcnt++; 9604 } 9605 9606 static void 9607 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9608 { 9609 dtrace_difo_t *dp = pred->dtp_difo; 9610 9611 ASSERT(MUTEX_HELD(&dtrace_lock)); 9612 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9613 ASSERT(pred->dtp_refcnt > 0); 9614 9615 if (--pred->dtp_refcnt == 0) { 9616 dtrace_difo_release(pred->dtp_difo, vstate); 9617 kmem_free(pred, sizeof (dtrace_predicate_t)); 9618 } 9619 } 9620 9621 /* 9622 * DTrace Action Description Functions 9623 */ 9624 static dtrace_actdesc_t * 9625 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9626 uint64_t uarg, uint64_t arg) 9627 { 9628 dtrace_actdesc_t *act; 9629 9630 #if defined(sun) 9631 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9632 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9633 #endif 9634 9635 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9636 act->dtad_kind = kind; 9637 act->dtad_ntuple = ntuple; 9638 act->dtad_uarg = uarg; 9639 act->dtad_arg = arg; 9640 act->dtad_refcnt = 1; 9641 9642 return (act); 9643 } 9644 9645 static void 9646 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9647 { 9648 ASSERT(act->dtad_refcnt >= 1); 9649 act->dtad_refcnt++; 9650 } 9651 9652 static void 9653 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9654 { 9655 dtrace_actkind_t kind = act->dtad_kind; 9656 dtrace_difo_t *dp; 9657 9658 ASSERT(act->dtad_refcnt >= 1); 9659 9660 if (--act->dtad_refcnt != 0) 9661 return; 9662 9663 if ((dp = act->dtad_difo) != NULL) 9664 dtrace_difo_release(dp, vstate); 9665 9666 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9667 char *str = (char *)(uintptr_t)act->dtad_arg; 9668 9669 #if defined(sun) 9670 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9671 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9672 #endif 9673 9674 if (str != NULL) 9675 kmem_free(str, strlen(str) + 1); 9676 } 9677 9678 kmem_free(act, sizeof (dtrace_actdesc_t)); 9679 } 9680 9681 /* 9682 * DTrace ECB Functions 9683 */ 9684 static dtrace_ecb_t * 9685 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9686 { 9687 dtrace_ecb_t *ecb; 9688 dtrace_epid_t epid; 9689 9690 ASSERT(MUTEX_HELD(&dtrace_lock)); 9691 9692 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9693 ecb->dte_predicate = NULL; 9694 ecb->dte_probe = probe; 9695 9696 /* 9697 * The default size is the size of the default action: recording 9698 * the epid. 
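 * Every record written for this ECB begins with its enabled probe ID, so
 * dte_size and dte_needed start at sizeof (dtrace_epid_t) here;
 * dtrace_ecb_resize() recomputes them as actions are added.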
9699 */ 9700 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9701 ecb->dte_alignment = sizeof (dtrace_epid_t); 9702 9703 epid = state->dts_epid++; 9704 9705 if (epid - 1 >= state->dts_necbs) { 9706 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9707 int necbs = state->dts_necbs << 1; 9708 9709 ASSERT(epid == state->dts_necbs + 1); 9710 9711 if (necbs == 0) { 9712 ASSERT(oecbs == NULL); 9713 necbs = 1; 9714 } 9715 9716 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9717 9718 if (oecbs != NULL) 9719 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9720 9721 dtrace_membar_producer(); 9722 state->dts_ecbs = ecbs; 9723 9724 if (oecbs != NULL) { 9725 /* 9726 * If this state is active, we must dtrace_sync() 9727 * before we can free the old dts_ecbs array: we're 9728 * coming in hot, and there may be active ring 9729 * buffer processing (which indexes into the dts_ecbs 9730 * array) on another CPU. 9731 */ 9732 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9733 dtrace_sync(); 9734 9735 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9736 } 9737 9738 dtrace_membar_producer(); 9739 state->dts_necbs = necbs; 9740 } 9741 9742 ecb->dte_state = state; 9743 9744 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9745 dtrace_membar_producer(); 9746 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9747 9748 return (ecb); 9749 } 9750 9751 static int 9752 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9753 { 9754 dtrace_probe_t *probe = ecb->dte_probe; 9755 9756 ASSERT(MUTEX_HELD(&cpu_lock)); 9757 ASSERT(MUTEX_HELD(&dtrace_lock)); 9758 ASSERT(ecb->dte_next == NULL); 9759 9760 if (probe == NULL) { 9761 /* 9762 * This is the NULL probe -- there's nothing to do. 9763 */ 9764 return (0); 9765 } 9766 9767 if (probe->dtpr_ecb == NULL) { 9768 dtrace_provider_t *prov = probe->dtpr_provider; 9769 9770 /* 9771 * We're the first ECB on this probe. 9772 */ 9773 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9774 9775 if (ecb->dte_predicate != NULL) 9776 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9777 9778 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9779 probe->dtpr_id, probe->dtpr_arg)); 9780 } else { 9781 /* 9782 * This probe is already active. Swing the last pointer to 9783 * point to the new ECB, and issue a dtrace_sync() to assure 9784 * that all CPUs have seen the change. 9785 */ 9786 ASSERT(probe->dtpr_ecb_last != NULL); 9787 probe->dtpr_ecb_last->dte_next = ecb; 9788 probe->dtpr_ecb_last = ecb; 9789 probe->dtpr_predcache = 0; 9790 9791 dtrace_sync(); 9792 return (0); 9793 } 9794 } 9795 9796 static void 9797 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9798 { 9799 uint32_t maxalign = sizeof (dtrace_epid_t); 9800 uint32_t align = sizeof (uint8_t), offs, diff; 9801 dtrace_action_t *act; 9802 int wastuple = 0; 9803 uint32_t aggbase = UINT32_MAX; 9804 dtrace_state_t *state = ecb->dte_state; 9805 9806 /* 9807 * If we record anything, we always record the epid. (And we always 9808 * record it first.) 9809 */ 9810 offs = sizeof (dtrace_epid_t); 9811 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t); 9812 9813 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9814 dtrace_recdesc_t *rec = &act->dta_rec; 9815 9816 if ((align = rec->dtrd_alignment) > maxalign) 9817 maxalign = align; 9818 9819 if (!wastuple && act->dta_intuple) { 9820 /* 9821 * This is the first record in a tuple. Align the 9822 * offset to be at offset 4 in an 8-byte aligned 9823 * block. 
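 * The 4-byte aggregation ID is laid down immediately before the tuple, so
 * placing the first tuple record at an offset congruent to 4 (mod 8) puts
 * aggbase -- the offset of the ID itself -- on an 8-byte boundary.  For
 * example, if offs is currently 8, diff becomes 12, its low bits are 4,
 * offs is bumped to 12, and aggbase ends up at 8.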
9824 */ 9825 diff = offs + sizeof (dtrace_aggid_t); 9826 9827 if ((diff = (diff & (sizeof (uint64_t) - 1)))) 9828 offs += sizeof (uint64_t) - diff; 9829 9830 aggbase = offs - sizeof (dtrace_aggid_t); 9831 ASSERT(!(aggbase & (sizeof (uint64_t) - 1))); 9832 } 9833 9834 /*LINTED*/ 9835 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) { 9836 /* 9837 * The current offset is not properly aligned; align it. 9838 */ 9839 offs += align - diff; 9840 } 9841 9842 rec->dtrd_offset = offs; 9843 9844 if (offs + rec->dtrd_size > ecb->dte_needed) { 9845 ecb->dte_needed = offs + rec->dtrd_size; 9846 9847 if (ecb->dte_needed > state->dts_needed) 9848 state->dts_needed = ecb->dte_needed; 9849 } 9850 9851 if (DTRACEACT_ISAGG(act->dta_kind)) { 9852 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9853 dtrace_action_t *first = agg->dtag_first, *prev; 9854 9855 ASSERT(rec->dtrd_size != 0 && first != NULL); 9856 ASSERT(wastuple); 9857 ASSERT(aggbase != UINT32_MAX); 9858 9859 agg->dtag_base = aggbase; 9860 9861 while ((prev = first->dta_prev) != NULL && 9862 DTRACEACT_ISAGG(prev->dta_kind)) { 9863 agg = (dtrace_aggregation_t *)prev; 9864 first = agg->dtag_first; 9865 } 9866 9867 if (prev != NULL) { 9868 offs = prev->dta_rec.dtrd_offset + 9869 prev->dta_rec.dtrd_size; 9870 } else { 9871 offs = sizeof (dtrace_epid_t); 9872 } 9873 wastuple = 0; 9874 } else { 9875 if (!act->dta_intuple) 9876 ecb->dte_size = offs + rec->dtrd_size; 9877 9878 offs += rec->dtrd_size; 9879 } 9880 9881 wastuple = act->dta_intuple; 9882 } 9883 9884 if ((act = ecb->dte_action) != NULL && 9885 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9886 ecb->dte_size == sizeof (dtrace_epid_t)) { 9887 /* 9888 * If the size is still sizeof (dtrace_epid_t), then all 9889 * actions store no data; set the size to 0. 9890 */ 9891 ecb->dte_alignment = maxalign; 9892 ecb->dte_size = 0; 9893 9894 /* 9895 * If the needed space is still sizeof (dtrace_epid_t), then 9896 * all actions need no additional space; set the needed 9897 * size to 0. 9898 */ 9899 if (ecb->dte_needed == sizeof (dtrace_epid_t)) 9900 ecb->dte_needed = 0; 9901 9902 return; 9903 } 9904 9905 /* 9906 * Set our alignment, and make sure that the dte_size and dte_needed 9907 * are aligned to the size of an EPID. 
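 * With a 4-byte dtrace_epid_t this is the usual round-up-to-a-multiple
 * idiom: (x + 3) & ~3, so a size of 13 becomes 16 and a size that is
 * already a multiple of 4 is left unchanged.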
9908 */ 9909 ecb->dte_alignment = maxalign; 9910 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) & 9911 ~(sizeof (dtrace_epid_t) - 1); 9912 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) & 9913 ~(sizeof (dtrace_epid_t) - 1); 9914 ASSERT(ecb->dte_size <= ecb->dte_needed); 9915 } 9916 9917 static dtrace_action_t * 9918 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 9919 { 9920 dtrace_aggregation_t *agg; 9921 size_t size = sizeof (uint64_t); 9922 int ntuple = desc->dtad_ntuple; 9923 dtrace_action_t *act; 9924 dtrace_recdesc_t *frec; 9925 dtrace_aggid_t aggid; 9926 dtrace_state_t *state = ecb->dte_state; 9927 9928 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 9929 agg->dtag_ecb = ecb; 9930 9931 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 9932 9933 switch (desc->dtad_kind) { 9934 case DTRACEAGG_MIN: 9935 agg->dtag_initial = INT64_MAX; 9936 agg->dtag_aggregate = dtrace_aggregate_min; 9937 break; 9938 9939 case DTRACEAGG_MAX: 9940 agg->dtag_initial = INT64_MIN; 9941 agg->dtag_aggregate = dtrace_aggregate_max; 9942 break; 9943 9944 case DTRACEAGG_COUNT: 9945 agg->dtag_aggregate = dtrace_aggregate_count; 9946 break; 9947 9948 case DTRACEAGG_QUANTIZE: 9949 agg->dtag_aggregate = dtrace_aggregate_quantize; 9950 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 9951 sizeof (uint64_t); 9952 break; 9953 9954 case DTRACEAGG_LQUANTIZE: { 9955 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 9956 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 9957 9958 agg->dtag_initial = desc->dtad_arg; 9959 agg->dtag_aggregate = dtrace_aggregate_lquantize; 9960 9961 if (step == 0 || levels == 0) 9962 goto err; 9963 9964 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 9965 break; 9966 } 9967 9968 case DTRACEAGG_AVG: 9969 agg->dtag_aggregate = dtrace_aggregate_avg; 9970 size = sizeof (uint64_t) * 2; 9971 break; 9972 9973 case DTRACEAGG_STDDEV: 9974 agg->dtag_aggregate = dtrace_aggregate_stddev; 9975 size = sizeof (uint64_t) * 4; 9976 break; 9977 9978 case DTRACEAGG_SUM: 9979 agg->dtag_aggregate = dtrace_aggregate_sum; 9980 break; 9981 9982 default: 9983 goto err; 9984 } 9985 9986 agg->dtag_action.dta_rec.dtrd_size = size; 9987 9988 if (ntuple == 0) 9989 goto err; 9990 9991 /* 9992 * We must make sure that we have enough actions for the n-tuple. 9993 */ 9994 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 9995 if (DTRACEACT_ISAGG(act->dta_kind)) 9996 break; 9997 9998 if (--ntuple == 0) { 9999 /* 10000 * This is the action with which our n-tuple begins. 10001 */ 10002 agg->dtag_first = act; 10003 goto success; 10004 } 10005 } 10006 10007 /* 10008 * This n-tuple is short by ntuple elements. Return failure. 10009 */ 10010 ASSERT(ntuple != 0); 10011 err: 10012 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10013 return (NULL); 10014 10015 success: 10016 /* 10017 * If the last action in the tuple has a size of zero, it's actually 10018 * an expression argument for the aggregating action. 10019 */ 10020 ASSERT(ecb->dte_action_last != NULL); 10021 act = ecb->dte_action_last; 10022 10023 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10024 ASSERT(act->dta_difo != NULL); 10025 10026 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10027 agg->dtag_hasarg = 1; 10028 } 10029 10030 /* 10031 * We need to allocate an id for this aggregation. 
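 * Aggregation IDs come from the state's dts_aggid_arena; if the new ID
 * falls beyond the current dts_aggregations array, the array is doubled
 * (mirroring the way dts_ecbs grows in dtrace_ecb_add()) before the new
 * aggregation is installed at aggid - 1.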
10032 */ 10033 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10034 VM_BESTFIT | VM_SLEEP); 10035 10036 if (aggid - 1 >= state->dts_naggregations) { 10037 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10038 dtrace_aggregation_t **aggs; 10039 int naggs = state->dts_naggregations << 1; 10040 int onaggs = state->dts_naggregations; 10041 10042 ASSERT(aggid == state->dts_naggregations + 1); 10043 10044 if (naggs == 0) { 10045 ASSERT(oaggs == NULL); 10046 naggs = 1; 10047 } 10048 10049 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10050 10051 if (oaggs != NULL) { 10052 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10053 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10054 } 10055 10056 state->dts_aggregations = aggs; 10057 state->dts_naggregations = naggs; 10058 } 10059 10060 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10061 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10062 10063 frec = &agg->dtag_first->dta_rec; 10064 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10065 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10066 10067 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10068 ASSERT(!act->dta_intuple); 10069 act->dta_intuple = 1; 10070 } 10071 10072 return (&agg->dtag_action); 10073 } 10074 10075 static void 10076 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10077 { 10078 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10079 dtrace_state_t *state = ecb->dte_state; 10080 dtrace_aggid_t aggid = agg->dtag_id; 10081 10082 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10083 #if defined(sun) 10084 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10085 #else 10086 vmem_free(state->dts_aggid_arena, (uintptr_t)aggid, 1); 10087 #endif 10088 10089 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10090 state->dts_aggregations[aggid - 1] = NULL; 10091 10092 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10093 } 10094 10095 static int 10096 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10097 { 10098 dtrace_action_t *action, *last; 10099 dtrace_difo_t *dp = desc->dtad_difo; 10100 uint32_t size = 0, align = sizeof (uint8_t), mask; 10101 uint16_t format = 0; 10102 dtrace_recdesc_t *rec; 10103 dtrace_state_t *state = ecb->dte_state; 10104 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10105 uint64_t arg = desc->dtad_arg; 10106 10107 ASSERT(MUTEX_HELD(&dtrace_lock)); 10108 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10109 10110 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10111 /* 10112 * If this is an aggregating action, there must be neither 10113 * a speculate nor a commit on the action chain. 10114 */ 10115 dtrace_action_t *act; 10116 10117 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10118 if (act->dta_kind == DTRACEACT_COMMIT) 10119 return (EINVAL); 10120 10121 if (act->dta_kind == DTRACEACT_SPECULATE) 10122 return (EINVAL); 10123 } 10124 10125 action = dtrace_ecb_aggregation_create(ecb, desc); 10126 10127 if (action == NULL) 10128 return (EINVAL); 10129 } else { 10130 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10131 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10132 dp != NULL && dp->dtdo_destructive)) { 10133 state->dts_destructive = 1; 10134 } 10135 10136 switch (desc->dtad_kind) { 10137 case DTRACEACT_PRINTF: 10138 case DTRACEACT_PRINTA: 10139 case DTRACEACT_SYSTEM: 10140 case DTRACEACT_FREOPEN: 10141 /* 10142 * We know that our arg is a string -- turn it into a 10143 * format. 
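 * The format string is interned via dtrace_format_add() and the record
 * carries the returned one-based index in dtrd_format; printa() is the
 * one printf()-like action that may legitimately have no format, in which
 * case the index is left at zero.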
10144 */ 10145 if (arg == 0) { 10146 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA); 10147 format = 0; 10148 } else { 10149 ASSERT(arg != 0); 10150 #if defined(sun) 10151 ASSERT(arg > KERNELBASE); 10152 #endif 10153 format = dtrace_format_add(state, 10154 (char *)(uintptr_t)arg); 10155 } 10156 10157 /*FALLTHROUGH*/ 10158 case DTRACEACT_LIBACT: 10159 case DTRACEACT_DIFEXPR: 10160 if (dp == NULL) 10161 return (EINVAL); 10162 10163 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10164 break; 10165 10166 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10167 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10168 return (EINVAL); 10169 10170 size = opt[DTRACEOPT_STRSIZE]; 10171 } 10172 10173 break; 10174 10175 case DTRACEACT_STACK: 10176 if ((nframes = arg) == 0) { 10177 nframes = opt[DTRACEOPT_STACKFRAMES]; 10178 ASSERT(nframes > 0); 10179 arg = nframes; 10180 } 10181 10182 size = nframes * sizeof (pc_t); 10183 break; 10184 10185 case DTRACEACT_JSTACK: 10186 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10187 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10188 10189 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10190 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10191 10192 arg = DTRACE_USTACK_ARG(nframes, strsize); 10193 10194 /*FALLTHROUGH*/ 10195 case DTRACEACT_USTACK: 10196 if (desc->dtad_kind != DTRACEACT_JSTACK && 10197 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10198 strsize = DTRACE_USTACK_STRSIZE(arg); 10199 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10200 ASSERT(nframes > 0); 10201 arg = DTRACE_USTACK_ARG(nframes, strsize); 10202 } 10203 10204 /* 10205 * Save a slot for the pid. 10206 */ 10207 size = (nframes + 1) * sizeof (uint64_t); 10208 size += DTRACE_USTACK_STRSIZE(arg); 10209 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10210 10211 break; 10212 10213 case DTRACEACT_SYM: 10214 case DTRACEACT_MOD: 10215 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10216 sizeof (uint64_t)) || 10217 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10218 return (EINVAL); 10219 break; 10220 10221 case DTRACEACT_USYM: 10222 case DTRACEACT_UMOD: 10223 case DTRACEACT_UADDR: 10224 if (dp == NULL || 10225 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10226 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10227 return (EINVAL); 10228 10229 /* 10230 * We have a slot for the pid, plus a slot for the 10231 * argument. To keep things simple (aligned with 10232 * bitness-neutral sizing), we store each as a 64-bit 10233 * quantity. 
10234 */ 10235 size = 2 * sizeof (uint64_t); 10236 break; 10237 10238 case DTRACEACT_STOP: 10239 case DTRACEACT_BREAKPOINT: 10240 case DTRACEACT_PANIC: 10241 break; 10242 10243 case DTRACEACT_CHILL: 10244 case DTRACEACT_DISCARD: 10245 case DTRACEACT_RAISE: 10246 if (dp == NULL) 10247 return (EINVAL); 10248 break; 10249 10250 case DTRACEACT_EXIT: 10251 if (dp == NULL || 10252 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10253 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10254 return (EINVAL); 10255 break; 10256 10257 case DTRACEACT_SPECULATE: 10258 if (ecb->dte_size > sizeof (dtrace_epid_t)) 10259 return (EINVAL); 10260 10261 if (dp == NULL) 10262 return (EINVAL); 10263 10264 state->dts_speculates = 1; 10265 break; 10266 10267 case DTRACEACT_PRINTM: 10268 size = dp->dtdo_rtype.dtdt_size; 10269 break; 10270 10271 case DTRACEACT_PRINTT: 10272 size = dp->dtdo_rtype.dtdt_size; 10273 break; 10274 10275 case DTRACEACT_COMMIT: { 10276 dtrace_action_t *act = ecb->dte_action; 10277 10278 for (; act != NULL; act = act->dta_next) { 10279 if (act->dta_kind == DTRACEACT_COMMIT) 10280 return (EINVAL); 10281 } 10282 10283 if (dp == NULL) 10284 return (EINVAL); 10285 break; 10286 } 10287 10288 default: 10289 return (EINVAL); 10290 } 10291 10292 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10293 /* 10294 * If this is a data-storing action or a speculate, 10295 * we must be sure that there isn't a commit on the 10296 * action chain. 10297 */ 10298 dtrace_action_t *act = ecb->dte_action; 10299 10300 for (; act != NULL; act = act->dta_next) { 10301 if (act->dta_kind == DTRACEACT_COMMIT) 10302 return (EINVAL); 10303 } 10304 } 10305 10306 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10307 action->dta_rec.dtrd_size = size; 10308 } 10309 10310 action->dta_refcnt = 1; 10311 rec = &action->dta_rec; 10312 size = rec->dtrd_size; 10313 10314 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10315 if (!(size & mask)) { 10316 align = mask + 1; 10317 break; 10318 } 10319 } 10320 10321 action->dta_kind = desc->dtad_kind; 10322 10323 if ((action->dta_difo = dp) != NULL) 10324 dtrace_difo_hold(dp); 10325 10326 rec->dtrd_action = action->dta_kind; 10327 rec->dtrd_arg = arg; 10328 rec->dtrd_uarg = desc->dtad_uarg; 10329 rec->dtrd_alignment = (uint16_t)align; 10330 rec->dtrd_format = format; 10331 10332 if ((last = ecb->dte_action_last) != NULL) { 10333 ASSERT(ecb->dte_action != NULL); 10334 action->dta_prev = last; 10335 last->dta_next = action; 10336 } else { 10337 ASSERT(ecb->dte_action == NULL); 10338 ecb->dte_action = action; 10339 } 10340 10341 ecb->dte_action_last = action; 10342 10343 return (0); 10344 } 10345 10346 static void 10347 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10348 { 10349 dtrace_action_t *act = ecb->dte_action, *next; 10350 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10351 dtrace_difo_t *dp; 10352 uint16_t format; 10353 10354 if (act != NULL && act->dta_refcnt > 1) { 10355 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10356 act->dta_refcnt--; 10357 } else { 10358 for (; act != NULL; act = next) { 10359 next = act->dta_next; 10360 ASSERT(next != NULL || act == ecb->dte_action_last); 10361 ASSERT(act->dta_refcnt == 1); 10362 10363 if ((format = act->dta_rec.dtrd_format) != 0) 10364 dtrace_format_remove(ecb->dte_state, format); 10365 10366 if ((dp = act->dta_difo) != NULL) 10367 dtrace_difo_release(dp, vstate); 10368 10369 if (DTRACEACT_ISAGG(act->dta_kind)) { 10370 dtrace_ecb_aggregation_destroy(ecb, act); 10371 } else 
{ 10372 kmem_free(act, sizeof (dtrace_action_t)); 10373 } 10374 } 10375 } 10376 10377 ecb->dte_action = NULL; 10378 ecb->dte_action_last = NULL; 10379 ecb->dte_size = sizeof (dtrace_epid_t); 10380 } 10381 10382 static void 10383 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10384 { 10385 /* 10386 * We disable the ECB by removing it from its probe. 10387 */ 10388 dtrace_ecb_t *pecb, *prev = NULL; 10389 dtrace_probe_t *probe = ecb->dte_probe; 10390 10391 ASSERT(MUTEX_HELD(&dtrace_lock)); 10392 10393 if (probe == NULL) { 10394 /* 10395 * This is the NULL probe; there is nothing to disable. 10396 */ 10397 return; 10398 } 10399 10400 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10401 if (pecb == ecb) 10402 break; 10403 prev = pecb; 10404 } 10405 10406 ASSERT(pecb != NULL); 10407 10408 if (prev == NULL) { 10409 probe->dtpr_ecb = ecb->dte_next; 10410 } else { 10411 prev->dte_next = ecb->dte_next; 10412 } 10413 10414 if (ecb == probe->dtpr_ecb_last) { 10415 ASSERT(ecb->dte_next == NULL); 10416 probe->dtpr_ecb_last = prev; 10417 } 10418 10419 /* 10420 * The ECB has been disconnected from the probe; now sync to assure 10421 * that all CPUs have seen the change before returning. 10422 */ 10423 dtrace_sync(); 10424 10425 if (probe->dtpr_ecb == NULL) { 10426 /* 10427 * That was the last ECB on the probe; clear the predicate 10428 * cache ID for the probe, disable it and sync one more time 10429 * to assure that we'll never hit it again. 10430 */ 10431 dtrace_provider_t *prov = probe->dtpr_provider; 10432 10433 ASSERT(ecb->dte_next == NULL); 10434 ASSERT(probe->dtpr_ecb_last == NULL); 10435 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10436 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10437 probe->dtpr_id, probe->dtpr_arg); 10438 dtrace_sync(); 10439 } else { 10440 /* 10441 * There is at least one ECB remaining on the probe. If there 10442 * is _exactly_ one, set the probe's predicate cache ID to be 10443 * the predicate cache ID of the remaining ECB. 
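 * (The predicate cache ID is what allows dtrace_probe() to skip this
 * probe cheaply for threads that are already known to fail the
 * predicate; it can therefore only be set when a single ECB -- and
 * hence a single predicate -- remains on the probe.)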
10444 */ 10445 ASSERT(probe->dtpr_ecb_last != NULL); 10446 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10447 10448 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10449 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10450 10451 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10452 10453 if (p != NULL) 10454 probe->dtpr_predcache = p->dtp_cacheid; 10455 } 10456 10457 ecb->dte_next = NULL; 10458 } 10459 } 10460 10461 static void 10462 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10463 { 10464 dtrace_state_t *state = ecb->dte_state; 10465 dtrace_vstate_t *vstate = &state->dts_vstate; 10466 dtrace_predicate_t *pred; 10467 dtrace_epid_t epid = ecb->dte_epid; 10468 10469 ASSERT(MUTEX_HELD(&dtrace_lock)); 10470 ASSERT(ecb->dte_next == NULL); 10471 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10472 10473 if ((pred = ecb->dte_predicate) != NULL) 10474 dtrace_predicate_release(pred, vstate); 10475 10476 dtrace_ecb_action_remove(ecb); 10477 10478 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10479 state->dts_ecbs[epid - 1] = NULL; 10480 10481 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10482 } 10483 10484 static dtrace_ecb_t * 10485 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10486 dtrace_enabling_t *enab) 10487 { 10488 dtrace_ecb_t *ecb; 10489 dtrace_predicate_t *pred; 10490 dtrace_actdesc_t *act; 10491 dtrace_provider_t *prov; 10492 dtrace_ecbdesc_t *desc = enab->dten_current; 10493 10494 ASSERT(MUTEX_HELD(&dtrace_lock)); 10495 ASSERT(state != NULL); 10496 10497 ecb = dtrace_ecb_add(state, probe); 10498 ecb->dte_uarg = desc->dted_uarg; 10499 10500 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10501 dtrace_predicate_hold(pred); 10502 ecb->dte_predicate = pred; 10503 } 10504 10505 if (probe != NULL) { 10506 /* 10507 * If the provider shows more leg than the consumer is old 10508 * enough to see, we need to enable the appropriate implicit 10509 * predicate bits to prevent the ecb from activating at 10510 * revealing times. 10511 * 10512 * Providers specifying DTRACE_PRIV_USER at register time 10513 * are stating that they need the /proc-style privilege 10514 * model to be enforced, and this is what DTRACE_COND_OWNER 10515 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10516 */ 10517 prov = probe->dtpr_provider; 10518 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10519 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10520 ecb->dte_cond |= DTRACE_COND_OWNER; 10521 10522 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10523 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10524 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10525 10526 /* 10527 * If the provider shows us kernel innards and the user 10528 * is lacking sufficient privilege, enable the 10529 * DTRACE_COND_USERMODE implicit predicate. 10530 */ 10531 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10532 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10533 ecb->dte_cond |= DTRACE_COND_USERMODE; 10534 } 10535 10536 if (dtrace_ecb_create_cache != NULL) { 10537 /* 10538 * If we have a cached ecb, we'll use its action list instead 10539 * of creating our own (saving both time and space). 
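 * The cached ECB's action list is shared by reference: the dta_refcnt
 * increment below is what later lets dtrace_ecb_action_remove()
 * distinguish a shared action list (which is merely released) from one
 * that this ECB owns outright (which is destroyed).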
10540 */ 10541 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10542 dtrace_action_t *act = cached->dte_action; 10543 10544 if (act != NULL) { 10545 ASSERT(act->dta_refcnt > 0); 10546 act->dta_refcnt++; 10547 ecb->dte_action = act; 10548 ecb->dte_action_last = cached->dte_action_last; 10549 ecb->dte_needed = cached->dte_needed; 10550 ecb->dte_size = cached->dte_size; 10551 ecb->dte_alignment = cached->dte_alignment; 10552 } 10553 10554 return (ecb); 10555 } 10556 10557 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10558 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10559 dtrace_ecb_destroy(ecb); 10560 return (NULL); 10561 } 10562 } 10563 10564 dtrace_ecb_resize(ecb); 10565 10566 return (dtrace_ecb_create_cache = ecb); 10567 } 10568 10569 static int 10570 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10571 { 10572 dtrace_ecb_t *ecb; 10573 dtrace_enabling_t *enab = arg; 10574 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10575 10576 ASSERT(state != NULL); 10577 10578 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10579 /* 10580 * This probe was created in a generation for which this 10581 * enabling has previously created ECBs; we don't want to 10582 * enable it again, so just kick out. 10583 */ 10584 return (DTRACE_MATCH_NEXT); 10585 } 10586 10587 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10588 return (DTRACE_MATCH_DONE); 10589 10590 if (dtrace_ecb_enable(ecb) < 0) 10591 return (DTRACE_MATCH_FAIL); 10592 10593 return (DTRACE_MATCH_NEXT); 10594 } 10595 10596 static dtrace_ecb_t * 10597 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10598 { 10599 dtrace_ecb_t *ecb; 10600 10601 ASSERT(MUTEX_HELD(&dtrace_lock)); 10602 10603 if (id == 0 || id > state->dts_necbs) 10604 return (NULL); 10605 10606 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10607 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10608 10609 return (state->dts_ecbs[id - 1]); 10610 } 10611 10612 static dtrace_aggregation_t * 10613 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10614 { 10615 dtrace_aggregation_t *agg; 10616 10617 ASSERT(MUTEX_HELD(&dtrace_lock)); 10618 10619 if (id == 0 || id > state->dts_naggregations) 10620 return (NULL); 10621 10622 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10623 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10624 agg->dtag_id == id); 10625 10626 return (state->dts_aggregations[id - 1]); 10627 } 10628 10629 /* 10630 * DTrace Buffer Functions 10631 * 10632 * The following functions manipulate DTrace buffers. Most of these functions 10633 * are called in the context of establishing or processing consumer state; 10634 * exceptions are explicitly noted. 10635 */ 10636 10637 /* 10638 * Note: called from cross call context. This function switches the two 10639 * buffers on a given CPU. The atomicity of this operation is assured by 10640 * disabling interrupts while the actual switch takes place; the disabling of 10641 * interrupts serializes the execution with any execution of dtrace_probe() on 10642 * the same CPU. 
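 *
 * This function is reached via a cross call from elsewhere in this
 * file (for example, when a consumer snapshots a switching buffer),
 * roughly of the form:
 *
 *	dtrace_xcall(cpu, (dtrace_xcall_t)dtrace_buffer_switch, buf);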
10643 */ 10644 static void 10645 dtrace_buffer_switch(dtrace_buffer_t *buf) 10646 { 10647 caddr_t tomax = buf->dtb_tomax; 10648 caddr_t xamot = buf->dtb_xamot; 10649 dtrace_icookie_t cookie; 10650 10651 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10652 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10653 10654 cookie = dtrace_interrupt_disable(); 10655 buf->dtb_tomax = xamot; 10656 buf->dtb_xamot = tomax; 10657 buf->dtb_xamot_drops = buf->dtb_drops; 10658 buf->dtb_xamot_offset = buf->dtb_offset; 10659 buf->dtb_xamot_errors = buf->dtb_errors; 10660 buf->dtb_xamot_flags = buf->dtb_flags; 10661 buf->dtb_offset = 0; 10662 buf->dtb_drops = 0; 10663 buf->dtb_errors = 0; 10664 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10665 dtrace_interrupt_enable(cookie); 10666 } 10667 10668 /* 10669 * Note: called from cross call context. This function activates a buffer 10670 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10671 * is guaranteed by the disabling of interrupts. 10672 */ 10673 static void 10674 dtrace_buffer_activate(dtrace_state_t *state) 10675 { 10676 dtrace_buffer_t *buf; 10677 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10678 10679 buf = &state->dts_buffer[curcpu_id]; 10680 10681 if (buf->dtb_tomax != NULL) { 10682 /* 10683 * We might like to assert that the buffer is marked inactive, 10684 * but this isn't necessarily true: the buffer for the CPU 10685 * that processes the BEGIN probe has its buffer activated 10686 * manually. In this case, we take the (harmless) action 10687 * re-clearing the bit INACTIVE bit. 10688 */ 10689 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10690 } 10691 10692 dtrace_interrupt_enable(cookie); 10693 } 10694 10695 static int 10696 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10697 processorid_t cpu) 10698 { 10699 #if defined(sun) 10700 cpu_t *cp; 10701 #else 10702 CPU_INFO_ITERATOR cpuind; 10703 struct cpu_info *cinfo; 10704 #endif 10705 dtrace_buffer_t *buf; 10706 10707 #if defined(sun) 10708 ASSERT(MUTEX_HELD(&cpu_lock)); 10709 ASSERT(MUTEX_HELD(&dtrace_lock)); 10710 10711 if (size > dtrace_nonroot_maxsize && 10712 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10713 return (EFBIG); 10714 10715 cp = cpu_list; 10716 10717 do { 10718 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10719 continue; 10720 10721 buf = &bufs[cp->cpu_id]; 10722 10723 /* 10724 * If there is already a buffer allocated for this CPU, it 10725 * is only possible that this is a DR event. 
In this case, 10726 */ 10727 if (buf->dtb_tomax != NULL) { 10728 ASSERT(buf->dtb_size == size); 10729 continue; 10730 } 10731 10732 ASSERT(buf->dtb_xamot == NULL); 10733 10734 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10735 goto err; 10736 10737 buf->dtb_size = size; 10738 buf->dtb_flags = flags; 10739 buf->dtb_offset = 0; 10740 buf->dtb_drops = 0; 10741 10742 if (flags & DTRACEBUF_NOSWITCH) 10743 continue; 10744 10745 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10746 goto err; 10747 } while ((cp = cp->cpu_next) != cpu_list); 10748 10749 return (0); 10750 10751 err: 10752 cp = cpu_list; 10753 10754 do { 10755 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10756 continue; 10757 10758 buf = &bufs[cp->cpu_id]; 10759 10760 if (buf->dtb_xamot != NULL) { 10761 ASSERT(buf->dtb_tomax != NULL); 10762 ASSERT(buf->dtb_size == size); 10763 kmem_free(buf->dtb_xamot, size); 10764 } 10765 10766 if (buf->dtb_tomax != NULL) { 10767 ASSERT(buf->dtb_size == size); 10768 kmem_free(buf->dtb_tomax, size); 10769 } 10770 10771 buf->dtb_tomax = NULL; 10772 buf->dtb_xamot = NULL; 10773 buf->dtb_size = 0; 10774 } while ((cp = cp->cpu_next) != cpu_list); 10775 10776 return (ENOMEM); 10777 #else 10778 10779 #if defined(__amd64__) 10780 /* 10781 * FreeBSD isn't good at limiting the amount of memory we 10782 * ask to malloc, so let's place a limit here before trying 10783 * to do something that might well end in tears at bedtime. 10784 */ 10785 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10786 return(ENOMEM); 10787 #endif 10788 10789 ASSERT(MUTEX_HELD(&dtrace_lock)); 10790 for (CPU_INFO_FOREACH(cpuind, cinfo)) { 10791 if (cpu != DTRACE_CPUALL && cpu != cpu_index(cinfo)) 10792 continue; 10793 10794 buf = &bufs[cpu_index(cinfo)]; 10795 10796 /* 10797 * If there is already a buffer allocated for this CPU, it 10798 * is only possible that this is a DR event. In this case, 10799 * the buffer size must match our specified size. 10800 */ 10801 if (buf->dtb_tomax != NULL) { 10802 ASSERT(buf->dtb_size == size); 10803 continue; 10804 } 10805 10806 ASSERT(buf->dtb_xamot == NULL); 10807 10808 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10809 goto err; 10810 10811 buf->dtb_size = size; 10812 buf->dtb_flags = flags; 10813 buf->dtb_offset = 0; 10814 buf->dtb_drops = 0; 10815 10816 if (flags & DTRACEBUF_NOSWITCH) 10817 continue; 10818 10819 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10820 goto err; 10821 } 10822 10823 return (0); 10824 10825 err: 10826 /* 10827 * Error allocating memory, so free the buffers that were 10828 * allocated before the failed allocation. 10829 */ 10830 for (CPU_INFO_FOREACH(cpuind, cinfo)) { 10831 if (cpu != DTRACE_CPUALL && cpu != cpu_index(cinfo)) 10832 continue; 10833 10834 buf = &bufs[cpu_index(cinfo)]; 10835 10836 if (buf->dtb_xamot != NULL) { 10837 ASSERT(buf->dtb_tomax != NULL); 10838 ASSERT(buf->dtb_size == size); 10839 kmem_free(buf->dtb_xamot, size); 10840 } 10841 10842 if (buf->dtb_tomax != NULL) { 10843 ASSERT(buf->dtb_size == size); 10844 kmem_free(buf->dtb_tomax, size); 10845 } 10846 10847 buf->dtb_tomax = NULL; 10848 buf->dtb_xamot = NULL; 10849 buf->dtb_size = 0; 10850 10851 } 10852 10853 return (ENOMEM); 10854 #endif 10855 } 10856 10857 /* 10858 * Note: called from probe context. This function just increments the drop 10859 * count on a buffer. It has been made a function to allow for the 10860 * possibility of understanding the source of mysterious drop counts. 
(A 10861 * problem for which one may be particularly disappointed that DTrace cannot 10862 * be used to understand DTrace.) 10863 */ 10864 static void 10865 dtrace_buffer_drop(dtrace_buffer_t *buf) 10866 { 10867 buf->dtb_drops++; 10868 } 10869 10870 /* 10871 * Note: called from probe context. This function is called to reserve space 10872 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10873 * mstate. Returns the new offset in the buffer, or a negative value if an 10874 * error has occurred. 10875 */ 10876 static intptr_t 10877 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 10878 dtrace_state_t *state, dtrace_mstate_t *mstate) 10879 { 10880 intptr_t offs = buf->dtb_offset, soffs; 10881 intptr_t woffs; 10882 caddr_t tomax; 10883 size_t total; 10884 10885 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 10886 return (-1); 10887 10888 if ((tomax = buf->dtb_tomax) == NULL) { 10889 dtrace_buffer_drop(buf); 10890 return (-1); 10891 } 10892 10893 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 10894 while (offs & (align - 1)) { 10895 /* 10896 * Assert that our alignment is off by a number which 10897 * is itself sizeof (uint32_t) aligned. 10898 */ 10899 ASSERT(!((align - (offs & (align - 1))) & 10900 (sizeof (uint32_t) - 1))); 10901 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 10902 offs += sizeof (uint32_t); 10903 } 10904 10905 if ((soffs = offs + needed) > buf->dtb_size) { 10906 dtrace_buffer_drop(buf); 10907 return (-1); 10908 } 10909 10910 if (mstate == NULL) 10911 return (offs); 10912 10913 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 10914 mstate->dtms_scratch_size = buf->dtb_size - soffs; 10915 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 10916 10917 return (offs); 10918 } 10919 10920 if (buf->dtb_flags & DTRACEBUF_FILL) { 10921 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 10922 (buf->dtb_flags & DTRACEBUF_FULL)) 10923 return (-1); 10924 goto out; 10925 } 10926 10927 total = needed + (offs & (align - 1)); 10928 10929 /* 10930 * For a ring buffer, life is quite a bit more complicated. Before 10931 * we can store any padding, we need to adjust our wrapping offset. 10932 * (If we've never before wrapped or we're not about to, no adjustment 10933 * is required.) 10934 */ 10935 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 10936 offs + total > buf->dtb_size) { 10937 woffs = buf->dtb_xamot_offset; 10938 10939 if (offs + total > buf->dtb_size) { 10940 /* 10941 * We can't fit in the end of the buffer. First, a 10942 * sanity check that we can fit in the buffer at all. 10943 */ 10944 if (total > buf->dtb_size) { 10945 dtrace_buffer_drop(buf); 10946 return (-1); 10947 } 10948 10949 /* 10950 * We're going to be storing at the top of the buffer, 10951 * so now we need to deal with the wrapped offset. We 10952 * only reset our wrapped offset to 0 if it is 10953 * currently greater than the current offset. If it 10954 * is less than the current offset, it is because a 10955 * previous allocation induced a wrap -- but the 10956 * allocation didn't subsequently take the space due 10957 * to an error or false predicate evaluation. In this 10958 * case, we'll just leave the wrapped offset alone: if 10959 * the wrapped offset hasn't been advanced far enough 10960 * for this allocation, it will be adjusted in the 10961 * lower loop. 
10962 */ 10963 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 10964 if (woffs >= offs) 10965 woffs = 0; 10966 } else { 10967 woffs = 0; 10968 } 10969 10970 /* 10971 * Now we know that we're going to be storing to the 10972 * top of the buffer and that there is room for us 10973 * there. We need to clear the buffer from the current 10974 * offset to the end (there may be old gunk there). 10975 */ 10976 while (offs < buf->dtb_size) 10977 tomax[offs++] = 0; 10978 10979 /* 10980 * We need to set our offset to zero. And because we 10981 * are wrapping, we need to set the bit indicating as 10982 * much. We can also adjust our needed space back 10983 * down to the space required by the ECB -- we know 10984 * that the top of the buffer is aligned. 10985 */ 10986 offs = 0; 10987 total = needed; 10988 buf->dtb_flags |= DTRACEBUF_WRAPPED; 10989 } else { 10990 /* 10991 * There is room for us in the buffer, so we simply 10992 * need to check the wrapped offset. 10993 */ 10994 if (woffs < offs) { 10995 /* 10996 * The wrapped offset is less than the offset. 10997 * This can happen if we allocated buffer space 10998 * that induced a wrap, but then we didn't 10999 * subsequently take the space due to an error 11000 * or false predicate evaluation. This is 11001 * okay; we know that _this_ allocation isn't 11002 * going to induce a wrap. We still can't 11003 * reset the wrapped offset to be zero, 11004 * however: the space may have been trashed in 11005 * the previous failed probe attempt. But at 11006 * least the wrapped offset doesn't need to 11007 * be adjusted at all... 11008 */ 11009 goto out; 11010 } 11011 } 11012 11013 while (offs + total > woffs) { 11014 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11015 size_t size; 11016 11017 if (epid == DTRACE_EPIDNONE) { 11018 size = sizeof (uint32_t); 11019 } else { 11020 ASSERT(epid <= state->dts_necbs); 11021 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11022 11023 size = state->dts_ecbs[epid - 1]->dte_size; 11024 } 11025 11026 ASSERT(woffs + size <= buf->dtb_size); 11027 ASSERT(size != 0); 11028 11029 if (woffs + size == buf->dtb_size) { 11030 /* 11031 * We've reached the end of the buffer; we want 11032 * to set the wrapped offset to 0 and break 11033 * out. However, if the offs is 0, then we're 11034 * in a strange edge-condition: the amount of 11035 * space that we want to reserve plus the size 11036 * of the record that we're overwriting is 11037 * greater than the size of the buffer. This 11038 * is problematic because if we reserve the 11039 * space but subsequently don't consume it (due 11040 * to a failed predicate or error) the wrapped 11041 * offset will be 0 -- yet the EPID at offset 0 11042 * will not be committed. This situation is 11043 * relatively easy to deal with: if we're in 11044 * this case, the buffer is indistinguishable 11045 * from one that hasn't wrapped; we need only 11046 * finish the job by clearing the wrapped bit, 11047 * explicitly setting the offset to be 0, and 11048 * zero'ing out the old data in the buffer. 11049 */ 11050 if (offs == 0) { 11051 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11052 buf->dtb_offset = 0; 11053 woffs = total; 11054 11055 while (woffs < buf->dtb_size) 11056 tomax[woffs++] = 0; 11057 } 11058 11059 woffs = 0; 11060 break; 11061 } 11062 11063 woffs += size; 11064 } 11065 11066 /* 11067 * We have a wrapped offset. It may be that the wrapped offset 11068 * has become zero -- that's okay. 
11069 */ 11070 buf->dtb_xamot_offset = woffs; 11071 } 11072 11073 out: 11074 /* 11075 * Now we can plow the buffer with any necessary padding. 11076 */ 11077 while (offs & (align - 1)) { 11078 /* 11079 * Assert that our alignment is off by a number which 11080 * is itself sizeof (uint32_t) aligned. 11081 */ 11082 ASSERT(!((align - (offs & (align - 1))) & 11083 (sizeof (uint32_t) - 1))); 11084 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11085 offs += sizeof (uint32_t); 11086 } 11087 11088 if (buf->dtb_flags & DTRACEBUF_FILL) { 11089 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11090 buf->dtb_flags |= DTRACEBUF_FULL; 11091 return (-1); 11092 } 11093 } 11094 11095 if (mstate == NULL) 11096 return (offs); 11097 11098 /* 11099 * For ring buffers and fill buffers, the scratch space is always 11100 * the inactive buffer. 11101 */ 11102 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11103 mstate->dtms_scratch_size = buf->dtb_size; 11104 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11105 11106 return (offs); 11107 } 11108 11109 static void 11110 dtrace_buffer_polish(dtrace_buffer_t *buf) 11111 { 11112 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11113 ASSERT(MUTEX_HELD(&dtrace_lock)); 11114 11115 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11116 return; 11117 11118 /* 11119 * We need to polish the ring buffer. There are three cases: 11120 * 11121 * - The first (and presumably most common) is that there is no gap 11122 * between the buffer offset and the wrapped offset. In this case, 11123 * there is nothing in the buffer that isn't valid data; we can 11124 * mark the buffer as polished and return. 11125 * 11126 * - The second (less common than the first but still more common 11127 * than the third) is that there is a gap between the buffer offset 11128 * and the wrapped offset, and the wrapped offset is larger than the 11129 * buffer offset. This can happen because of an alignment issue, or 11130 * can happen because of a call to dtrace_buffer_reserve() that 11131 * didn't subsequently consume the buffer space. In this case, 11132 * we need to zero the data from the buffer offset to the wrapped 11133 * offset. 11134 * 11135 * - The third (and least common) is that there is a gap between the 11136 * buffer offset and the wrapped offset, but the wrapped offset is 11137 * _less_ than the buffer offset. This can only happen because a 11138 * call to dtrace_buffer_reserve() induced a wrap, but the space 11139 * was not subsequently consumed. In this case, we need to zero the 11140 * space from the offset to the end of the buffer _and_ from the 11141 * top of the buffer to the wrapped offset. 
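 *
 * As a concrete illustration of the latter two cases: with a 64-byte
 * buffer, an offset of 16 and a wrapped offset of 40, bytes [16, 40)
 * are zeroed; with an offset of 40 and a wrapped offset of 16, bytes
 * [40, 64) and [0, 16) are zeroed.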
11142 */ 11143 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11144 bzero(buf->dtb_tomax + buf->dtb_offset, 11145 buf->dtb_xamot_offset - buf->dtb_offset); 11146 } 11147 11148 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11149 bzero(buf->dtb_tomax + buf->dtb_offset, 11150 buf->dtb_size - buf->dtb_offset); 11151 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11152 } 11153 } 11154 11155 static void 11156 dtrace_buffer_free(dtrace_buffer_t *bufs) 11157 { 11158 int i; 11159 11160 for (i = 0; i < NCPU; i++) { 11161 dtrace_buffer_t *buf = &bufs[i]; 11162 11163 if (buf->dtb_tomax == NULL) { 11164 ASSERT(buf->dtb_xamot == NULL); 11165 ASSERT(buf->dtb_size == 0); 11166 continue; 11167 } 11168 11169 if (buf->dtb_xamot != NULL) { 11170 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11171 kmem_free(buf->dtb_xamot, buf->dtb_size); 11172 } 11173 11174 kmem_free(buf->dtb_tomax, buf->dtb_size); 11175 buf->dtb_size = 0; 11176 buf->dtb_tomax = NULL; 11177 buf->dtb_xamot = NULL; 11178 } 11179 } 11180 11181 /* 11182 * DTrace Enabling Functions 11183 */ 11184 static dtrace_enabling_t * 11185 dtrace_enabling_create(dtrace_vstate_t *vstate) 11186 { 11187 dtrace_enabling_t *enab; 11188 11189 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11190 enab->dten_vstate = vstate; 11191 11192 return (enab); 11193 } 11194 11195 static void 11196 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11197 { 11198 dtrace_ecbdesc_t **ndesc; 11199 size_t osize, nsize; 11200 11201 /* 11202 * We can't add to enablings after we've enabled them, or after we've 11203 * retained them. 11204 */ 11205 ASSERT(enab->dten_probegen == 0); 11206 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11207 11208 if (enab->dten_ndesc < enab->dten_maxdesc) { 11209 enab->dten_desc[enab->dten_ndesc++] = ecb; 11210 return; 11211 } 11212 11213 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11214 11215 if (enab->dten_maxdesc == 0) { 11216 enab->dten_maxdesc = 1; 11217 } else { 11218 enab->dten_maxdesc <<= 1; 11219 } 11220 11221 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11222 11223 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11224 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11225 bcopy(enab->dten_desc, ndesc, osize); 11226 if (enab->dten_desc != NULL) 11227 kmem_free(enab->dten_desc, osize); 11228 11229 enab->dten_desc = ndesc; 11230 enab->dten_desc[enab->dten_ndesc++] = ecb; 11231 } 11232 11233 static void 11234 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11235 dtrace_probedesc_t *pd) 11236 { 11237 dtrace_ecbdesc_t *new; 11238 dtrace_predicate_t *pred; 11239 dtrace_actdesc_t *act; 11240 11241 /* 11242 * We're going to create a new ECB description that matches the 11243 * specified ECB in every way, but has the specified probe description. 
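 * Note that the predicate and action descriptions are shared with the
 * original rather than copied; we simply take additional holds on them
 * below.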
11244 */ 11245 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11246 11247 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11248 dtrace_predicate_hold(pred); 11249 11250 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11251 dtrace_actdesc_hold(act); 11252 11253 new->dted_action = ecb->dted_action; 11254 new->dted_pred = ecb->dted_pred; 11255 new->dted_probe = *pd; 11256 new->dted_uarg = ecb->dted_uarg; 11257 11258 dtrace_enabling_add(enab, new); 11259 } 11260 11261 static void 11262 dtrace_enabling_dump(dtrace_enabling_t *enab) 11263 { 11264 int i; 11265 11266 for (i = 0; i < enab->dten_ndesc; i++) { 11267 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11268 11269 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11270 desc->dtpd_provider, desc->dtpd_mod, 11271 desc->dtpd_func, desc->dtpd_name); 11272 } 11273 } 11274 11275 static void 11276 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11277 { 11278 int i; 11279 dtrace_ecbdesc_t *ep; 11280 dtrace_vstate_t *vstate = enab->dten_vstate; 11281 11282 ASSERT(MUTEX_HELD(&dtrace_lock)); 11283 11284 for (i = 0; i < enab->dten_ndesc; i++) { 11285 dtrace_actdesc_t *act, *next; 11286 dtrace_predicate_t *pred; 11287 11288 ep = enab->dten_desc[i]; 11289 11290 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11291 dtrace_predicate_release(pred, vstate); 11292 11293 for (act = ep->dted_action; act != NULL; act = next) { 11294 next = act->dtad_next; 11295 dtrace_actdesc_release(act, vstate); 11296 } 11297 11298 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11299 } 11300 11301 if (enab->dten_desc != NULL) 11302 kmem_free(enab->dten_desc, 11303 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11304 11305 /* 11306 * If this was a retained enabling, decrement the dts_nretained count 11307 * and take it off of the dtrace_retained list. 11308 */ 11309 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11310 dtrace_retained == enab) { 11311 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11312 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11313 enab->dten_vstate->dtvs_state->dts_nretained--; 11314 } 11315 11316 if (enab->dten_prev == NULL) { 11317 if (dtrace_retained == enab) { 11318 dtrace_retained = enab->dten_next; 11319 11320 if (dtrace_retained != NULL) 11321 dtrace_retained->dten_prev = NULL; 11322 } 11323 } else { 11324 ASSERT(enab != dtrace_retained); 11325 ASSERT(dtrace_retained != NULL); 11326 enab->dten_prev->dten_next = enab->dten_next; 11327 } 11328 11329 if (enab->dten_next != NULL) { 11330 ASSERT(dtrace_retained != NULL); 11331 enab->dten_next->dten_prev = enab->dten_prev; 11332 } 11333 11334 kmem_free(enab, sizeof (dtrace_enabling_t)); 11335 } 11336 11337 static int 11338 dtrace_enabling_retain(dtrace_enabling_t *enab) 11339 { 11340 dtrace_state_t *state; 11341 11342 ASSERT(MUTEX_HELD(&dtrace_lock)); 11343 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11344 ASSERT(enab->dten_vstate != NULL); 11345 11346 state = enab->dten_vstate->dtvs_state; 11347 ASSERT(state != NULL); 11348 11349 /* 11350 * We only allow each state to retain dtrace_retain_max enablings. 
11351 */ 11352 if (state->dts_nretained >= dtrace_retain_max) 11353 return (ENOSPC); 11354 11355 state->dts_nretained++; 11356 11357 if (dtrace_retained == NULL) { 11358 dtrace_retained = enab; 11359 return (0); 11360 } 11361 11362 enab->dten_next = dtrace_retained; 11363 dtrace_retained->dten_prev = enab; 11364 dtrace_retained = enab; 11365 11366 return (0); 11367 } 11368 11369 static int 11370 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11371 dtrace_probedesc_t *create) 11372 { 11373 dtrace_enabling_t *new, *enab; 11374 int found = 0, err = ENOENT; 11375 11376 ASSERT(MUTEX_HELD(&dtrace_lock)); 11377 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11378 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11379 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11380 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11381 11382 new = dtrace_enabling_create(&state->dts_vstate); 11383 11384 /* 11385 * Iterate over all retained enablings, looking for enablings that 11386 * match the specified state. 11387 */ 11388 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11389 int i; 11390 11391 /* 11392 * dtvs_state can only be NULL for helper enablings -- and 11393 * helper enablings can't be retained. 11394 */ 11395 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11396 11397 if (enab->dten_vstate->dtvs_state != state) 11398 continue; 11399 11400 /* 11401 * Now iterate over each probe description; we're looking for 11402 * an exact match to the specified probe description. 11403 */ 11404 for (i = 0; i < enab->dten_ndesc; i++) { 11405 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11406 dtrace_probedesc_t *pd = &ep->dted_probe; 11407 11408 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11409 continue; 11410 11411 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11412 continue; 11413 11414 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11415 continue; 11416 11417 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11418 continue; 11419 11420 /* 11421 * We have a winning probe! Add it to our growing 11422 * enabling. 11423 */ 11424 found = 1; 11425 dtrace_enabling_addlike(new, ep, create); 11426 } 11427 } 11428 11429 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11430 dtrace_enabling_destroy(new); 11431 return (err); 11432 } 11433 11434 return (0); 11435 } 11436 11437 static void 11438 dtrace_enabling_retract(dtrace_state_t *state) 11439 { 11440 dtrace_enabling_t *enab, *next; 11441 11442 ASSERT(MUTEX_HELD(&dtrace_lock)); 11443 11444 /* 11445 * Iterate over all retained enablings, destroy the enablings retained 11446 * for the specified state. 11447 */ 11448 for (enab = dtrace_retained; enab != NULL; enab = next) { 11449 next = enab->dten_next; 11450 11451 /* 11452 * dtvs_state can only be NULL for helper enablings -- and 11453 * helper enablings can't be retained. 
11454 */ 11455 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11456 11457 if (enab->dten_vstate->dtvs_state == state) { 11458 ASSERT(state->dts_nretained > 0); 11459 dtrace_enabling_destroy(enab); 11460 } 11461 } 11462 11463 ASSERT(state->dts_nretained == 0); 11464 } 11465 11466 static int 11467 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11468 { 11469 int i = 0; 11470 int total_matched = 0, matched = 0; 11471 11472 ASSERT(MUTEX_HELD(&cpu_lock)); 11473 ASSERT(MUTEX_HELD(&dtrace_lock)); 11474 11475 for (i = 0; i < enab->dten_ndesc; i++) { 11476 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11477 11478 enab->dten_current = ep; 11479 enab->dten_error = 0; 11480 11481 /* 11482 * If a provider failed to enable a probe then get out and 11483 * let the consumer know we failed. 11484 */ 11485 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0) 11486 return (EBUSY); 11487 11488 total_matched += matched; 11489 11490 if (enab->dten_error != 0) { 11491 /* 11492 * If we get an error half-way through enabling the 11493 * probes, we kick out -- perhaps with some number of 11494 * them enabled. Leaving enabled probes enabled may 11495 * be slightly confusing for user-level, but we expect 11496 * that no one will attempt to actually drive on in 11497 * the face of such errors. If this is an anonymous 11498 * enabling (indicated with a NULL nmatched pointer), 11499 * we cmn_err() a message. We aren't expecting to 11500 * get such an error -- such as it can exist at all, 11501 * it would be a result of corrupted DOF in the driver 11502 * properties. 11503 */ 11504 if (nmatched == NULL) { 11505 cmn_err(CE_WARN, "dtrace_enabling_match() " 11506 "error on %p: %d", (void *)ep, 11507 enab->dten_error); 11508 } 11509 11510 return (enab->dten_error); 11511 } 11512 } 11513 11514 enab->dten_probegen = dtrace_probegen; 11515 if (nmatched != NULL) 11516 *nmatched = total_matched; 11517 11518 return (0); 11519 } 11520 11521 static void 11522 dtrace_enabling_matchall(void) 11523 { 11524 dtrace_enabling_t *enab; 11525 11526 mutex_enter(&cpu_lock); 11527 mutex_enter(&dtrace_lock); 11528 11529 /* 11530 * Iterate over all retained enablings to see if any probes match 11531 * against them. We only perform this operation on enablings for which 11532 * we have sufficient permissions by virtue of being in the global zone 11533 * or in the same zone as the DTrace client. Because we can be called 11534 * after dtrace_detach() has been called, we cannot assert that there 11535 * are retained enablings. We can safely load from dtrace_retained, 11536 * however: the taskq_destroy() at the end of dtrace_detach() will 11537 * block pending our completion. 11538 */ 11539 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11540 #if defined(sun) 11541 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11542 11543 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11544 #endif 11545 (void) dtrace_enabling_match(enab, NULL); 11546 } 11547 11548 mutex_exit(&dtrace_lock); 11549 mutex_exit(&cpu_lock); 11550 } 11551 11552 /* 11553 * If an enabling is to be enabled without having matched probes (that is, if 11554 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11555 * enabling must be _primed_ by creating an ECB for every ECB description. 11556 * This must be done to assure that we know the number of speculations, the 11557 * number of aggregations, the minimum buffer size needed, etc. before we 11558 * transition out of DTRACE_ACTIVITY_INACTIVE. 
To do this without actually 11559 * enabling any probes, we create ECBs for every ECB decription, but with a 11560 * NULL probe -- which is exactly what this function does. 11561 */ 11562 static void 11563 dtrace_enabling_prime(dtrace_state_t *state) 11564 { 11565 dtrace_enabling_t *enab; 11566 int i; 11567 11568 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11569 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11570 11571 if (enab->dten_vstate->dtvs_state != state) 11572 continue; 11573 11574 /* 11575 * We don't want to prime an enabling more than once, lest 11576 * we allow a malicious user to induce resource exhaustion. 11577 * (The ECBs that result from priming an enabling aren't 11578 * leaked -- but they also aren't deallocated until the 11579 * consumer state is destroyed.) 11580 */ 11581 if (enab->dten_primed) 11582 continue; 11583 11584 for (i = 0; i < enab->dten_ndesc; i++) { 11585 enab->dten_current = enab->dten_desc[i]; 11586 (void) dtrace_probe_enable(NULL, enab); 11587 } 11588 11589 enab->dten_primed = 1; 11590 } 11591 } 11592 11593 /* 11594 * Called to indicate that probes should be provided due to retained 11595 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11596 * must take an initial lap through the enabling calling the dtps_provide() 11597 * entry point explicitly to allow for autocreated probes. 11598 */ 11599 static void 11600 dtrace_enabling_provide(dtrace_provider_t *prv) 11601 { 11602 int i, all = 0; 11603 dtrace_probedesc_t desc; 11604 11605 ASSERT(MUTEX_HELD(&dtrace_lock)); 11606 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11607 11608 if (prv == NULL) { 11609 all = 1; 11610 prv = dtrace_provider; 11611 } 11612 11613 do { 11614 dtrace_enabling_t *enab = dtrace_retained; 11615 void *parg = prv->dtpv_arg; 11616 11617 for (; enab != NULL; enab = enab->dten_next) { 11618 for (i = 0; i < enab->dten_ndesc; i++) { 11619 desc = enab->dten_desc[i]->dted_probe; 11620 mutex_exit(&dtrace_lock); 11621 prv->dtpv_pops.dtps_provide(parg, &desc); 11622 mutex_enter(&dtrace_lock); 11623 } 11624 } 11625 } while (all && (prv = prv->dtpv_next) != NULL); 11626 11627 mutex_exit(&dtrace_lock); 11628 dtrace_probe_provide(NULL, all ? NULL : prv); 11629 mutex_enter(&dtrace_lock); 11630 } 11631 11632 /* 11633 * DTrace DOF Functions 11634 */ 11635 /*ARGSUSED*/ 11636 static void 11637 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11638 { 11639 if (dtrace_err_verbose) 11640 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11641 11642 #ifdef DTRACE_ERRDEBUG 11643 dtrace_errdebug(str); 11644 #endif 11645 } 11646 11647 /* 11648 * Create DOF out of a currently enabled state. Right now, we only create 11649 * DOF containing the run-time options -- but this could be expanded to create 11650 * complete DOF representing the enabled state. 
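 *
 * The image built below is laid out as a dof_hdr_t, followed by a
 * single dof_sec_t of type DOF_SECT_OPTDESC (padded out to an 8-byte
 * boundary), followed by DTRACEOPT_MAX dof_optdesc_t entries -- one
 * per run-time option, each carrying that option's current value.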
11651 */ 11652 static dof_hdr_t * 11653 dtrace_dof_create(dtrace_state_t *state) 11654 { 11655 dof_hdr_t *dof; 11656 dof_sec_t *sec; 11657 dof_optdesc_t *opt; 11658 int i, len = sizeof (dof_hdr_t) + 11659 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11660 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11661 11662 ASSERT(MUTEX_HELD(&dtrace_lock)); 11663 11664 dof = kmem_zalloc(len, KM_SLEEP); 11665 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11666 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11667 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11668 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11669 11670 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11671 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11672 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11673 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11674 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11675 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11676 11677 dof->dofh_flags = 0; 11678 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11679 dof->dofh_secsize = sizeof (dof_sec_t); 11680 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11681 dof->dofh_secoff = sizeof (dof_hdr_t); 11682 dof->dofh_loadsz = len; 11683 dof->dofh_filesz = len; 11684 dof->dofh_pad = 0; 11685 11686 /* 11687 * Fill in the option section header... 11688 */ 11689 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11690 sec->dofs_type = DOF_SECT_OPTDESC; 11691 sec->dofs_align = sizeof (uint64_t); 11692 sec->dofs_flags = DOF_SECF_LOAD; 11693 sec->dofs_entsize = sizeof (dof_optdesc_t); 11694 11695 opt = (dof_optdesc_t *)((uintptr_t)sec + 11696 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11697 11698 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11699 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11700 11701 for (i = 0; i < DTRACEOPT_MAX; i++) { 11702 opt[i].dofo_option = i; 11703 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11704 opt[i].dofo_value = state->dts_options[i]; 11705 } 11706 11707 return (dof); 11708 } 11709 11710 static dof_hdr_t * 11711 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11712 { 11713 dof_hdr_t hdr, *dof; 11714 11715 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11716 11717 /* 11718 * First, we're going to copyin() the sizeof (dof_hdr_t). 11719 */ 11720 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11721 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11722 *errp = EFAULT; 11723 return (NULL); 11724 } 11725 11726 /* 11727 * Now we'll allocate the entire DOF and copy it in -- provided 11728 * that the length isn't outrageous. 
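 * Note that after the full copyin() the load size recorded in the
 * copied-in DOF is compared against the value obtained from the header
 * on the first pass; if the two disagree (e.g. because the user buffer
 * changed underneath us), the DOF is rejected with EFAULT.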
11729 */ 11730 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11731 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11732 *errp = E2BIG; 11733 return (NULL); 11734 } 11735 11736 if (hdr.dofh_loadsz < sizeof (hdr)) { 11737 dtrace_dof_error(&hdr, "invalid load size"); 11738 *errp = EINVAL; 11739 return (NULL); 11740 } 11741 11742 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11743 11744 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 || 11745 dof->dofh_loadsz != hdr.dofh_loadsz) { 11746 kmem_free(dof, hdr.dofh_loadsz); 11747 *errp = EFAULT; 11748 return (NULL); 11749 } 11750 11751 return (dof); 11752 } 11753 11754 #if !defined(sun) 11755 static __inline uchar_t 11756 dtrace_dof_char(char c) { 11757 switch (c) { 11758 case '0': 11759 case '1': 11760 case '2': 11761 case '3': 11762 case '4': 11763 case '5': 11764 case '6': 11765 case '7': 11766 case '8': 11767 case '9': 11768 return (c - '0'); 11769 case 'A': 11770 case 'B': 11771 case 'C': 11772 case 'D': 11773 case 'E': 11774 case 'F': 11775 return (c - 'A' + 10); 11776 case 'a': 11777 case 'b': 11778 case 'c': 11779 case 'd': 11780 case 'e': 11781 case 'f': 11782 return (c - 'a' + 10); 11783 } 11784 /* Should not reach here. */ 11785 return (0); 11786 } 11787 #endif 11788 11789 static dof_hdr_t * 11790 dtrace_dof_property(const char *name) 11791 { 11792 dof_hdr_t *dof = NULL; 11793 #if defined(sun) 11794 uchar_t *buf; 11795 uint64_t loadsz; 11796 unsigned int len, i; 11797 11798 /* 11799 * Unfortunately, array of values in .conf files are always (and 11800 * only) interpreted to be integer arrays. We must read our DOF 11801 * as an integer array, and then squeeze it into a byte array. 11802 */ 11803 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 11804 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 11805 return (NULL); 11806 11807 for (i = 0; i < len; i++) 11808 buf[i] = (uchar_t)(((int *)buf)[i]); 11809 11810 if (len < sizeof (dof_hdr_t)) { 11811 ddi_prop_free(buf); 11812 dtrace_dof_error(NULL, "truncated header"); 11813 return (NULL); 11814 } 11815 11816 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 11817 ddi_prop_free(buf); 11818 dtrace_dof_error(NULL, "truncated DOF"); 11819 return (NULL); 11820 } 11821 11822 if (loadsz >= dtrace_dof_maxsize) { 11823 ddi_prop_free(buf); 11824 dtrace_dof_error(NULL, "oversized DOF"); 11825 return (NULL); 11826 } 11827 11828 dof = kmem_alloc(loadsz, KM_SLEEP); 11829 bcopy(buf, dof, loadsz); 11830 ddi_prop_free(buf); 11831 #else 11832 printf("dtrace: XXX %s not implemented (name=%s)\n", __func__, name); 11833 #if 0 /* XXX TBD dtrace_dof_provide */ 11834 char *p; 11835 char *p_env; 11836 11837 if ((p_env = getenv(name)) == NULL) 11838 return (NULL); 11839 11840 len = strlen(p_env) / 2; 11841 11842 buf = kmem_alloc(len, KM_SLEEP); 11843 11844 dof = (dof_hdr_t *) buf; 11845 11846 p = p_env; 11847 11848 for (i = 0; i < len; i++) { 11849 buf[i] = (dtrace_dof_char(p[0]) << 4) | 11850 dtrace_dof_char(p[1]); 11851 p += 2; 11852 } 11853 11854 freeenv(p_env); 11855 11856 if (len < sizeof (dof_hdr_t)) { 11857 kmem_free(buf, len); 11858 dtrace_dof_error(NULL, "truncated header"); 11859 return (NULL); 11860 } 11861 11862 if (len < (loadsz = dof->dofh_loadsz)) { 11863 kmem_free(buf, len); 11864 dtrace_dof_error(NULL, "truncated DOF"); 11865 return (NULL); 11866 } 11867 11868 if (loadsz >= dtrace_dof_maxsize) { 11869 kmem_free(buf, len); 11870 dtrace_dof_error(NULL, "oversized DOF"); 11871 return (NULL); 11872 } 11873 #endif 11874 #endif 11875 11876 return (dof); 11877 } 11878 11879 
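/*
 * A minimal sketch of how the disabled environment-string path in
 * dtrace_dof_property() above could be completed using
 * dtrace_dof_char(): every pair of hexadecimal characters in the
 * string decodes to one byte of the DOF image.  The helper name and
 * its placement here are illustrative only.
 */
#if 0
static dof_hdr_t *
dtrace_dof_decode_hex(const char *p, size_t *lenp)
{
	size_t i, len = strlen(p) / 2;
	uchar_t *buf = kmem_alloc(len, KM_SLEEP);

	for (i = 0; i < len; i++, p += 2)
		buf[i] = (dtrace_dof_char(p[0]) << 4) | dtrace_dof_char(p[1]);

	*lenp = len;
	return ((dof_hdr_t *)buf);
}
#endif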
static void 11880 dtrace_dof_destroy(dof_hdr_t *dof) 11881 { 11882 kmem_free(dof, dof->dofh_loadsz); 11883 } 11884 11885 /* 11886 * Return the dof_sec_t pointer corresponding to a given section index. If the 11887 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 11888 * a type other than DOF_SECT_NONE is specified, the header is checked against 11889 * this type and NULL is returned if the types do not match. 11890 */ 11891 static dof_sec_t * 11892 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 11893 { 11894 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 11895 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 11896 11897 if (i >= dof->dofh_secnum) { 11898 dtrace_dof_error(dof, "referenced section index is invalid"); 11899 return (NULL); 11900 } 11901 11902 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 11903 dtrace_dof_error(dof, "referenced section is not loadable"); 11904 return (NULL); 11905 } 11906 11907 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 11908 dtrace_dof_error(dof, "referenced section is the wrong type"); 11909 return (NULL); 11910 } 11911 11912 return (sec); 11913 } 11914 11915 static dtrace_probedesc_t * 11916 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 11917 { 11918 dof_probedesc_t *probe; 11919 dof_sec_t *strtab; 11920 uintptr_t daddr = (uintptr_t)dof; 11921 uintptr_t str; 11922 size_t size; 11923 11924 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 11925 dtrace_dof_error(dof, "invalid probe section"); 11926 return (NULL); 11927 } 11928 11929 if (sec->dofs_align != sizeof (dof_secidx_t)) { 11930 dtrace_dof_error(dof, "bad alignment in probe description"); 11931 return (NULL); 11932 } 11933 11934 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 11935 dtrace_dof_error(dof, "truncated probe description"); 11936 return (NULL); 11937 } 11938 11939 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 11940 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 11941 11942 if (strtab == NULL) 11943 return (NULL); 11944 11945 str = daddr + strtab->dofs_offset; 11946 size = strtab->dofs_size; 11947 11948 if (probe->dofp_provider >= strtab->dofs_size) { 11949 dtrace_dof_error(dof, "corrupt probe provider"); 11950 return (NULL); 11951 } 11952 11953 (void) strncpy(desc->dtpd_provider, 11954 (char *)(str + probe->dofp_provider), 11955 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 11956 11957 if (probe->dofp_mod >= strtab->dofs_size) { 11958 dtrace_dof_error(dof, "corrupt probe module"); 11959 return (NULL); 11960 } 11961 11962 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 11963 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 11964 11965 if (probe->dofp_func >= strtab->dofs_size) { 11966 dtrace_dof_error(dof, "corrupt probe function"); 11967 return (NULL); 11968 } 11969 11970 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 11971 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 11972 11973 if (probe->dofp_name >= strtab->dofs_size) { 11974 dtrace_dof_error(dof, "corrupt probe name"); 11975 return (NULL); 11976 } 11977 11978 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 11979 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 11980 11981 return (desc); 11982 } 11983 11984 static dtrace_difo_t * 11985 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 11986 cred_t *cr) 11987 { 11988 dtrace_difo_t *dp; 11989 size_t ttl = 0; 11990 dof_difohdr_t *dofd; 11991 
uintptr_t daddr = (uintptr_t)dof; 11992 size_t max = dtrace_difo_maxsize; 11993 int i, l, n; 11994 11995 static const struct { 11996 int section; 11997 int bufoffs; 11998 int lenoffs; 11999 int entsize; 12000 int align; 12001 const char *msg; 12002 } difo[] = { 12003 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12004 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12005 sizeof (dif_instr_t), "multiple DIF sections" }, 12006 12007 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12008 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12009 sizeof (uint64_t), "multiple integer tables" }, 12010 12011 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12012 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12013 sizeof (char), "multiple string tables" }, 12014 12015 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12016 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12017 sizeof (uint_t), "multiple variable tables" }, 12018 12019 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12020 }; 12021 12022 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12023 dtrace_dof_error(dof, "invalid DIFO header section"); 12024 return (NULL); 12025 } 12026 12027 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12028 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12029 return (NULL); 12030 } 12031 12032 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12033 sec->dofs_size % sizeof (dof_secidx_t)) { 12034 dtrace_dof_error(dof, "bad size in DIFO header"); 12035 return (NULL); 12036 } 12037 12038 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12039 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12040 12041 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12042 dp->dtdo_rtype = dofd->dofd_rtype; 12043 12044 for (l = 0; l < n; l++) { 12045 dof_sec_t *subsec; 12046 void **bufp; 12047 uint32_t *lenp; 12048 12049 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12050 dofd->dofd_links[l])) == NULL) 12051 goto err; /* invalid section link */ 12052 12053 if (ttl + subsec->dofs_size > max) { 12054 dtrace_dof_error(dof, "exceeds maximum size"); 12055 goto err; 12056 } 12057 12058 ttl += subsec->dofs_size; 12059 12060 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12061 if (subsec->dofs_type != difo[i].section) 12062 continue; 12063 12064 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12065 dtrace_dof_error(dof, "section not loaded"); 12066 goto err; 12067 } 12068 12069 if (subsec->dofs_align != difo[i].align) { 12070 dtrace_dof_error(dof, "bad alignment"); 12071 goto err; 12072 } 12073 12074 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12075 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12076 12077 if (*bufp != NULL) { 12078 dtrace_dof_error(dof, difo[i].msg); 12079 goto err; 12080 } 12081 12082 if (difo[i].entsize != subsec->dofs_entsize) { 12083 dtrace_dof_error(dof, "entry size mismatch"); 12084 goto err; 12085 } 12086 12087 if (subsec->dofs_entsize != 0 && 12088 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12089 dtrace_dof_error(dof, "corrupt entry size"); 12090 goto err; 12091 } 12092 12093 *lenp = subsec->dofs_size; 12094 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12095 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12096 *bufp, subsec->dofs_size); 12097 12098 if (subsec->dofs_entsize != 0) 12099 *lenp /= subsec->dofs_entsize; 12100 12101 break; 12102 } 12103 12104 /* 12105 * If we encounter a loadable DIFO sub-section that is not 12106 * known to us, assume this is a broken program and fail. 
12107 */ 12108 if (difo[i].section == DOF_SECT_NONE && 12109 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12110 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12111 goto err; 12112 } 12113 } 12114 12115 if (dp->dtdo_buf == NULL) { 12116 /* 12117 * We can't have a DIF object without DIF text. 12118 */ 12119 dtrace_dof_error(dof, "missing DIF text"); 12120 goto err; 12121 } 12122 12123 /* 12124 * Before we validate the DIF object, run through the variable table 12125 * looking for the strings -- if any of their size are under, we'll set 12126 * their size to be the system-wide default string size. Note that 12127 * this should _not_ happen if the "strsize" option has been set -- 12128 * in this case, the compiler should have set the size to reflect the 12129 * setting of the option. 12130 */ 12131 for (i = 0; i < dp->dtdo_varlen; i++) { 12132 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12133 dtrace_diftype_t *t = &v->dtdv_type; 12134 12135 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12136 continue; 12137 12138 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12139 t->dtdt_size = dtrace_strsize_default; 12140 } 12141 12142 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12143 goto err; 12144 12145 dtrace_difo_init(dp, vstate); 12146 return (dp); 12147 12148 err: 12149 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12150 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12151 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12152 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12153 12154 kmem_free(dp, sizeof (dtrace_difo_t)); 12155 return (NULL); 12156 } 12157 12158 static dtrace_predicate_t * 12159 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12160 cred_t *cr) 12161 { 12162 dtrace_difo_t *dp; 12163 12164 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12165 return (NULL); 12166 12167 return (dtrace_predicate_create(dp)); 12168 } 12169 12170 static dtrace_actdesc_t * 12171 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12172 cred_t *cr) 12173 { 12174 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12175 dof_actdesc_t *desc; 12176 dof_sec_t *difosec; 12177 size_t offs; 12178 uintptr_t daddr = (uintptr_t)dof; 12179 uint64_t arg; 12180 dtrace_actkind_t kind; 12181 12182 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12183 dtrace_dof_error(dof, "invalid action section"); 12184 return (NULL); 12185 } 12186 12187 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12188 dtrace_dof_error(dof, "truncated action description"); 12189 return (NULL); 12190 } 12191 12192 if (sec->dofs_align != sizeof (uint64_t)) { 12193 dtrace_dof_error(dof, "bad alignment in action description"); 12194 return (NULL); 12195 } 12196 12197 if (sec->dofs_size < sec->dofs_entsize) { 12198 dtrace_dof_error(dof, "section entry size exceeds total size"); 12199 return (NULL); 12200 } 12201 12202 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12203 dtrace_dof_error(dof, "bad entry size in action description"); 12204 return (NULL); 12205 } 12206 12207 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12208 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12209 return (NULL); 12210 } 12211 12212 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12213 desc = (dof_actdesc_t *)(daddr + 12214 (uintptr_t)sec->dofs_offset + offs); 12215 kind = (dtrace_actkind_t)desc->dofa_kind; 12216 12217 if (DTRACEACT_ISPRINTFLIKE(kind) && 12218 (kind != 
DTRACEACT_PRINTA || 12219 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12220 dof_sec_t *strtab; 12221 char *str, *fmt; 12222 uint64_t i; 12223 12224 /* 12225 * printf()-like actions must have a format string. 12226 */ 12227 if ((strtab = dtrace_dof_sect(dof, 12228 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12229 goto err; 12230 12231 str = (char *)((uintptr_t)dof + 12232 (uintptr_t)strtab->dofs_offset); 12233 12234 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12235 if (str[i] == '\0') 12236 break; 12237 } 12238 12239 if (i >= strtab->dofs_size) { 12240 dtrace_dof_error(dof, "bogus format string"); 12241 goto err; 12242 } 12243 12244 if (i == desc->dofa_arg) { 12245 dtrace_dof_error(dof, "empty format string"); 12246 goto err; 12247 } 12248 12249 i -= desc->dofa_arg; 12250 fmt = kmem_alloc(i + 1, KM_SLEEP); 12251 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12252 arg = (uint64_t)(uintptr_t)fmt; 12253 } else { 12254 if (kind == DTRACEACT_PRINTA) { 12255 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12256 arg = 0; 12257 } else { 12258 arg = desc->dofa_arg; 12259 } 12260 } 12261 12262 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12263 desc->dofa_uarg, arg); 12264 12265 if (last != NULL) { 12266 last->dtad_next = act; 12267 } else { 12268 first = act; 12269 } 12270 12271 last = act; 12272 12273 if (desc->dofa_difo == DOF_SECIDX_NONE) 12274 continue; 12275 12276 if ((difosec = dtrace_dof_sect(dof, 12277 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12278 goto err; 12279 12280 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12281 12282 if (act->dtad_difo == NULL) 12283 goto err; 12284 } 12285 12286 ASSERT(first != NULL); 12287 return (first); 12288 12289 err: 12290 for (act = first; act != NULL; act = next) { 12291 next = act->dtad_next; 12292 dtrace_actdesc_release(act, vstate); 12293 } 12294 12295 return (NULL); 12296 } 12297 12298 static dtrace_ecbdesc_t * 12299 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12300 cred_t *cr) 12301 { 12302 dtrace_ecbdesc_t *ep; 12303 dof_ecbdesc_t *ecb; 12304 dtrace_probedesc_t *desc; 12305 dtrace_predicate_t *pred = NULL; 12306 12307 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12308 dtrace_dof_error(dof, "truncated ECB description"); 12309 return (NULL); 12310 } 12311 12312 if (sec->dofs_align != sizeof (uint64_t)) { 12313 dtrace_dof_error(dof, "bad alignment in ECB description"); 12314 return (NULL); 12315 } 12316 12317 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12318 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12319 12320 if (sec == NULL) 12321 return (NULL); 12322 12323 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12324 ep->dted_uarg = ecb->dofe_uarg; 12325 desc = &ep->dted_probe; 12326 12327 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12328 goto err; 12329 12330 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12331 if ((sec = dtrace_dof_sect(dof, 12332 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12333 goto err; 12334 12335 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12336 goto err; 12337 12338 ep->dted_pred.dtpdd_predicate = pred; 12339 } 12340 12341 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12342 if ((sec = dtrace_dof_sect(dof, 12343 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12344 goto err; 12345 12346 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12347 12348 if (ep->dted_action == NULL) 12349 goto err; 12350 } 12351 12352 return (ep); 12353 12354 err: 12355 if (pred != NULL) 12356 
dtrace_predicate_release(pred, vstate); 12357 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 12358 return (NULL); 12359 } 12360 12361 /* 12362 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12363 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12364 * site of any user SETX relocations to account for load object base address. 12365 * In the future, if we need other relocations, this function can be extended. 12366 */ 12367 static int 12368 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12369 { 12370 uintptr_t daddr = (uintptr_t)dof; 12371 dof_relohdr_t *dofr = 12372 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12373 dof_sec_t *ss, *rs, *ts; 12374 dof_relodesc_t *r; 12375 uint_t i, n; 12376 12377 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12378 sec->dofs_align != sizeof (dof_secidx_t)) { 12379 dtrace_dof_error(dof, "invalid relocation header"); 12380 return (-1); 12381 } 12382 12383 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12384 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12385 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12386 12387 if (ss == NULL || rs == NULL || ts == NULL) 12388 return (-1); /* dtrace_dof_error() has been called already */ 12389 12390 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12391 rs->dofs_align != sizeof (uint64_t)) { 12392 dtrace_dof_error(dof, "invalid relocation section"); 12393 return (-1); 12394 } 12395 12396 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12397 n = rs->dofs_size / rs->dofs_entsize; 12398 12399 for (i = 0; i < n; i++) { 12400 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12401 12402 switch (r->dofr_type) { 12403 case DOF_RELO_NONE: 12404 break; 12405 case DOF_RELO_SETX: 12406 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12407 sizeof (uint64_t) > ts->dofs_size) { 12408 dtrace_dof_error(dof, "bad relocation offset"); 12409 return (-1); 12410 } 12411 12412 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12413 dtrace_dof_error(dof, "misaligned setx relo"); 12414 return (-1); 12415 } 12416 12417 *(uint64_t *)taddr += ubase; 12418 break; 12419 default: 12420 dtrace_dof_error(dof, "invalid relocation type"); 12421 return (-1); 12422 } 12423 12424 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12425 } 12426 12427 return (0); 12428 } 12429 12430 /* 12431 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12432 * header: it should be at the front of a memory region that is at least 12433 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12434 * size. It need not be validated in any other way. 12435 */ 12436 static int 12437 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12438 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12439 { 12440 uint64_t len = dof->dofh_loadsz, seclen; 12441 uintptr_t daddr = (uintptr_t)dof; 12442 dtrace_ecbdesc_t *ep; 12443 dtrace_enabling_t *enab; 12444 uint_t i; 12445 12446 ASSERT(MUTEX_HELD(&dtrace_lock)); 12447 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12448 12449 /* 12450 * Check the DOF header identification bytes. In addition to checking 12451 * valid settings, we also verify that unused bits/bytes are zeroed so 12452 * we can use them later without fear of regressing existing binaries. 
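	 *
	 * As a rough summary of the checks that follow (a reading aid, not an
	 * exhaustive specification), the identification bytes are expected to
	 * contain:
	 *
	 *	dofh_ident[DOF_ID_MAG0 ...]	the DOF magic string (DOF_MAG_STRING)
	 *	dofh_ident[DOF_ID_MODEL]	DOF_MODEL_ILP32 or DOF_MODEL_LP64
	 *	dofh_ident[DOF_ID_ENCODING]	DOF_ENCODE_NATIVE
	 *	dofh_ident[DOF_ID_VERSION]	DOF_VERSION_1 or DOF_VERSION_2
	 *	dofh_ident[DOF_ID_DIFVERS]	DIF_VERSION_2
	 *	dofh_ident[DOF_ID_DIFIREG]	at most DIF_DIR_NREGS integer registers
	 *	dofh_ident[DOF_ID_DIFTREG]	at most DIF_DTR_NREGS tuple registers
	 *	dofh_ident[DOF_ID_PAD ...]	zero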
12453 */ 12454 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12455 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12456 dtrace_dof_error(dof, "DOF magic string mismatch"); 12457 return (-1); 12458 } 12459 12460 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12461 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12462 dtrace_dof_error(dof, "DOF has invalid data model"); 12463 return (-1); 12464 } 12465 12466 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12467 dtrace_dof_error(dof, "DOF encoding mismatch"); 12468 return (-1); 12469 } 12470 12471 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12472 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12473 dtrace_dof_error(dof, "DOF version mismatch"); 12474 return (-1); 12475 } 12476 12477 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12478 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12479 return (-1); 12480 } 12481 12482 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12483 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12484 return (-1); 12485 } 12486 12487 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12488 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12489 return (-1); 12490 } 12491 12492 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12493 if (dof->dofh_ident[i] != 0) { 12494 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12495 return (-1); 12496 } 12497 } 12498 12499 if (dof->dofh_flags & ~DOF_FL_VALID) { 12500 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12501 return (-1); 12502 } 12503 12504 if (dof->dofh_secsize == 0) { 12505 dtrace_dof_error(dof, "zero section header size"); 12506 return (-1); 12507 } 12508 12509 /* 12510 * Check that the section headers don't exceed the amount of DOF 12511 * data. Note that we cast the section size and number of sections 12512 * to uint64_t's to prevent possible overflow in the multiplication. 12513 */ 12514 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12515 12516 if (dof->dofh_secoff > len || seclen > len || 12517 dof->dofh_secoff + seclen > len) { 12518 dtrace_dof_error(dof, "truncated section headers"); 12519 return (-1); 12520 } 12521 12522 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12523 dtrace_dof_error(dof, "misaligned section headers"); 12524 return (-1); 12525 } 12526 12527 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12528 dtrace_dof_error(dof, "misaligned section size"); 12529 return (-1); 12530 } 12531 12532 /* 12533 * Take an initial pass through the section headers to be sure that 12534 * the headers don't have stray offsets. If the 'noprobes' flag is 12535 * set, do not permit sections relating to providers, probes, or args. 
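	 *
	 * Concretely, the pass below rejects (among other things): loadable
	 * sections that do not have DOF_SECF_LOAD set, alignments that are
	 * not powers of two, section offsets that are misaligned or that
	 * (together with the section size) fall outside the loadable DOF
	 * image, and string tables that are not NUL-terminated.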
12536 */ 12537 for (i = 0; i < dof->dofh_secnum; i++) { 12538 dof_sec_t *sec = (dof_sec_t *)(daddr + 12539 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12540 12541 if (noprobes) { 12542 switch (sec->dofs_type) { 12543 case DOF_SECT_PROVIDER: 12544 case DOF_SECT_PROBES: 12545 case DOF_SECT_PRARGS: 12546 case DOF_SECT_PROFFS: 12547 dtrace_dof_error(dof, "illegal sections " 12548 "for enabling"); 12549 return (-1); 12550 } 12551 } 12552 12553 if (DOF_SEC_ISLOADABLE(sec->dofs_type) && 12554 !(sec->dofs_flags & DOF_SECF_LOAD)) { 12555 dtrace_dof_error(dof, "loadable section with load " 12556 "flag unset"); 12557 return (-1); 12558 } 12559 12560 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12561 continue; /* just ignore non-loadable sections */ 12562 12563 if (sec->dofs_align & (sec->dofs_align - 1)) { 12564 dtrace_dof_error(dof, "bad section alignment"); 12565 return (-1); 12566 } 12567 12568 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12569 dtrace_dof_error(dof, "misaligned section"); 12570 return (-1); 12571 } 12572 12573 if (sec->dofs_offset > len || sec->dofs_size > len || 12574 sec->dofs_offset + sec->dofs_size > len) { 12575 dtrace_dof_error(dof, "corrupt section header"); 12576 return (-1); 12577 } 12578 12579 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12580 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12581 dtrace_dof_error(dof, "non-terminating string table"); 12582 return (-1); 12583 } 12584 } 12585 12586 /* 12587 * Take a second pass through the sections and locate and perform any 12588 * relocations that are present. We do this after the first pass to 12589 * be sure that all sections have had their headers validated. 12590 */ 12591 for (i = 0; i < dof->dofh_secnum; i++) { 12592 dof_sec_t *sec = (dof_sec_t *)(daddr + 12593 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12594 12595 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12596 continue; /* skip sections that are not loadable */ 12597 12598 switch (sec->dofs_type) { 12599 case DOF_SECT_URELHDR: 12600 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12601 return (-1); 12602 break; 12603 } 12604 } 12605 12606 if ((enab = *enabp) == NULL) 12607 enab = *enabp = dtrace_enabling_create(vstate); 12608 12609 for (i = 0; i < dof->dofh_secnum; i++) { 12610 dof_sec_t *sec = (dof_sec_t *)(daddr + 12611 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12612 12613 if (sec->dofs_type != DOF_SECT_ECBDESC) 12614 continue; 12615 12616 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12617 dtrace_enabling_destroy(enab); 12618 *enabp = NULL; 12619 return (-1); 12620 } 12621 12622 dtrace_enabling_add(enab, ep); 12623 } 12624 12625 return (0); 12626 } 12627 12628 /* 12629 * Process DOF for any options. This routine assumes that the DOF has been 12630 * at least processed by dtrace_dof_slurp(). 
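 *
 * Each DOF_SECT_OPTDESC section is treated as an array of dof_optdesc_t
 * entries; for every entry, the (dofo_option, dofo_value) pair is handed to
 * dtrace_state_option().  Entries must not reference a string table
 * (dofo_strtab must be DOF_SECIDX_NONE) and must not carry the value
 * DTRACEOPT_UNSET.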
12631 */ 12632 static int 12633 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12634 { 12635 int i, rval; 12636 uint32_t entsize; 12637 size_t offs; 12638 dof_optdesc_t *desc; 12639 12640 for (i = 0; i < dof->dofh_secnum; i++) { 12641 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12642 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12643 12644 if (sec->dofs_type != DOF_SECT_OPTDESC) 12645 continue; 12646 12647 if (sec->dofs_align != sizeof (uint64_t)) { 12648 dtrace_dof_error(dof, "bad alignment in " 12649 "option description"); 12650 return (EINVAL); 12651 } 12652 12653 if ((entsize = sec->dofs_entsize) == 0) { 12654 dtrace_dof_error(dof, "zeroed option entry size"); 12655 return (EINVAL); 12656 } 12657 12658 if (entsize < sizeof (dof_optdesc_t)) { 12659 dtrace_dof_error(dof, "bad option entry size"); 12660 return (EINVAL); 12661 } 12662 12663 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12664 desc = (dof_optdesc_t *)((uintptr_t)dof + 12665 (uintptr_t)sec->dofs_offset + offs); 12666 12667 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12668 dtrace_dof_error(dof, "non-zero option string"); 12669 return (EINVAL); 12670 } 12671 12672 if (desc->dofo_value == DTRACEOPT_UNSET) { 12673 dtrace_dof_error(dof, "unset option"); 12674 return (EINVAL); 12675 } 12676 12677 if ((rval = dtrace_state_option(state, 12678 desc->dofo_option, desc->dofo_value)) != 0) { 12679 dtrace_dof_error(dof, "rejected option"); 12680 return (rval); 12681 } 12682 } 12683 } 12684 12685 return (0); 12686 } 12687 12688 /* 12689 * DTrace Consumer State Functions 12690 */ 12691 static int 12692 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12693 { 12694 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12695 void *base; 12696 uintptr_t limit; 12697 dtrace_dynvar_t *dvar, *next, *start; 12698 int i; 12699 12700 ASSERT(MUTEX_HELD(&dtrace_lock)); 12701 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12702 12703 bzero(dstate, sizeof (dtrace_dstate_t)); 12704 12705 if ((dstate->dtds_chunksize = chunksize) == 0) 12706 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12707 12708 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12709 size = min; 12710 12711 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12712 return (ENOMEM); 12713 12714 dstate->dtds_size = size; 12715 dstate->dtds_base = base; 12716 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12717 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12718 12719 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12720 12721 if (hashsize != 1 && (hashsize & 1)) 12722 hashsize--; 12723 12724 dstate->dtds_hashsize = hashsize; 12725 dstate->dtds_hash = dstate->dtds_base; 12726 12727 /* 12728 * Set all of our hash buckets to point to the single sink, and (if 12729 * it hasn't already been set), set the sink's hash value to be the 12730 * sink sentinel value. The sink is needed for dynamic variable 12731 * lookups to know that they have iterated over an entire, valid hash 12732 * chain. 12733 */ 12734 for (i = 0; i < hashsize; i++) 12735 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12736 12737 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12738 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12739 12740 /* 12741 * Determine number of active CPUs. Divide free list evenly among 12742 * active CPUs. 
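	 *
	 * As an illustrative example (the numbers are hypothetical): with
	 * roughly 1 MB of space left after the hash table and NCPU == 4,
	 * each CPU's free list is seeded with about 256 KB, rounded down to
	 * a multiple of dtds_chunksize; the last CPU's list always runs to
	 * the end of the region, so it also absorbs any remainder.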
12743 */ 12744 start = (dtrace_dynvar_t *) 12745 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12746 limit = (uintptr_t)base + size; 12747 12748 maxper = (limit - (uintptr_t)start) / NCPU; 12749 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12750 12751 for (i = 0; i < NCPU; i++) { 12752 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12753 12754 /* 12755 * If we don't even have enough chunks to make it once through 12756 * NCPUs, we're just going to allocate everything to the first 12757 * CPU. And if we're on the last CPU, we're going to allocate 12758 * whatever is left over. In either case, we set the limit to 12759 * be the limit of the dynamic variable space. 12760 */ 12761 if (maxper == 0 || i == NCPU - 1) { 12762 limit = (uintptr_t)base + size; 12763 start = NULL; 12764 } else { 12765 limit = (uintptr_t)start + maxper; 12766 start = (dtrace_dynvar_t *)limit; 12767 } 12768 12769 ASSERT(limit <= (uintptr_t)base + size); 12770 12771 for (;;) { 12772 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12773 dstate->dtds_chunksize); 12774 12775 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 12776 break; 12777 12778 dvar->dtdv_next = next; 12779 dvar = next; 12780 } 12781 12782 if (maxper == 0) 12783 break; 12784 } 12785 12786 return (0); 12787 } 12788 12789 static void 12790 dtrace_dstate_fini(dtrace_dstate_t *dstate) 12791 { 12792 ASSERT(MUTEX_HELD(&cpu_lock)); 12793 12794 if (dstate->dtds_base == NULL) 12795 return; 12796 12797 kmem_free(dstate->dtds_base, dstate->dtds_size); 12798 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 12799 } 12800 12801 static void 12802 dtrace_vstate_fini(dtrace_vstate_t *vstate) 12803 { 12804 /* 12805 * Logical XOR, where are you? 12806 */ 12807 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 12808 12809 if (vstate->dtvs_nglobals > 0) { 12810 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 12811 sizeof (dtrace_statvar_t *)); 12812 } 12813 12814 if (vstate->dtvs_ntlocals > 0) { 12815 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 12816 sizeof (dtrace_difv_t)); 12817 } 12818 12819 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 12820 12821 if (vstate->dtvs_nlocals > 0) { 12822 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 12823 sizeof (dtrace_statvar_t *)); 12824 } 12825 } 12826 12827 #if defined(sun) 12828 static void 12829 dtrace_state_clean(dtrace_state_t *state) 12830 { 12831 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12832 return; 12833 12834 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12835 dtrace_speculation_clean(state); 12836 } 12837 12838 static void 12839 dtrace_state_deadman(dtrace_state_t *state) 12840 { 12841 hrtime_t now; 12842 12843 dtrace_sync(); 12844 12845 now = dtrace_gethrtime(); 12846 12847 if (state != dtrace_anon.dta_state && 12848 now - state->dts_laststatus >= dtrace_deadman_user) 12849 return; 12850 12851 /* 12852 * We must be sure that dts_alive never appears to be less than the 12853 * value upon entry to dtrace_state_deadman(), and because we lack a 12854 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12855 * store INT64_MAX to it, followed by a memory barrier, followed by 12856 * the new value. This assures that dts_alive never appears to be 12857 * less than its true value, regardless of the order in which the 12858 * stores to the underlying storage are issued. 
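	 *
	 * (To illustrate the hazard being avoided: storing the new value
	 * directly could let a racing reader observe a torn or reordered
	 * value smaller than either the old or the new timestamp, making
	 * the session look as though it had not been kept alive.)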
12859 */ 12860 state->dts_alive = INT64_MAX; 12861 dtrace_membar_producer(); 12862 state->dts_alive = now; 12863 } 12864 #else 12865 static void 12866 dtrace_state_clean(void *arg) 12867 { 12868 dtrace_state_t *state = arg; 12869 dtrace_optval_t *opt = state->dts_options; 12870 12871 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 12872 return; 12873 12874 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 12875 dtrace_speculation_clean(state); 12876 12877 callout_reset(&state->dts_cleaner, ((dtrace_optval_t)hz * opt[DTRACEOPT_CLEANRATE]) / NANOSEC, 12878 dtrace_state_clean, state); 12879 } 12880 12881 static void 12882 dtrace_state_deadman(void *arg) 12883 { 12884 dtrace_state_t *state = arg; 12885 hrtime_t now; 12886 int res; 12887 kmutex_t dtrace_deadman_mutex; 12888 12889 mutex_init(&dtrace_deadman_mutex, NULL, MUTEX_DEFAULT, NULL); 12890 12891 while (dtrace_deadman_alive) { 12892 mutex_enter(&dtrace_deadman_mutex); 12893 res = mtsleep(&dtrace_deadman_wchan, PRI_BIO, "dtrace_deadman", 12894 ((dtrace_optval_t)hz * dtrace_deadman_interval) / NANOSEC, 12895 &dtrace_deadman_mutex); 12896 mutex_exit(&dtrace_deadman_mutex); 12897 12898 if (!dtrace_deadman_alive) { 12899 break; 12900 } 12901 12902 dtrace_sync(); 12903 12904 dtrace_debug_output(); 12905 12906 now = dtrace_gethrtime(); 12907 12908 if (state != dtrace_anon.dta_state && 12909 now - state->dts_laststatus >= dtrace_deadman_user) 12910 continue; 12911 12912 /* 12913 * We must be sure that dts_alive never appears to be less than the 12914 * value upon entry to dtrace_state_deadman(), and because we lack a 12915 * dtrace_cas64(), we cannot store to it atomically. We thus instead 12916 * store INT64_MAX to it, followed by a memory barrier, followed by 12917 * the new value. This assures that dts_alive never appears to be 12918 * less than its true value, regardless of the order in which the 12919 * stores to the underlying storage are issued. 12920 */ 12921 state->dts_alive = INT64_MAX; 12922 dtrace_membar_producer(); 12923 state->dts_alive = now; 12924 } 12925 12926 mutex_destroy(&dtrace_deadman_mutex); 12927 12928 kthread_exit(0); 12929 } 12930 #endif 12931 12932 static dtrace_state_t * 12933 #if defined(sun) 12934 dtrace_state_create(dev_t *devp, cred_t *cr) 12935 #else 12936 dtrace_state_create(dev_t dev, cred_t *cr) 12937 #endif 12938 { 12939 #if defined(sun) 12940 minor_t minor; 12941 major_t major; 12942 #else 12943 int m = 0; 12944 #endif 12945 char c[30]; 12946 dtrace_state_t *state; 12947 dtrace_optval_t *opt; 12948 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 12949 12950 ASSERT(MUTEX_HELD(&dtrace_lock)); 12951 ASSERT(MUTEX_HELD(&cpu_lock)); 12952 12953 #if defined(sun) 12954 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 12955 VM_BESTFIT | VM_SLEEP); 12956 12957 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 12958 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 12959 return (NULL); 12960 } 12961 12962 state = ddi_get_soft_state(dtrace_softstate, minor); 12963 #else 12964 m = minor(dev) & 0x0F; 12965 12966 /* Allocate memory for the state. 
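	 * Unlike the Solaris path above, which obtains per-minor soft state
	 * via ddi_soft_state_zalloc(), this path simply allocates the state
	 * structure directly and records the device passed in by the caller.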
*/ 12967 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 12968 #endif 12969 12970 state->dts_epid = DTRACE_EPIDNONE + 1; 12971 12972 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 12973 #if defined(sun) 12974 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 12975 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 12976 12977 if (devp != NULL) { 12978 major = getemajor(*devp); 12979 } else { 12980 major = ddi_driver_major(dtrace_devi); 12981 } 12982 12983 state->dts_dev = makedevice(major, minor); 12984 12985 if (devp != NULL) 12986 *devp = state->dts_dev; 12987 #else 12988 state->dts_aggid_arena = vmem_create(c, 1, INT_MAX, 1, 12989 NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE); 12990 state->dts_dev = dev; 12991 #endif 12992 12993 /* 12994 * We allocate NCPU buffers. On the one hand, this can be quite 12995 * a bit of memory per instance (nearly 36K on a Starcat). On the 12996 * other hand, it saves an additional memory reference in the probe 12997 * path. 12998 */ 12999 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13000 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13001 13002 #if defined(sun) 13003 state->dts_cleaner = CYCLIC_NONE; 13004 state->dts_deadman = CYCLIC_NONE; 13005 #else 13006 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13007 #endif 13008 state->dts_vstate.dtvs_state = state; 13009 13010 for (i = 0; i < DTRACEOPT_MAX; i++) 13011 state->dts_options[i] = DTRACEOPT_UNSET; 13012 13013 /* 13014 * Set the default options. 13015 */ 13016 opt = state->dts_options; 13017 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13018 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13019 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13020 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13021 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13022 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13023 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13024 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13025 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13026 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13027 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13028 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13029 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13030 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13031 13032 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13033 13034 /* 13035 * Depending on the user credentials, we set flag bits which alter probe 13036 * visibility or the amount of destructiveness allowed. In the case of 13037 * actual anonymous tracing, or the possession of all privileges, all of 13038 * the normal checks are bypassed. 13039 */ 13040 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13041 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13042 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13043 } else { 13044 /* 13045 * Set up the credentials for this instantiation. We take a 13046 * hold on the credential to prevent it from disappearing on 13047 * us; this in turn prevents the zone_t referenced by this 13048 * credential from disappearing. This means that we can 13049 * examine the credential and the zone from probe context. 13050 */ 13051 #if defined(sun) 13052 crhold(cr); 13053 #else 13054 kauth_cred_hold(cr); 13055 #endif 13056 state->dts_cred.dcr_cred = cr; 13057 13058 /* 13059 * CRA_PROC means "we have *some* privilege for dtrace" and 13060 * unlocks the use of variables like pid, zonename, etc. 
13061 */ 13062 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13063 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13064 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13065 } 13066 13067 /* 13068 * dtrace_user allows use of syscall and profile providers. 13069 * If the user also has proc_owner and/or proc_zone, we 13070 * extend the scope to include additional visibility and 13071 * destructive power. 13072 */ 13073 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13074 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13075 state->dts_cred.dcr_visible |= 13076 DTRACE_CRV_ALLPROC; 13077 13078 state->dts_cred.dcr_action |= 13079 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13080 } 13081 13082 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13083 state->dts_cred.dcr_visible |= 13084 DTRACE_CRV_ALLZONE; 13085 13086 state->dts_cred.dcr_action |= 13087 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13088 } 13089 13090 /* 13091 * If we have all privs in whatever zone this is, 13092 * we can do destructive things to processes which 13093 * have altered credentials. 13094 */ 13095 #if defined(sun) 13096 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13097 cr->cr_zone->zone_privset)) { 13098 state->dts_cred.dcr_action |= 13099 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13100 } 13101 #endif 13102 } 13103 13104 /* 13105 * Holding the dtrace_kernel privilege also implies that 13106 * the user has the dtrace_user privilege from a visibility 13107 * perspective. But without further privileges, some 13108 * destructive actions are not available. 13109 */ 13110 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13111 /* 13112 * Make all probes in all zones visible. However, 13113 * this doesn't mean that all actions become available 13114 * to all zones. 13115 */ 13116 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13117 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13118 13119 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13120 DTRACE_CRA_PROC; 13121 /* 13122 * Holding proc_owner means that destructive actions 13123 * for *this* zone are allowed. 13124 */ 13125 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13126 state->dts_cred.dcr_action |= 13127 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13128 13129 /* 13130 * Holding proc_zone means that destructive actions 13131 * for this user/group ID in all zones is allowed. 13132 */ 13133 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13134 state->dts_cred.dcr_action |= 13135 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13136 13137 #if defined(sun) 13138 /* 13139 * If we have all privs in whatever zone this is, 13140 * we can do destructive things to processes which 13141 * have altered credentials. 13142 */ 13143 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13144 cr->cr_zone->zone_privset)) { 13145 state->dts_cred.dcr_action |= 13146 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13147 } 13148 #endif 13149 } 13150 13151 /* 13152 * Holding the dtrace_proc privilege gives control over fasttrap 13153 * and pid providers. We need to grant wider destructive 13154 * privileges in the event that the user has proc_owner and/or 13155 * proc_zone. 
		 */
		if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;

			if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
				state->dts_cred.dcr_action |=
				    DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
		}
	}

	return (state);
}

static int
dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
{
	dtrace_optval_t *opt = state->dts_options, size;
	processorid_t cpu = 0;
	int flags = 0, rval;

	ASSERT(MUTEX_HELD(&dtrace_lock));
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(which < DTRACEOPT_MAX);
	ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
	    (state == dtrace_anon.dta_state &&
	    state->dts_activity == DTRACE_ACTIVITY_ACTIVE));

	if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
		return (0);

	if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
		cpu = opt[DTRACEOPT_CPU];

	if (which == DTRACEOPT_SPECSIZE)
		flags |= DTRACEBUF_NOSWITCH;

	if (which == DTRACEOPT_BUFSIZE) {
		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
			flags |= DTRACEBUF_RING;

		if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
			flags |= DTRACEBUF_FILL;

		if (state != dtrace_anon.dta_state ||
		    state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
			flags |= DTRACEBUF_INACTIVE;
	}

	for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
		/*
		 * The size must be 8-byte aligned.  If the size is not
		 * 8-byte aligned, drop it down by the difference.
		 */
		if (size & (sizeof (uint64_t) - 1))
			size -= size & (sizeof (uint64_t) - 1);

		if (size < state->dts_reserve) {
			/*
			 * Buffers must always be large enough to accommodate
			 * their prereserved space.  We return E2BIG instead
			 * of ENOMEM in this case to allow for user-level
			 * software to differentiate the cases.
			 */
			return (E2BIG);
		}

		rval = dtrace_buffer_alloc(buf, size, flags, cpu);

		if (rval != ENOMEM) {
			opt[which] = size;
			return (rval);
		}

		if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
			return (rval);
	}

	return (ENOMEM);
}

static int
dtrace_state_buffers(dtrace_state_t *state)
{
	dtrace_speculation_t *spec = state->dts_speculations;
	int rval, i;

	if ((rval = dtrace_state_buffer(state, state->dts_buffer,
	    DTRACEOPT_BUFSIZE)) != 0)
		return (rval);

	if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
	    DTRACEOPT_AGGSIZE)) != 0)
		return (rval);

	for (i = 0; i < state->dts_nspeculations; i++) {
		if ((rval = dtrace_state_buffer(state,
		    spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
			return (rval);
	}

	return (0);
}

static void
dtrace_state_prereserve(dtrace_state_t *state)
{
	dtrace_ecb_t *ecb;
	dtrace_probe_t *probe;

	state->dts_reserve = 0;

	if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
		return;

	/*
	 * If our buffer policy is a "fill" buffer policy, we need to set the
	 * prereserved space to be the space required by the END probes.
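	 *
	 * For example (the sizes are hypothetical): if this state has two
	 * ECBs on the END probe, one with dte_needed = 32 and
	 * dte_alignment = 8 and one with dte_needed = 16 and
	 * dte_alignment = 8, then dts_reserve becomes
	 * (32 + 8) + (16 + 8) = 64 bytes.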
13275 */ 13276 probe = dtrace_probes[dtrace_probeid_end - 1]; 13277 ASSERT(probe != NULL); 13278 13279 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13280 if (ecb->dte_state != state) 13281 continue; 13282 13283 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13284 } 13285 } 13286 13287 static int 13288 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13289 { 13290 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13291 dtrace_speculation_t *spec; 13292 dtrace_buffer_t *buf; 13293 #if defined(sun) 13294 cyc_handler_t hdlr; 13295 cyc_time_t when; 13296 #endif 13297 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13298 dtrace_icookie_t cookie; 13299 13300 mutex_enter(&cpu_lock); 13301 mutex_enter(&dtrace_lock); 13302 13303 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13304 rval = EBUSY; 13305 goto out; 13306 } 13307 13308 /* 13309 * Before we can perform any checks, we must prime all of the 13310 * retained enablings that correspond to this state. 13311 */ 13312 dtrace_enabling_prime(state); 13313 13314 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13315 rval = EACCES; 13316 goto out; 13317 } 13318 13319 dtrace_state_prereserve(state); 13320 13321 /* 13322 * Now we want to do is try to allocate our speculations. 13323 * We do not automatically resize the number of speculations; if 13324 * this fails, we will fail the operation. 13325 */ 13326 nspec = opt[DTRACEOPT_NSPEC]; 13327 ASSERT(nspec != DTRACEOPT_UNSET); 13328 13329 if (nspec > INT_MAX) { 13330 rval = ENOMEM; 13331 goto out; 13332 } 13333 13334 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13335 13336 if (spec == NULL) { 13337 rval = ENOMEM; 13338 goto out; 13339 } 13340 13341 state->dts_speculations = spec; 13342 state->dts_nspeculations = (int)nspec; 13343 13344 for (i = 0; i < nspec; i++) { 13345 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13346 rval = ENOMEM; 13347 goto err; 13348 } 13349 13350 spec[i].dtsp_buffer = buf; 13351 } 13352 13353 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13354 if (dtrace_anon.dta_state == NULL) { 13355 rval = ENOENT; 13356 goto out; 13357 } 13358 13359 if (state->dts_necbs != 0) { 13360 rval = EALREADY; 13361 goto out; 13362 } 13363 13364 state->dts_anon = dtrace_anon_grab(); 13365 ASSERT(state->dts_anon != NULL); 13366 state = state->dts_anon; 13367 13368 /* 13369 * We want "grabanon" to be set in the grabbed state, so we'll 13370 * copy that option value from the grabbing state into the 13371 * grabbed state. 13372 */ 13373 state->dts_options[DTRACEOPT_GRABANON] = 13374 opt[DTRACEOPT_GRABANON]; 13375 13376 *cpu = dtrace_anon.dta_beganon; 13377 13378 /* 13379 * If the anonymous state is active (as it almost certainly 13380 * is if the anonymous enabling ultimately matched anything), 13381 * we don't allow any further option processing -- but we 13382 * don't return failure. 13383 */ 13384 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13385 goto out; 13386 } 13387 13388 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13389 opt[DTRACEOPT_AGGSIZE] != 0) { 13390 if (state->dts_aggregations == NULL) { 13391 /* 13392 * We're not going to create an aggregation buffer 13393 * because we don't have any ECBs that contain 13394 * aggregations -- set this option to 0. 13395 */ 13396 opt[DTRACEOPT_AGGSIZE] = 0; 13397 } else { 13398 /* 13399 * If we have an aggregation buffer, we must also have 13400 * a buffer to use as scratch. 
13401 */ 13402 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13403 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13404 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13405 } 13406 } 13407 } 13408 13409 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13410 opt[DTRACEOPT_SPECSIZE] != 0) { 13411 if (!state->dts_speculates) { 13412 /* 13413 * We're not going to create speculation buffers 13414 * because we don't have any ECBs that actually 13415 * speculate -- set the speculation size to 0. 13416 */ 13417 opt[DTRACEOPT_SPECSIZE] = 0; 13418 } 13419 } 13420 13421 /* 13422 * The bare minimum size for any buffer that we're actually going to 13423 * do anything to is sizeof (uint64_t). 13424 */ 13425 sz = sizeof (uint64_t); 13426 13427 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13428 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13429 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13430 /* 13431 * A buffer size has been explicitly set to 0 (or to a size 13432 * that will be adjusted to 0) and we need the space -- we 13433 * need to return failure. We return ENOSPC to differentiate 13434 * it from failing to allocate a buffer due to failure to meet 13435 * the reserve (for which we return E2BIG). 13436 */ 13437 rval = ENOSPC; 13438 goto out; 13439 } 13440 13441 if ((rval = dtrace_state_buffers(state)) != 0) 13442 goto err; 13443 13444 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13445 sz = dtrace_dstate_defsize; 13446 13447 do { 13448 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13449 13450 if (rval == 0) 13451 break; 13452 13453 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13454 goto err; 13455 } while (sz >>= 1); 13456 13457 opt[DTRACEOPT_DYNVARSIZE] = sz; 13458 13459 if (rval != 0) 13460 goto err; 13461 13462 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13463 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13464 13465 if (opt[DTRACEOPT_CLEANRATE] == 0) 13466 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13467 13468 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13469 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13470 13471 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13472 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13473 13474 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13475 #if defined(sun) 13476 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13477 hdlr.cyh_arg = state; 13478 hdlr.cyh_level = CY_LOW_LEVEL; 13479 13480 when.cyt_when = 0; 13481 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13482 13483 state->dts_cleaner = cyclic_add(&hdlr, &when); 13484 13485 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13486 hdlr.cyh_arg = state; 13487 hdlr.cyh_level = CY_LOW_LEVEL; 13488 13489 when.cyt_when = 0; 13490 when.cyt_interval = dtrace_deadman_interval; 13491 13492 state->dts_deadman = cyclic_add(&hdlr, &when); 13493 #else 13494 callout_reset(&state->dts_cleaner, 13495 ((dtrace_optval_t)hz * opt[DTRACEOPT_CLEANRATE]) / NANOSEC, 13496 dtrace_state_clean, state); 13497 13498 dtrace_deadman_wchan = &dtrace_deadman_wchan; 13499 dtrace_deadman_alive = 1; 13500 13501 if ((rval = kthread_create(PRI_BIO, KTHREAD_MPSAFE, 13502 NULL, dtrace_state_deadman, state, 13503 &dtrace_deadman_proc, "dtrace_deadman")) != 0) { 13504 printf("failed to create deadman thread, error=%d\n", rval); 13505 goto out; 13506 } 13507 #endif 13508 13509 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13510 13511 /* 13512 * Now it's time to actually fire the BEGIN probe. 
We need to disable 13513 * interrupts here both to record the CPU on which we fired the BEGIN 13514 * probe (the data from this CPU will be processed first at user 13515 * level) and to manually activate the buffer for this CPU. 13516 */ 13517 cookie = dtrace_interrupt_disable(); 13518 *cpu = curcpu_id; 13519 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13520 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13521 13522 dtrace_probe(dtrace_probeid_begin, 13523 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13524 dtrace_interrupt_enable(cookie); 13525 /* 13526 * We may have had an exit action from a BEGIN probe; only change our 13527 * state to ACTIVE if we're still in WARMUP. 13528 */ 13529 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13530 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13531 13532 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13533 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13534 13535 /* 13536 * Regardless of whether or not now we're in ACTIVE or DRAINING, we 13537 * want each CPU to transition its principal buffer out of the 13538 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13539 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13540 * atomically transition from processing none of a state's ECBs to 13541 * processing all of them. 13542 */ 13543 dtrace_xcall(DTRACE_CPUALL, 13544 (dtrace_xcall_t)dtrace_buffer_activate, state); 13545 goto out; 13546 13547 err: 13548 dtrace_buffer_free(state->dts_buffer); 13549 dtrace_buffer_free(state->dts_aggbuffer); 13550 13551 if ((nspec = state->dts_nspeculations) == 0) { 13552 ASSERT(state->dts_speculations == NULL); 13553 goto out; 13554 } 13555 13556 spec = state->dts_speculations; 13557 ASSERT(spec != NULL); 13558 13559 for (i = 0; i < state->dts_nspeculations; i++) { 13560 if ((buf = spec[i].dtsp_buffer) == NULL) 13561 break; 13562 13563 dtrace_buffer_free(buf); 13564 kmem_free(buf, bufsize); 13565 } 13566 13567 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13568 state->dts_nspeculations = 0; 13569 state->dts_speculations = NULL; 13570 13571 out: 13572 mutex_exit(&dtrace_lock); 13573 mutex_exit(&cpu_lock); 13574 13575 return (rval); 13576 } 13577 13578 static int 13579 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13580 { 13581 dtrace_icookie_t cookie; 13582 13583 ASSERT(MUTEX_HELD(&dtrace_lock)); 13584 13585 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13586 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13587 return (EINVAL); 13588 13589 /* 13590 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13591 * to be sure that every CPU has seen it. See below for the details 13592 * on why this is done. 13593 */ 13594 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13595 dtrace_sync(); 13596 13597 /* 13598 * By this point, it is impossible for any CPU to be still processing 13599 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13600 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13601 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13602 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13603 * iff we're in the END probe. 13604 */ 13605 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13606 dtrace_sync(); 13607 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13608 13609 /* 13610 * Finally, we can release the reserve and call the END probe. 
We 13611 * disable interrupts across calling the END probe to allow us to 13612 * return the CPU on which we actually called the END probe. This 13613 * allows user-land to be sure that this CPU's principal buffer is 13614 * processed last. 13615 */ 13616 state->dts_reserve = 0; 13617 13618 cookie = dtrace_interrupt_disable(); 13619 *cpu = curcpu_id; 13620 dtrace_probe(dtrace_probeid_end, 13621 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13622 dtrace_interrupt_enable(cookie); 13623 13624 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13625 dtrace_sync(); 13626 13627 return (0); 13628 } 13629 13630 static int 13631 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13632 dtrace_optval_t val) 13633 { 13634 ASSERT(MUTEX_HELD(&dtrace_lock)); 13635 13636 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13637 return (EBUSY); 13638 13639 if (option >= DTRACEOPT_MAX) 13640 return (EINVAL); 13641 13642 if (option != DTRACEOPT_CPU && val < 0) 13643 return (EINVAL); 13644 13645 switch (option) { 13646 case DTRACEOPT_DESTRUCTIVE: 13647 if (dtrace_destructive_disallow) 13648 return (EACCES); 13649 13650 state->dts_cred.dcr_destructive = 1; 13651 break; 13652 13653 case DTRACEOPT_BUFSIZE: 13654 case DTRACEOPT_DYNVARSIZE: 13655 case DTRACEOPT_AGGSIZE: 13656 case DTRACEOPT_SPECSIZE: 13657 case DTRACEOPT_STRSIZE: 13658 if (val < 0) 13659 return (EINVAL); 13660 13661 if (val >= LONG_MAX) { 13662 /* 13663 * If this is an otherwise negative value, set it to 13664 * the highest multiple of 128m less than LONG_MAX. 13665 * Technically, we're adjusting the size without 13666 * regard to the buffer resizing policy, but in fact, 13667 * this has no effect -- if we set the buffer size to 13668 * ~LONG_MAX and the buffer policy is ultimately set to 13669 * be "manual", the buffer allocation is guaranteed to 13670 * fail, if only because the allocation requires two 13671 * buffers. (We set the the size to the highest 13672 * multiple of 128m because it ensures that the size 13673 * will remain a multiple of a megabyte when 13674 * repeatedly halved -- all the way down to 15m.) 13675 */ 13676 val = LONG_MAX - (1 << 27) + 1; 13677 } 13678 } 13679 13680 state->dts_options[option] = val; 13681 13682 return (0); 13683 } 13684 13685 static void 13686 dtrace_state_destroy(dtrace_state_t *state) 13687 { 13688 dtrace_ecb_t *ecb; 13689 dtrace_vstate_t *vstate = &state->dts_vstate; 13690 #if defined(sun) 13691 minor_t minor = getminor(state->dts_dev); 13692 #endif 13693 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13694 dtrace_speculation_t *spec = state->dts_speculations; 13695 int nspec = state->dts_nspeculations; 13696 uint32_t match; 13697 13698 ASSERT(MUTEX_HELD(&dtrace_lock)); 13699 ASSERT(MUTEX_HELD(&cpu_lock)); 13700 13701 /* 13702 * First, retract any retained enablings for this state. 13703 */ 13704 dtrace_enabling_retract(state); 13705 ASSERT(state->dts_nretained == 0); 13706 13707 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13708 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13709 /* 13710 * We have managed to come into dtrace_state_destroy() on a 13711 * hot enabling -- almost certainly because of a disorderly 13712 * shutdown of a consumer. (That is, a consumer that is 13713 * exiting without having called dtrace_stop().) In this case, 13714 * we're going to set our activity to be KILLED, and then 13715 * issue a sync to be sure that everyone is out of probe 13716 * context before we start blowing away ECBs. 
13717 */ 13718 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13719 dtrace_sync(); 13720 } 13721 13722 /* 13723 * Release the credential hold we took in dtrace_state_create(). 13724 */ 13725 if (state->dts_cred.dcr_cred != NULL) { 13726 #if defined(sun) 13727 crfree(state->dts_cred.dcr_cred); 13728 #else 13729 kauth_cred_free(state->dts_cred.dcr_cred); 13730 #endif 13731 } 13732 13733 /* 13734 * Now we can safely disable and destroy any enabled probes. Because 13735 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13736 * (especially if they're all enabled), we take two passes through the 13737 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13738 * in the second we disable whatever is left over. 13739 */ 13740 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13741 for (i = 0; i < state->dts_necbs; i++) { 13742 if ((ecb = state->dts_ecbs[i]) == NULL) 13743 continue; 13744 13745 if (match && ecb->dte_probe != NULL) { 13746 dtrace_probe_t *probe = ecb->dte_probe; 13747 dtrace_provider_t *prov = probe->dtpr_provider; 13748 13749 if (!(prov->dtpv_priv.dtpp_flags & match)) 13750 continue; 13751 } 13752 13753 dtrace_ecb_disable(ecb); 13754 dtrace_ecb_destroy(ecb); 13755 } 13756 13757 if (!match) 13758 break; 13759 } 13760 13761 /* 13762 * Before we free the buffers, perform one more sync to assure that 13763 * every CPU is out of probe context. 13764 */ 13765 dtrace_sync(); 13766 13767 dtrace_buffer_free(state->dts_buffer); 13768 dtrace_buffer_free(state->dts_aggbuffer); 13769 13770 for (i = 0; i < nspec; i++) 13771 dtrace_buffer_free(spec[i].dtsp_buffer); 13772 13773 #if defined(sun) 13774 if (state->dts_cleaner != CYCLIC_NONE) 13775 cyclic_remove(state->dts_cleaner); 13776 13777 if (state->dts_deadman != CYCLIC_NONE) 13778 cyclic_remove(state->dts_deadman); 13779 #else 13780 callout_stop(&state->dts_cleaner); 13781 13782 if (dtrace_deadman_alive) { 13783 /* tell the deadman thread to exit */ 13784 dtrace_deadman_alive = 0; 13785 wakeup(dtrace_deadman_wchan); 13786 } 13787 #endif 13788 13789 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13790 dtrace_vstate_fini(vstate); 13791 if (state->dts_ecbs != NULL) 13792 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13793 13794 if (state->dts_aggregations != NULL) { 13795 #ifdef DEBUG 13796 for (i = 0; i < state->dts_naggregations; i++) 13797 ASSERT(state->dts_aggregations[i] == NULL); 13798 #endif 13799 ASSERT(state->dts_naggregations > 0); 13800 kmem_free(state->dts_aggregations, 13801 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13802 } 13803 13804 kmem_free(state->dts_buffer, bufsize); 13805 kmem_free(state->dts_aggbuffer, bufsize); 13806 13807 for (i = 0; i < nspec; i++) 13808 kmem_free(spec[i].dtsp_buffer, bufsize); 13809 13810 if (spec != NULL) 13811 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13812 13813 dtrace_format_destroy(state); 13814 13815 if (state->dts_aggid_arena != NULL) { 13816 vmem_destroy(state->dts_aggid_arena); 13817 state->dts_aggid_arena = NULL; 13818 } 13819 #if defined(sun) 13820 ddi_soft_state_free(dtrace_softstate, minor); 13821 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13822 #else 13823 kmem_free(state, sizeof(dtrace_state_t)); 13824 #endif 13825 } 13826 13827 /* 13828 * DTrace Anonymous Enabling Functions 13829 */ 13830 static dtrace_state_t * 13831 dtrace_anon_grab(void) 13832 { 13833 dtrace_state_t *state; 13834 13835 ASSERT(MUTEX_HELD(&dtrace_lock)); 13836 13837 if ((state = dtrace_anon.dta_state) == NULL) { 13838 
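		/*
		 * There is no anonymous state to grab; assert that no
		 * anonymous enabling is dangling without it, and indicate
		 * failure to the caller.
		 */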
ASSERT(dtrace_anon.dta_enabling == NULL); 13839 return (NULL); 13840 } 13841 13842 ASSERT(dtrace_anon.dta_enabling != NULL); 13843 ASSERT(dtrace_retained != NULL); 13844 13845 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 13846 dtrace_anon.dta_enabling = NULL; 13847 dtrace_anon.dta_state = NULL; 13848 13849 return (state); 13850 } 13851 13852 static void 13853 dtrace_anon_property(void) 13854 { 13855 int i, rv; 13856 dtrace_state_t *state; 13857 dof_hdr_t *dof; 13858 char c[32]; /* enough for "dof-data-" + digits */ 13859 13860 ASSERT(MUTEX_HELD(&dtrace_lock)); 13861 ASSERT(MUTEX_HELD(&cpu_lock)); 13862 13863 for (i = 0; ; i++) { 13864 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 13865 13866 dtrace_err_verbose = 1; 13867 13868 if ((dof = dtrace_dof_property(c)) == NULL) { 13869 dtrace_err_verbose = 0; 13870 break; 13871 } 13872 13873 #if defined(sun) 13874 /* 13875 * We want to create anonymous state, so we need to transition 13876 * the kernel debugger to indicate that DTrace is active. If 13877 * this fails (e.g. because the debugger has modified text in 13878 * some way), we won't continue with the processing. 13879 */ 13880 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 13881 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 13882 "enabling ignored."); 13883 dtrace_dof_destroy(dof); 13884 break; 13885 } 13886 #endif 13887 13888 /* 13889 * If we haven't allocated an anonymous state, we'll do so now. 13890 */ 13891 if ((state = dtrace_anon.dta_state) == NULL) { 13892 #if defined(sun) 13893 state = dtrace_state_create(NULL, NULL); 13894 #endif 13895 dtrace_anon.dta_state = state; 13896 13897 if (state == NULL) { 13898 /* 13899 * This basically shouldn't happen: the only 13900 * failure mode from dtrace_state_create() is a 13901 * failure of ddi_soft_state_zalloc() that 13902 * itself should never happen. Still, the 13903 * interface allows for a failure mode, and 13904 * we want to fail as gracefully as possible: 13905 * we'll emit an error message and cease 13906 * processing anonymous state in this case. 13907 */ 13908 cmn_err(CE_WARN, "failed to create " 13909 "anonymous state"); 13910 dtrace_dof_destroy(dof); 13911 break; 13912 } 13913 } 13914 13915 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 13916 &dtrace_anon.dta_enabling, 0, B_TRUE); 13917 13918 if (rv == 0) 13919 rv = dtrace_dof_options(dof, state); 13920 13921 dtrace_err_verbose = 0; 13922 dtrace_dof_destroy(dof); 13923 13924 if (rv != 0) { 13925 /* 13926 * This is malformed DOF; chuck any anonymous state 13927 * that we created. 13928 */ 13929 ASSERT(dtrace_anon.dta_enabling == NULL); 13930 dtrace_state_destroy(state); 13931 dtrace_anon.dta_state = NULL; 13932 break; 13933 } 13934 13935 ASSERT(dtrace_anon.dta_enabling != NULL); 13936 } 13937 13938 if (dtrace_anon.dta_enabling != NULL) { 13939 int rval; 13940 13941 /* 13942 * dtrace_enabling_retain() can only fail because we are 13943 * trying to retain more enablings than are allowed -- but 13944 * we only have one anonymous enabling, and we are guaranteed 13945 * to be allowed at least one retained enabling; we assert 13946 * that dtrace_enabling_retain() returns success. 
13947 */ 13948 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 13949 ASSERT(rval == 0); 13950 13951 dtrace_enabling_dump(dtrace_anon.dta_enabling); 13952 } 13953 } 13954 13955 #if defined(sun) 13956 /* 13957 * DTrace Helper Functions 13958 */ 13959 static void 13960 dtrace_helper_trace(dtrace_helper_action_t *helper, 13961 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 13962 { 13963 uint32_t size, next, nnext, i; 13964 dtrace_helptrace_t *ent; 13965 uint16_t flags = cpu_core[curcpu_id].cpuc_dtrace_flags; 13966 13967 if (!dtrace_helptrace_enabled) 13968 return; 13969 13970 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 13971 13972 /* 13973 * What would a tracing framework be without its own tracing 13974 * framework? (Well, a hell of a lot simpler, for starters...) 13975 */ 13976 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 13977 sizeof (uint64_t) - sizeof (uint64_t); 13978 13979 /* 13980 * Iterate until we can allocate a slot in the trace buffer. 13981 */ 13982 do { 13983 next = dtrace_helptrace_next; 13984 13985 if (next + size < dtrace_helptrace_bufsize) { 13986 nnext = next + size; 13987 } else { 13988 nnext = size; 13989 } 13990 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 13991 13992 /* 13993 * We have our slot; fill it in. 13994 */ 13995 if (nnext == size) 13996 next = 0; 13997 13998 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 13999 ent->dtht_helper = helper; 14000 ent->dtht_where = where; 14001 ent->dtht_nlocals = vstate->dtvs_nlocals; 14002 14003 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14004 mstate->dtms_fltoffs : -1; 14005 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14006 ent->dtht_illval = cpu_core[curcpu_id].cpuc_dtrace_illval; 14007 14008 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14009 dtrace_statvar_t *svar; 14010 14011 if ((svar = vstate->dtvs_locals[i]) == NULL) 14012 continue; 14013 14014 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14015 ent->dtht_locals[i] = 14016 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu_id]; 14017 } 14018 } 14019 #endif 14020 14021 #if defined(sun) 14022 static uint64_t 14023 dtrace_helper(int which, dtrace_mstate_t *mstate, 14024 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14025 { 14026 uint16_t *flags = &cpu_core[curcpu_id].cpuc_dtrace_flags; 14027 uint64_t sarg0 = mstate->dtms_arg[0]; 14028 uint64_t sarg1 = mstate->dtms_arg[1]; 14029 uint64_t rval; 14030 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14031 dtrace_helper_action_t *helper; 14032 dtrace_vstate_t *vstate; 14033 dtrace_difo_t *pred; 14034 int i, trace = dtrace_helptrace_enabled; 14035 14036 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14037 14038 if (helpers == NULL) 14039 return (0); 14040 14041 if ((helper = helpers->dthps_actions[which]) == NULL) 14042 return (0); 14043 14044 vstate = &helpers->dthps_vstate; 14045 mstate->dtms_arg[0] = arg0; 14046 mstate->dtms_arg[1] = arg1; 14047 14048 /* 14049 * Now iterate over each helper. If its predicate evaluates to 'true', 14050 * we'll call the corresponding actions. Note that the below calls 14051 * to dtrace_dif_emulate() may set faults in machine state. This is 14052 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14053 * the stored DIF offset with its own (which is the desired behavior). 14054 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14055 * from machine state; this is okay, too. 
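 *
 * Schematically (a sketch of the loop below, not additional behavior):
 *
 *	for each helper action in the chain:
 *		if it has a predicate, emulate it; if the predicate is
 *		    false, skip to the next helper
 *		otherwise, emulate each of the helper's actions in turn
 *		on any CPU_DTRACE_FAULT, jump to the error path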
14056 */ 14057 for (; helper != NULL; helper = helper->dtha_next) { 14058 if ((pred = helper->dtha_predicate) != NULL) { 14059 if (trace) 14060 dtrace_helper_trace(helper, mstate, vstate, 0); 14061 14062 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14063 goto next; 14064 14065 if (*flags & CPU_DTRACE_FAULT) 14066 goto err; 14067 } 14068 14069 for (i = 0; i < helper->dtha_nactions; i++) { 14070 if (trace) 14071 dtrace_helper_trace(helper, 14072 mstate, vstate, i + 1); 14073 14074 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14075 mstate, vstate, state); 14076 14077 if (*flags & CPU_DTRACE_FAULT) 14078 goto err; 14079 } 14080 14081 next: 14082 if (trace) 14083 dtrace_helper_trace(helper, mstate, vstate, 14084 DTRACE_HELPTRACE_NEXT); 14085 } 14086 14087 if (trace) 14088 dtrace_helper_trace(helper, mstate, vstate, 14089 DTRACE_HELPTRACE_DONE); 14090 14091 /* 14092 * Restore the arg0 that we saved upon entry. 14093 */ 14094 mstate->dtms_arg[0] = sarg0; 14095 mstate->dtms_arg[1] = sarg1; 14096 14097 return (rval); 14098 14099 err: 14100 if (trace) 14101 dtrace_helper_trace(helper, mstate, vstate, 14102 DTRACE_HELPTRACE_ERR); 14103 14104 /* 14105 * Restore the arg0 that we saved upon entry. 14106 */ 14107 mstate->dtms_arg[0] = sarg0; 14108 mstate->dtms_arg[1] = sarg1; 14109 14110 return (0); 14111 } 14112 14113 static void 14114 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14115 dtrace_vstate_t *vstate) 14116 { 14117 int i; 14118 14119 if (helper->dtha_predicate != NULL) 14120 dtrace_difo_release(helper->dtha_predicate, vstate); 14121 14122 for (i = 0; i < helper->dtha_nactions; i++) { 14123 ASSERT(helper->dtha_actions[i] != NULL); 14124 dtrace_difo_release(helper->dtha_actions[i], vstate); 14125 } 14126 14127 kmem_free(helper->dtha_actions, 14128 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14129 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14130 } 14131 14132 static int 14133 dtrace_helper_destroygen(int gen) 14134 { 14135 proc_t *p = curproc; 14136 dtrace_helpers_t *help = p->p_dtrace_helpers; 14137 dtrace_vstate_t *vstate; 14138 int i; 14139 14140 ASSERT(MUTEX_HELD(&dtrace_lock)); 14141 14142 if (help == NULL || gen > help->dthps_generation) 14143 return (EINVAL); 14144 14145 vstate = &help->dthps_vstate; 14146 14147 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14148 dtrace_helper_action_t *last = NULL, *h, *next; 14149 14150 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14151 next = h->dtha_next; 14152 14153 if (h->dtha_generation == gen) { 14154 if (last != NULL) { 14155 last->dtha_next = next; 14156 } else { 14157 help->dthps_actions[i] = next; 14158 } 14159 14160 dtrace_helper_action_destroy(h, vstate); 14161 } else { 14162 last = h; 14163 } 14164 } 14165 } 14166 14167 /* 14168 * Interate until we've cleared out all helper providers with the 14169 * given generation number. 14170 */ 14171 for (;;) { 14172 dtrace_helper_provider_t *prov; 14173 14174 /* 14175 * Look for a helper provider with the right generation. We 14176 * have to start back at the beginning of the list each time 14177 * because we drop dtrace_lock. It's unlikely that we'll make 14178 * more than two passes. 14179 */ 14180 for (i = 0; i < help->dthps_nprovs; i++) { 14181 prov = help->dthps_provs[i]; 14182 14183 if (prov->dthp_generation == gen) 14184 break; 14185 } 14186 14187 /* 14188 * If there were no matches, we're done. 14189 */ 14190 if (i == help->dthps_nprovs) 14191 break; 14192 14193 /* 14194 * Move the last helper provider into this slot. 
14195 */ 14196 help->dthps_nprovs--; 14197 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14198 help->dthps_provs[help->dthps_nprovs] = NULL; 14199 14200 mutex_exit(&dtrace_lock); 14201 14202 /* 14203 * If we have a meta provider, remove this helper provider. 14204 */ 14205 mutex_enter(&dtrace_meta_lock); 14206 if (dtrace_meta_pid != NULL) { 14207 ASSERT(dtrace_deferred_pid == NULL); 14208 dtrace_helper_provider_remove(&prov->dthp_prov, 14209 p->p_pid); 14210 } 14211 mutex_exit(&dtrace_meta_lock); 14212 14213 dtrace_helper_provider_destroy(prov); 14214 14215 mutex_enter(&dtrace_lock); 14216 } 14217 14218 return (0); 14219 } 14220 #endif 14221 14222 #if defined(sun) 14223 static int 14224 dtrace_helper_validate(dtrace_helper_action_t *helper) 14225 { 14226 int err = 0, i; 14227 dtrace_difo_t *dp; 14228 14229 if ((dp = helper->dtha_predicate) != NULL) 14230 err += dtrace_difo_validate_helper(dp); 14231 14232 for (i = 0; i < helper->dtha_nactions; i++) 14233 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14234 14235 return (err == 0); 14236 } 14237 #endif 14238 14239 #if defined(sun) 14240 static int 14241 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14242 { 14243 dtrace_helpers_t *help; 14244 dtrace_helper_action_t *helper, *last; 14245 dtrace_actdesc_t *act; 14246 dtrace_vstate_t *vstate; 14247 dtrace_predicate_t *pred; 14248 int count = 0, nactions = 0, i; 14249 14250 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14251 return (EINVAL); 14252 14253 help = curproc->p_dtrace_helpers; 14254 last = help->dthps_actions[which]; 14255 vstate = &help->dthps_vstate; 14256 14257 for (count = 0; last != NULL; last = last->dtha_next) { 14258 count++; 14259 if (last->dtha_next == NULL) 14260 break; 14261 } 14262 14263 /* 14264 * If we already have dtrace_helper_actions_max helper actions for this 14265 * helper action type, we'll refuse to add a new one. 
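 * (dtrace_helper_actions_max is a DTrace tunable; the count was taken
 * over the dtha_next chain walked above.)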
14266 */ 14267 if (count >= dtrace_helper_actions_max) 14268 return (ENOSPC); 14269 14270 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14271 helper->dtha_generation = help->dthps_generation; 14272 14273 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14274 ASSERT(pred->dtp_difo != NULL); 14275 dtrace_difo_hold(pred->dtp_difo); 14276 helper->dtha_predicate = pred->dtp_difo; 14277 } 14278 14279 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14280 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14281 goto err; 14282 14283 if (act->dtad_difo == NULL) 14284 goto err; 14285 14286 nactions++; 14287 } 14288 14289 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14290 (helper->dtha_nactions = nactions), KM_SLEEP); 14291 14292 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14293 dtrace_difo_hold(act->dtad_difo); 14294 helper->dtha_actions[i++] = act->dtad_difo; 14295 } 14296 14297 if (!dtrace_helper_validate(helper)) 14298 goto err; 14299 14300 if (last == NULL) { 14301 help->dthps_actions[which] = helper; 14302 } else { 14303 last->dtha_next = helper; 14304 } 14305 14306 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14307 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14308 dtrace_helptrace_next = 0; 14309 } 14310 14311 return (0); 14312 err: 14313 dtrace_helper_action_destroy(helper, vstate); 14314 return (EINVAL); 14315 } 14316 14317 static void 14318 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14319 dof_helper_t *dofhp) 14320 { 14321 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14322 14323 mutex_enter(&dtrace_meta_lock); 14324 mutex_enter(&dtrace_lock); 14325 14326 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14327 /* 14328 * If the dtrace module is loaded but not attached, or if 14329 * there aren't isn't a meta provider registered to deal with 14330 * these provider descriptions, we need to postpone creating 14331 * the actual providers until later. 14332 */ 14333 14334 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14335 dtrace_deferred_pid != help) { 14336 help->dthps_deferred = 1; 14337 help->dthps_pid = p->p_pid; 14338 help->dthps_next = dtrace_deferred_pid; 14339 help->dthps_prev = NULL; 14340 if (dtrace_deferred_pid != NULL) 14341 dtrace_deferred_pid->dthps_prev = help; 14342 dtrace_deferred_pid = help; 14343 } 14344 14345 mutex_exit(&dtrace_lock); 14346 14347 } else if (dofhp != NULL) { 14348 /* 14349 * If the dtrace module is loaded and we have a particular 14350 * helper provider description, pass that off to the 14351 * meta provider. 14352 */ 14353 14354 mutex_exit(&dtrace_lock); 14355 14356 dtrace_helper_provide(dofhp, p->p_pid); 14357 14358 } else { 14359 /* 14360 * Otherwise, just pass all the helper provider descriptions 14361 * off to the meta provider. 14362 */ 14363 14364 int i; 14365 mutex_exit(&dtrace_lock); 14366 14367 for (i = 0; i < help->dthps_nprovs; i++) { 14368 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14369 p->p_pid); 14370 } 14371 } 14372 14373 mutex_exit(&dtrace_meta_lock); 14374 } 14375 14376 static int 14377 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14378 { 14379 dtrace_helpers_t *help; 14380 dtrace_helper_provider_t *hprov, **tmp_provs; 14381 uint_t tmp_maxprovs, i; 14382 14383 ASSERT(MUTEX_HELD(&dtrace_lock)); 14384 14385 help = curproc->p_dtrace_helpers; 14386 ASSERT(help != NULL); 14387 14388 /* 14389 * If we already have dtrace_helper_providers_max helper providers, 14390 * we're refuse to add a new one. 
14391 */ 14392 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14393 return (ENOSPC); 14394 14395 /* 14396 * Check to make sure this isn't a duplicate. 14397 */ 14398 for (i = 0; i < help->dthps_nprovs; i++) { 14399 if (dofhp->dofhp_addr == 14400 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14401 return (EALREADY); 14402 } 14403 14404 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14405 hprov->dthp_prov = *dofhp; 14406 hprov->dthp_ref = 1; 14407 hprov->dthp_generation = gen; 14408 14409 /* 14410 * Allocate a bigger table for helper providers if it's already full. 14411 */ 14412 if (help->dthps_maxprovs == help->dthps_nprovs) { 14413 tmp_maxprovs = help->dthps_maxprovs; 14414 tmp_provs = help->dthps_provs; 14415 14416 if (help->dthps_maxprovs == 0) 14417 help->dthps_maxprovs = 2; 14418 else 14419 help->dthps_maxprovs *= 2; 14420 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14421 help->dthps_maxprovs = dtrace_helper_providers_max; 14422 14423 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14424 14425 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14426 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14427 14428 if (tmp_provs != NULL) { 14429 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14430 sizeof (dtrace_helper_provider_t *)); 14431 kmem_free(tmp_provs, tmp_maxprovs * 14432 sizeof (dtrace_helper_provider_t *)); 14433 } 14434 } 14435 14436 help->dthps_provs[help->dthps_nprovs] = hprov; 14437 help->dthps_nprovs++; 14438 14439 return (0); 14440 } 14441 14442 static void 14443 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14444 { 14445 mutex_enter(&dtrace_lock); 14446 14447 if (--hprov->dthp_ref == 0) { 14448 dof_hdr_t *dof; 14449 mutex_exit(&dtrace_lock); 14450 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14451 dtrace_dof_destroy(dof); 14452 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14453 } else { 14454 mutex_exit(&dtrace_lock); 14455 } 14456 } 14457 14458 static int 14459 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14460 { 14461 uintptr_t daddr = (uintptr_t)dof; 14462 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14463 dof_provider_t *provider; 14464 dof_probe_t *probe; 14465 uint8_t *arg; 14466 char *strtab, *typestr; 14467 dof_stridx_t typeidx; 14468 size_t typesz; 14469 uint_t nprobes, j, k; 14470 14471 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14472 14473 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14474 dtrace_dof_error(dof, "misaligned section offset"); 14475 return (-1); 14476 } 14477 14478 /* 14479 * The section needs to be large enough to contain the DOF provider 14480 * structure appropriate for the given version. 14481 */ 14482 if (sec->dofs_size < 14483 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14484 offsetof(dof_provider_t, dofpv_prenoffs) : 14485 sizeof (dof_provider_t))) { 14486 dtrace_dof_error(dof, "provider section too small"); 14487 return (-1); 14488 } 14489 14490 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14491 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14492 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14493 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14494 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14495 14496 if (str_sec == NULL || prb_sec == NULL || 14497 arg_sec == NULL || off_sec == NULL) 14498 return (-1); 14499 14500 enoff_sec = NULL; 14501 14502 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14503 provider->dofpv_prenoffs != DOF_SECT_NONE && 14504 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14505 provider->dofpv_prenoffs)) == NULL) 14506 return (-1); 14507 14508 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14509 14510 if (provider->dofpv_name >= str_sec->dofs_size || 14511 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14512 dtrace_dof_error(dof, "invalid provider name"); 14513 return (-1); 14514 } 14515 14516 if (prb_sec->dofs_entsize == 0 || 14517 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14518 dtrace_dof_error(dof, "invalid entry size"); 14519 return (-1); 14520 } 14521 14522 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14523 dtrace_dof_error(dof, "misaligned entry size"); 14524 return (-1); 14525 } 14526 14527 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14528 dtrace_dof_error(dof, "invalid entry size"); 14529 return (-1); 14530 } 14531 14532 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14533 dtrace_dof_error(dof, "misaligned section offset"); 14534 return (-1); 14535 } 14536 14537 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14538 dtrace_dof_error(dof, "invalid entry size"); 14539 return (-1); 14540 } 14541 14542 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14543 14544 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14545 14546 /* 14547 * Take a pass through the probes to check for errors. 14548 */ 14549 for (j = 0; j < nprobes; j++) { 14550 probe = (dof_probe_t *)(uintptr_t)(daddr + 14551 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14552 14553 if (probe->dofpr_func >= str_sec->dofs_size) { 14554 dtrace_dof_error(dof, "invalid function name"); 14555 return (-1); 14556 } 14557 14558 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14559 dtrace_dof_error(dof, "function name too long"); 14560 return (-1); 14561 } 14562 14563 if (probe->dofpr_name >= str_sec->dofs_size || 14564 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14565 dtrace_dof_error(dof, "invalid probe name"); 14566 return (-1); 14567 } 14568 14569 /* 14570 * The offset count must not wrap the index, and the offsets 14571 * must also not overflow the section's data. 14572 */ 14573 if (probe->dofpr_offidx + probe->dofpr_noffs < 14574 probe->dofpr_offidx || 14575 (probe->dofpr_offidx + probe->dofpr_noffs) * 14576 off_sec->dofs_entsize > off_sec->dofs_size) { 14577 dtrace_dof_error(dof, "invalid probe offset"); 14578 return (-1); 14579 } 14580 14581 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14582 /* 14583 * If there's no is-enabled offset section, make sure 14584 * there aren't any is-enabled offsets. Otherwise 14585 * perform the same checks as for probe offsets 14586 * (immediately above). 
14587 */ 14588 if (enoff_sec == NULL) { 14589 if (probe->dofpr_enoffidx != 0 || 14590 probe->dofpr_nenoffs != 0) { 14591 dtrace_dof_error(dof, "is-enabled " 14592 "offsets with null section"); 14593 return (-1); 14594 } 14595 } else if (probe->dofpr_enoffidx + 14596 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14597 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14598 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14599 dtrace_dof_error(dof, "invalid is-enabled " 14600 "offset"); 14601 return (-1); 14602 } 14603 14604 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14605 dtrace_dof_error(dof, "zero probe and " 14606 "is-enabled offsets"); 14607 return (-1); 14608 } 14609 } else if (probe->dofpr_noffs == 0) { 14610 dtrace_dof_error(dof, "zero probe offsets"); 14611 return (-1); 14612 } 14613 14614 if (probe->dofpr_argidx + probe->dofpr_xargc < 14615 probe->dofpr_argidx || 14616 (probe->dofpr_argidx + probe->dofpr_xargc) * 14617 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14618 dtrace_dof_error(dof, "invalid args"); 14619 return (-1); 14620 } 14621 14622 typeidx = probe->dofpr_nargv; 14623 typestr = strtab + probe->dofpr_nargv; 14624 for (k = 0; k < probe->dofpr_nargc; k++) { 14625 if (typeidx >= str_sec->dofs_size) { 14626 dtrace_dof_error(dof, "bad " 14627 "native argument type"); 14628 return (-1); 14629 } 14630 14631 typesz = strlen(typestr) + 1; 14632 if (typesz > DTRACE_ARGTYPELEN) { 14633 dtrace_dof_error(dof, "native " 14634 "argument type too long"); 14635 return (-1); 14636 } 14637 typeidx += typesz; 14638 typestr += typesz; 14639 } 14640 14641 typeidx = probe->dofpr_xargv; 14642 typestr = strtab + probe->dofpr_xargv; 14643 for (k = 0; k < probe->dofpr_xargc; k++) { 14644 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14645 dtrace_dof_error(dof, "bad " 14646 "native argument index"); 14647 return (-1); 14648 } 14649 14650 if (typeidx >= str_sec->dofs_size) { 14651 dtrace_dof_error(dof, "bad " 14652 "translated argument type"); 14653 return (-1); 14654 } 14655 14656 typesz = strlen(typestr) + 1; 14657 if (typesz > DTRACE_ARGTYPELEN) { 14658 dtrace_dof_error(dof, "translated argument " 14659 "type too long"); 14660 return (-1); 14661 } 14662 14663 typeidx += typesz; 14664 typestr += typesz; 14665 } 14666 } 14667 14668 return (0); 14669 } 14670 14671 static int 14672 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14673 { 14674 dtrace_helpers_t *help; 14675 dtrace_vstate_t *vstate; 14676 dtrace_enabling_t *enab = NULL; 14677 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14678 uintptr_t daddr = (uintptr_t)dof; 14679 14680 ASSERT(MUTEX_HELD(&dtrace_lock)); 14681 14682 if ((help = curproc->p_dtrace_helpers) == NULL) 14683 help = dtrace_helpers_create(curproc); 14684 14685 vstate = &help->dthps_vstate; 14686 14687 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14688 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14689 dtrace_dof_destroy(dof); 14690 return (rv); 14691 } 14692 14693 /* 14694 * Look for helper providers and validate their descriptions. 
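 * Every DOF_SECT_PROVIDER section must pass
 * dtrace_helper_provider_validate() before we go on to add any helper
 * actions from this DOF; a single invalid provider section fails the
 * entire slurp.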
14695 */ 14696 if (dhp != NULL) { 14697 for (i = 0; i < dof->dofh_secnum; i++) { 14698 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14699 dof->dofh_secoff + i * dof->dofh_secsize); 14700 14701 if (sec->dofs_type != DOF_SECT_PROVIDER) 14702 continue; 14703 14704 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14705 dtrace_enabling_destroy(enab); 14706 dtrace_dof_destroy(dof); 14707 return (-1); 14708 } 14709 14710 nprovs++; 14711 } 14712 } 14713 14714 /* 14715 * Now we need to walk through the ECB descriptions in the enabling. 14716 */ 14717 for (i = 0; i < enab->dten_ndesc; i++) { 14718 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14719 dtrace_probedesc_t *desc = &ep->dted_probe; 14720 14721 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14722 continue; 14723 14724 if (strcmp(desc->dtpd_mod, "helper") != 0) 14725 continue; 14726 14727 if (strcmp(desc->dtpd_func, "ustack") != 0) 14728 continue; 14729 14730 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14731 ep)) != 0) { 14732 /* 14733 * Adding this helper action failed -- we are now going 14734 * to rip out the entire generation and return failure. 14735 */ 14736 (void) dtrace_helper_destroygen(help->dthps_generation); 14737 dtrace_enabling_destroy(enab); 14738 dtrace_dof_destroy(dof); 14739 return (-1); 14740 } 14741 14742 nhelpers++; 14743 } 14744 14745 if (nhelpers < enab->dten_ndesc) 14746 dtrace_dof_error(dof, "unmatched helpers"); 14747 14748 gen = help->dthps_generation++; 14749 dtrace_enabling_destroy(enab); 14750 14751 if (dhp != NULL && nprovs > 0) { 14752 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14753 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14754 mutex_exit(&dtrace_lock); 14755 dtrace_helper_provider_register(curproc, help, dhp); 14756 mutex_enter(&dtrace_lock); 14757 14758 destroy = 0; 14759 } 14760 } 14761 14762 if (destroy) 14763 dtrace_dof_destroy(dof); 14764 14765 return (gen); 14766 } 14767 14768 static dtrace_helpers_t * 14769 dtrace_helpers_create(proc_t *p) 14770 { 14771 dtrace_helpers_t *help; 14772 14773 ASSERT(MUTEX_HELD(&dtrace_lock)); 14774 ASSERT(p->p_dtrace_helpers == NULL); 14775 14776 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14777 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14778 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14779 14780 p->p_dtrace_helpers = help; 14781 dtrace_helpers++; 14782 14783 return (help); 14784 } 14785 14786 static void 14787 dtrace_helpers_destroy(void) 14788 { 14789 dtrace_helpers_t *help; 14790 dtrace_vstate_t *vstate; 14791 proc_t *p = curproc; 14792 int i; 14793 14794 mutex_enter(&dtrace_lock); 14795 14796 ASSERT(p->p_dtrace_helpers != NULL); 14797 ASSERT(dtrace_helpers > 0); 14798 14799 help = p->p_dtrace_helpers; 14800 vstate = &help->dthps_vstate; 14801 14802 /* 14803 * We're now going to lose the help from this process. 14804 */ 14805 p->p_dtrace_helpers = NULL; 14806 dtrace_sync(); 14807 14808 /* 14809 * Destory the helper actions. 14810 */ 14811 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14812 dtrace_helper_action_t *h, *next; 14813 14814 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14815 next = h->dtha_next; 14816 dtrace_helper_action_destroy(h, vstate); 14817 h = next; 14818 } 14819 } 14820 14821 mutex_exit(&dtrace_lock); 14822 14823 /* 14824 * Destroy the helper providers. 
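 * If a meta provider is registered, each helper provider is removed from
 * it; otherwise this process's helpers may still be on the deferred list,
 * from which they must be unlinked before their references are released.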
14825 */ 14826 if (help->dthps_maxprovs > 0) { 14827 mutex_enter(&dtrace_meta_lock); 14828 if (dtrace_meta_pid != NULL) { 14829 ASSERT(dtrace_deferred_pid == NULL); 14830 14831 for (i = 0; i < help->dthps_nprovs; i++) { 14832 dtrace_helper_provider_remove( 14833 &help->dthps_provs[i]->dthp_prov, p->p_pid); 14834 } 14835 } else { 14836 mutex_enter(&dtrace_lock); 14837 ASSERT(help->dthps_deferred == 0 || 14838 help->dthps_next != NULL || 14839 help->dthps_prev != NULL || 14840 help == dtrace_deferred_pid); 14841 14842 /* 14843 * Remove the helper from the deferred list. 14844 */ 14845 if (help->dthps_next != NULL) 14846 help->dthps_next->dthps_prev = help->dthps_prev; 14847 if (help->dthps_prev != NULL) 14848 help->dthps_prev->dthps_next = help->dthps_next; 14849 if (dtrace_deferred_pid == help) { 14850 dtrace_deferred_pid = help->dthps_next; 14851 ASSERT(help->dthps_prev == NULL); 14852 } 14853 14854 mutex_exit(&dtrace_lock); 14855 } 14856 14857 mutex_exit(&dtrace_meta_lock); 14858 14859 for (i = 0; i < help->dthps_nprovs; i++) { 14860 dtrace_helper_provider_destroy(help->dthps_provs[i]); 14861 } 14862 14863 kmem_free(help->dthps_provs, help->dthps_maxprovs * 14864 sizeof (dtrace_helper_provider_t *)); 14865 } 14866 14867 mutex_enter(&dtrace_lock); 14868 14869 dtrace_vstate_fini(&help->dthps_vstate); 14870 kmem_free(help->dthps_actions, 14871 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 14872 kmem_free(help, sizeof (dtrace_helpers_t)); 14873 14874 --dtrace_helpers; 14875 mutex_exit(&dtrace_lock); 14876 } 14877 14878 static void 14879 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 14880 { 14881 dtrace_helpers_t *help, *newhelp; 14882 dtrace_helper_action_t *helper, *new, *last; 14883 dtrace_difo_t *dp; 14884 dtrace_vstate_t *vstate; 14885 int i, j, sz, hasprovs = 0; 14886 14887 mutex_enter(&dtrace_lock); 14888 ASSERT(from->p_dtrace_helpers != NULL); 14889 ASSERT(dtrace_helpers > 0); 14890 14891 help = from->p_dtrace_helpers; 14892 newhelp = dtrace_helpers_create(to); 14893 ASSERT(to->p_dtrace_helpers != NULL); 14894 14895 newhelp->dthps_generation = help->dthps_generation; 14896 vstate = &newhelp->dthps_vstate; 14897 14898 /* 14899 * Duplicate the helper actions. 14900 */ 14901 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14902 if ((helper = help->dthps_actions[i]) == NULL) 14903 continue; 14904 14905 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 14906 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 14907 KM_SLEEP); 14908 new->dtha_generation = helper->dtha_generation; 14909 14910 if ((dp = helper->dtha_predicate) != NULL) { 14911 dp = dtrace_difo_duplicate(dp, vstate); 14912 new->dtha_predicate = dp; 14913 } 14914 14915 new->dtha_nactions = helper->dtha_nactions; 14916 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 14917 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 14918 14919 for (j = 0; j < new->dtha_nactions; j++) { 14920 dtrace_difo_t *dp = helper->dtha_actions[j]; 14921 14922 ASSERT(dp != NULL); 14923 dp = dtrace_difo_duplicate(dp, vstate); 14924 new->dtha_actions[j] = dp; 14925 } 14926 14927 if (last != NULL) { 14928 last->dtha_next = new; 14929 } else { 14930 newhelp->dthps_actions[i] = new; 14931 } 14932 14933 last = new; 14934 } 14935 } 14936 14937 /* 14938 * Duplicate the helper providers and register them with the 14939 * DTrace framework. 
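 * The provider descriptions themselves are shared between parent and
 * child: we take an additional reference on each dtrace_helper_provider_t
 * rather than copying the underlying DOF.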
14940 */ 14941 if (help->dthps_nprovs > 0) { 14942 newhelp->dthps_nprovs = help->dthps_nprovs; 14943 newhelp->dthps_maxprovs = help->dthps_nprovs; 14944 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 14945 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14946 for (i = 0; i < newhelp->dthps_nprovs; i++) { 14947 newhelp->dthps_provs[i] = help->dthps_provs[i]; 14948 newhelp->dthps_provs[i]->dthp_ref++; 14949 } 14950 14951 hasprovs = 1; 14952 } 14953 14954 mutex_exit(&dtrace_lock); 14955 14956 if (hasprovs) 14957 dtrace_helper_provider_register(to, newhelp, NULL); 14958 } 14959 #endif 14960 14961 #if defined(sun) 14962 /* 14963 * DTrace Hook Functions 14964 */ 14965 static void 14966 dtrace_module_loaded(modctl_t *ctl) 14967 { 14968 dtrace_provider_t *prv; 14969 14970 mutex_enter(&dtrace_provider_lock); 14971 mutex_enter(&mod_lock); 14972 14973 ASSERT(ctl->mod_busy); 14974 14975 /* 14976 * We're going to call each providers per-module provide operation 14977 * specifying only this module. 14978 */ 14979 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 14980 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 14981 14982 mutex_exit(&mod_lock); 14983 mutex_exit(&dtrace_provider_lock); 14984 14985 /* 14986 * If we have any retained enablings, we need to match against them. 14987 * Enabling probes requires that cpu_lock be held, and we cannot hold 14988 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 14989 * module. (In particular, this happens when loading scheduling 14990 * classes.) So if we have any retained enablings, we need to dispatch 14991 * our task queue to do the match for us. 14992 */ 14993 mutex_enter(&dtrace_lock); 14994 14995 if (dtrace_retained == NULL) { 14996 mutex_exit(&dtrace_lock); 14997 return; 14998 } 14999 15000 (void) taskq_dispatch(dtrace_taskq, 15001 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15002 15003 mutex_exit(&dtrace_lock); 15004 15005 /* 15006 * And now, for a little heuristic sleaze: in general, we want to 15007 * match modules as soon as they load. However, we cannot guarantee 15008 * this, because it would lead us to the lock ordering violation 15009 * outlined above. The common case, of course, is that cpu_lock is 15010 * _not_ held -- so we delay here for a clock tick, hoping that that's 15011 * long enough for the task queue to do its work. If it's not, it's 15012 * not a serious problem -- it just means that the module that we 15013 * just loaded may not be immediately instrumentable. 15014 */ 15015 delay(1); 15016 } 15017 15018 static void 15019 dtrace_module_unloaded(modctl_t *ctl) 15020 { 15021 dtrace_probe_t template, *probe, *first, *next; 15022 dtrace_provider_t *prov; 15023 15024 template.dtpr_mod = ctl->mod_modname; 15025 15026 mutex_enter(&dtrace_provider_lock); 15027 mutex_enter(&mod_lock); 15028 mutex_enter(&dtrace_lock); 15029 15030 if (dtrace_bymod == NULL) { 15031 /* 15032 * The DTrace module is loaded (obviously) but not attached; 15033 * we don't have any work to do. 
15034 */ 15035 mutex_exit(&dtrace_provider_lock); 15036 mutex_exit(&mod_lock); 15037 mutex_exit(&dtrace_lock); 15038 return; 15039 } 15040 15041 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15042 probe != NULL; probe = probe->dtpr_nextmod) { 15043 if (probe->dtpr_ecb != NULL) { 15044 mutex_exit(&dtrace_provider_lock); 15045 mutex_exit(&mod_lock); 15046 mutex_exit(&dtrace_lock); 15047 15048 /* 15049 * This shouldn't _actually_ be possible -- we're 15050 * unloading a module that has an enabled probe in it. 15051 * (It's normally up to the provider to make sure that 15052 * this can't happen.) However, because dtps_enable() 15053 * doesn't have a failure mode, there can be an 15054 * enable/unload race. Upshot: we don't want to 15055 * assert, but we're not going to disable the 15056 * probe, either. 15057 */ 15058 if (dtrace_err_verbose) { 15059 cmn_err(CE_WARN, "unloaded module '%s' had " 15060 "enabled probes", ctl->mod_modname); 15061 } 15062 15063 return; 15064 } 15065 } 15066 15067 probe = first; 15068 15069 for (first = NULL; probe != NULL; probe = next) { 15070 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15071 15072 dtrace_probes[probe->dtpr_id - 1] = NULL; 15073 15074 next = probe->dtpr_nextmod; 15075 dtrace_hash_remove(dtrace_bymod, probe); 15076 dtrace_hash_remove(dtrace_byfunc, probe); 15077 dtrace_hash_remove(dtrace_byname, probe); 15078 15079 if (first == NULL) { 15080 first = probe; 15081 probe->dtpr_nextmod = NULL; 15082 } else { 15083 probe->dtpr_nextmod = first; 15084 first = probe; 15085 } 15086 } 15087 15088 /* 15089 * We've removed all of the module's probes from the hash chains and 15090 * from the probe array. Now issue a dtrace_sync() to be sure that 15091 * everyone has cleared out from any probe array processing. 15092 */ 15093 dtrace_sync(); 15094 15095 for (probe = first; probe != NULL; probe = first) { 15096 first = probe->dtpr_nextmod; 15097 prov = probe->dtpr_provider; 15098 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15099 probe->dtpr_arg); 15100 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15101 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15102 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15103 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15104 kmem_free(probe, sizeof (dtrace_probe_t)); 15105 } 15106 15107 mutex_exit(&dtrace_lock); 15108 mutex_exit(&mod_lock); 15109 mutex_exit(&dtrace_provider_lock); 15110 } 15111 15112 static void 15113 dtrace_suspend(void) 15114 { 15115 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15116 } 15117 15118 static void 15119 dtrace_resume(void) 15120 { 15121 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15122 } 15123 #endif 15124 15125 static int 15126 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15127 { 15128 ASSERT(MUTEX_HELD(&cpu_lock)); 15129 mutex_enter(&dtrace_lock); 15130 15131 switch (what) { 15132 case CPU_CONFIG: { 15133 dtrace_state_t *state; 15134 dtrace_optval_t *opt, rs, c; 15135 15136 /* 15137 * For now, we only allocate a new buffer for anonymous state. 
15138 */ 15139 if ((state = dtrace_anon.dta_state) == NULL) 15140 break; 15141 15142 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15143 break; 15144 15145 opt = state->dts_options; 15146 c = opt[DTRACEOPT_CPU]; 15147 15148 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15149 break; 15150 15151 /* 15152 * Regardless of what the actual policy is, we're going to 15153 * temporarily set our resize policy to be manual. We're 15154 * also going to temporarily set our CPU option to denote 15155 * the newly configured CPU. 15156 */ 15157 rs = opt[DTRACEOPT_BUFRESIZE]; 15158 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15159 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15160 15161 (void) dtrace_state_buffers(state); 15162 15163 opt[DTRACEOPT_BUFRESIZE] = rs; 15164 opt[DTRACEOPT_CPU] = c; 15165 15166 break; 15167 } 15168 15169 case CPU_UNCONFIG: 15170 /* 15171 * We don't free the buffer in the CPU_UNCONFIG case. (The 15172 * buffer will be freed when the consumer exits.) 15173 */ 15174 break; 15175 15176 default: 15177 break; 15178 } 15179 15180 mutex_exit(&dtrace_lock); 15181 return (0); 15182 } 15183 15184 #if defined(sun) 15185 static void 15186 dtrace_cpu_setup_initial(processorid_t cpu) 15187 { 15188 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15189 } 15190 #endif 15191 15192 static void 15193 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15194 { 15195 if (dtrace_toxranges >= dtrace_toxranges_max) { 15196 int osize, nsize; 15197 dtrace_toxrange_t *range; 15198 15199 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15200 15201 if (osize == 0) { 15202 ASSERT(dtrace_toxrange == NULL); 15203 ASSERT(dtrace_toxranges_max == 0); 15204 dtrace_toxranges_max = 1; 15205 } else { 15206 dtrace_toxranges_max <<= 1; 15207 } 15208 15209 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15210 range = kmem_zalloc(nsize, KM_SLEEP); 15211 15212 if (dtrace_toxrange != NULL) { 15213 ASSERT(osize != 0); 15214 bcopy(dtrace_toxrange, range, osize); 15215 kmem_free(dtrace_toxrange, osize); 15216 } 15217 15218 dtrace_toxrange = range; 15219 } 15220 15221 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15222 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15223 15224 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15225 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15226 dtrace_toxranges++; 15227 } 15228 15229 /* 15230 * DTrace Driver Cookbook Functions 15231 */ 15232 #if defined(sun) 15233 /*ARGSUSED*/ 15234 static int 15235 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15236 { 15237 dtrace_provider_id_t id; 15238 dtrace_state_t *state = NULL; 15239 dtrace_enabling_t *enab; 15240 15241 mutex_enter(&cpu_lock); 15242 mutex_enter(&dtrace_provider_lock); 15243 mutex_enter(&dtrace_lock); 15244 15245 if (ddi_soft_state_init(&dtrace_softstate, 15246 sizeof (dtrace_state_t), 0) != 0) { 15247 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15248 mutex_exit(&cpu_lock); 15249 mutex_exit(&dtrace_provider_lock); 15250 mutex_exit(&dtrace_lock); 15251 return (DDI_FAILURE); 15252 } 15253 15254 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15255 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15256 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15257 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15258 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15259 ddi_remove_minor_node(devi, NULL); 15260 ddi_soft_state_fini(&dtrace_softstate); 15261 mutex_exit(&cpu_lock); 15262 
mutex_exit(&dtrace_provider_lock); 15263 mutex_exit(&dtrace_lock); 15264 return (DDI_FAILURE); 15265 } 15266 15267 ddi_report_dev(devi); 15268 dtrace_devi = devi; 15269 15270 dtrace_modload = dtrace_module_loaded; 15271 dtrace_modunload = dtrace_module_unloaded; 15272 dtrace_cpu_init = dtrace_cpu_setup_initial; 15273 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15274 dtrace_helpers_fork = dtrace_helpers_duplicate; 15275 dtrace_cpustart_init = dtrace_suspend; 15276 dtrace_cpustart_fini = dtrace_resume; 15277 dtrace_debugger_init = dtrace_suspend; 15278 dtrace_debugger_fini = dtrace_resume; 15279 15280 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15281 15282 ASSERT(MUTEX_HELD(&cpu_lock)); 15283 15284 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15285 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15286 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15287 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15288 VM_SLEEP | VMC_IDENTIFIER); 15289 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15290 1, INT_MAX, 0); 15291 15292 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15293 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15294 NULL, NULL, NULL, NULL, NULL, 0); 15295 15296 ASSERT(MUTEX_HELD(&cpu_lock)); 15297 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15298 offsetof(dtrace_probe_t, dtpr_nextmod), 15299 offsetof(dtrace_probe_t, dtpr_prevmod)); 15300 15301 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15302 offsetof(dtrace_probe_t, dtpr_nextfunc), 15303 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15304 15305 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15306 offsetof(dtrace_probe_t, dtpr_nextname), 15307 offsetof(dtrace_probe_t, dtpr_prevname)); 15308 15309 if (dtrace_retain_max < 1) { 15310 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15311 "setting to 1", dtrace_retain_max); 15312 dtrace_retain_max = 1; 15313 } 15314 15315 /* 15316 * Now discover our toxic ranges. 15317 */ 15318 dtrace_toxic_ranges(dtrace_toxrange_add); 15319 15320 /* 15321 * Before we register ourselves as a provider to our own framework, 15322 * we would like to assert that dtrace_provider is NULL -- but that's 15323 * not true if we were loaded as a dependency of a DTrace provider. 15324 * Once we've registered, we can assert that dtrace_provider is our 15325 * pseudo provider. 15326 */ 15327 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15328 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15329 15330 ASSERT(dtrace_provider != NULL); 15331 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15332 15333 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15334 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15335 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15336 dtrace_provider, NULL, NULL, "END", 0, NULL); 15337 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15338 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15339 15340 dtrace_anon_property(); 15341 mutex_exit(&cpu_lock); 15342 15343 /* 15344 * If DTrace helper tracing is enabled, we need to allocate the 15345 * trace buffer and initialize the values. 
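 * The buffer is a single dtrace_helptrace_bufsize-byte allocation shared
 * by all CPUs; dtrace_helper_trace() carves variable-sized entries out of
 * it with a compare-and-swap loop on dtrace_helptrace_next.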
15346 */ 15347 if (dtrace_helptrace_enabled) { 15348 ASSERT(dtrace_helptrace_buffer == NULL); 15349 dtrace_helptrace_buffer = 15350 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15351 dtrace_helptrace_next = 0; 15352 } 15353 15354 /* 15355 * If there are already providers, we must ask them to provide their 15356 * probes, and then match any anonymous enabling against them. Note 15357 * that there should be no other retained enablings at this time: 15358 * the only retained enablings at this time should be the anonymous 15359 * enabling. 15360 */ 15361 if (dtrace_anon.dta_enabling != NULL) { 15362 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15363 15364 dtrace_enabling_provide(NULL); 15365 state = dtrace_anon.dta_state; 15366 15367 /* 15368 * We couldn't hold cpu_lock across the above call to 15369 * dtrace_enabling_provide(), but we must hold it to actually 15370 * enable the probes. We have to drop all of our locks, pick 15371 * up cpu_lock, and regain our locks before matching the 15372 * retained anonymous enabling. 15373 */ 15374 mutex_exit(&dtrace_lock); 15375 mutex_exit(&dtrace_provider_lock); 15376 15377 mutex_enter(&cpu_lock); 15378 mutex_enter(&dtrace_provider_lock); 15379 mutex_enter(&dtrace_lock); 15380 15381 if ((enab = dtrace_anon.dta_enabling) != NULL) 15382 (void) dtrace_enabling_match(enab, NULL); 15383 15384 mutex_exit(&cpu_lock); 15385 } 15386 15387 mutex_exit(&dtrace_lock); 15388 mutex_exit(&dtrace_provider_lock); 15389 15390 if (state != NULL) { 15391 /* 15392 * If we created any anonymous state, set it going now. 15393 */ 15394 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15395 } 15396 15397 return (DDI_SUCCESS); 15398 } 15399 #endif 15400 15401 #if !defined(sun) 15402 #if __FreeBSD_version >= 800039 15403 static void 15404 dtrace_dtr(void *data __unused) 15405 { 15406 } 15407 #endif 15408 #endif 15409 15410 #if !defined(sun) 15411 static dev_type_open(dtrace_open); 15412 15413 /* Pseudo Device Entry points */ 15414 /* Just opens, clones to the fileops below */ 15415 const struct cdevsw dtrace_cdevsw = { 15416 dtrace_open, noclose, noread, nowrite, noioctl, 15417 nostop, notty, nopoll, nommap, nokqfilter, 15418 D_OTHER 15419 }; 15420 15421 static int dtrace_ioctl(struct file *fp, u_long cmd, void *data); 15422 static int dtrace_close(struct file *fp); 15423 15424 static const struct fileops dtrace_fileops = { 15425 .fo_read = fbadop_read, 15426 .fo_write = fbadop_write, 15427 .fo_ioctl = dtrace_ioctl, 15428 .fo_fcntl = fnullop_fcntl, 15429 .fo_poll = fnullop_poll, 15430 .fo_stat = fbadop_stat, 15431 .fo_close = dtrace_close, 15432 .fo_kqfilter = fnullop_kqfilter, 15433 }; 15434 #endif 15435 15436 /*ARGSUSED*/ 15437 static int 15438 #if defined(sun) 15439 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15440 #else 15441 dtrace_open(dev_t dev, int flags, int mode, struct lwp *l) 15442 #endif 15443 { 15444 dtrace_state_t *state; 15445 uint32_t priv; 15446 uid_t uid; 15447 zoneid_t zoneid; 15448 15449 #if defined(sun) 15450 if (getminor(*devp) == DTRACEMNRN_HELPER) 15451 return (0); 15452 15453 /* 15454 * If this wasn't an open with the "helper" minor, then it must be 15455 * the "dtrace" minor. 
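 * (The helper minor exists so that user processes can hand DOF containing
 * helper actions and helper providers to the kernel via the DTRACEHIOC_*
 * ioctls; only opens of the "dtrace" minor are given consumer state below.)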
15456 */ 15457 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15458 #else 15459 cred_t *cred_p = NULL; 15460 struct file *fp; 15461 int fd; 15462 int res; 15463 15464 if ((res = fd_allocfile(&fp, &fd)) != 0) 15465 return res; 15466 #if 0 15467 #if __FreeBSD_version < 800039 15468 /* 15469 * The first minor device is the one that is cloned so there is 15470 * nothing more to do here. 15471 */ 15472 if (dev2unit(dev) == 0) 15473 return 0; 15474 15475 /* 15476 * Devices are cloned, so if the DTrace state has already 15477 * been allocated, that means this device belongs to a 15478 * different client. Each client should open '/dev/dtrace' 15479 * to get a cloned device. 15480 */ 15481 if (dev->si_drv1 != NULL) 15482 return (EBUSY); 15483 #endif 15484 15485 cred_p = dev->si_cred; 15486 #endif 15487 cred_p = l->l_cred; 15488 #endif 15489 15490 /* 15491 * If no DTRACE_PRIV_* bits are set in the credential, then the 15492 * caller lacks sufficient permission to do anything with DTrace. 15493 */ 15494 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15495 if (priv == DTRACE_PRIV_NONE) { 15496 return (EACCES); 15497 } 15498 15499 /* 15500 * Ask all providers to provide all their probes. 15501 */ 15502 mutex_enter(&dtrace_provider_lock); 15503 dtrace_probe_provide(NULL, NULL); 15504 mutex_exit(&dtrace_provider_lock); 15505 15506 mutex_enter(&cpu_lock); 15507 mutex_enter(&dtrace_lock); 15508 dtrace_opens++; 15509 dtrace_membar_producer(); 15510 15511 #if defined(sun) 15512 /* 15513 * If the kernel debugger is active (that is, if the kernel debugger 15514 * modified text in some way), we won't allow the open. 15515 */ 15516 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15517 dtrace_opens--; 15518 mutex_exit(&cpu_lock); 15519 mutex_exit(&dtrace_lock); 15520 return (EBUSY); 15521 } 15522 15523 state = dtrace_state_create(devp, cred_p); 15524 #else 15525 state = dtrace_state_create(dev, cred_p); 15526 #endif 15527 15528 mutex_exit(&cpu_lock); 15529 15530 if (state == NULL) { 15531 #if defined(sun) 15532 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 15533 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15534 #else 15535 --dtrace_opens; 15536 #endif 15537 mutex_exit(&dtrace_lock); 15538 return (EAGAIN); 15539 } 15540 15541 mutex_exit(&dtrace_lock); 15542 15543 #if defined(sun) 15544 return (0); 15545 #else 15546 return fd_clone(fp, fd, flags, &dtrace_fileops, state); 15547 #endif 15548 } 15549 15550 /*ARGSUSED*/ 15551 static int 15552 #if defined(sun) 15553 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15554 #else 15555 dtrace_close(struct file *fp) 15556 #endif 15557 { 15558 #if defined(sun) 15559 minor_t minor = getminor(dev); 15560 dtrace_state_t *state; 15561 15562 if (minor == DTRACEMNRN_HELPER) 15563 return (0); 15564 15565 state = ddi_get_soft_state(dtrace_softstate, minor); 15566 #else 15567 dtrace_state_t *state = (dtrace_state_t *)fp->f_data; 15568 #endif 15569 15570 mutex_enter(&cpu_lock); 15571 mutex_enter(&dtrace_lock); 15572 15573 if (state != NULL) { 15574 if (state->dts_anon) { 15575 /* 15576 * There is anonymous state. Destroy that first. 
15577 */ 15578 ASSERT(dtrace_anon.dta_state == NULL); 15579 dtrace_state_destroy(state->dts_anon); 15580 } 15581 15582 dtrace_state_destroy(state); 15583 15584 #if !defined(sun) 15585 fp->f_data = NULL; 15586 #endif 15587 } 15588 15589 ASSERT(dtrace_opens > 0); 15590 #if defined(sun) 15591 /* 15592 * Only relinquish control of the kernel debugger interface when there 15593 * are no consumers and no anonymous enablings. 15594 */ 15595 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL) 15596 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15597 #else 15598 --dtrace_opens; 15599 #endif 15600 15601 mutex_exit(&dtrace_lock); 15602 mutex_exit(&cpu_lock); 15603 15604 return (0); 15605 } 15606 15607 #if defined(sun) 15608 /*ARGSUSED*/ 15609 static int 15610 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15611 { 15612 int rval; 15613 dof_helper_t help, *dhp = NULL; 15614 15615 switch (cmd) { 15616 case DTRACEHIOC_ADDDOF: 15617 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15618 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15619 return (EFAULT); 15620 } 15621 15622 dhp = &help; 15623 arg = (intptr_t)help.dofhp_dof; 15624 /*FALLTHROUGH*/ 15625 15626 case DTRACEHIOC_ADD: { 15627 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15628 15629 if (dof == NULL) 15630 return (rval); 15631 15632 mutex_enter(&dtrace_lock); 15633 15634 /* 15635 * dtrace_helper_slurp() takes responsibility for the dof -- 15636 * it may free it now or it may save it and free it later. 15637 */ 15638 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15639 *rv = rval; 15640 rval = 0; 15641 } else { 15642 rval = EINVAL; 15643 } 15644 15645 mutex_exit(&dtrace_lock); 15646 return (rval); 15647 } 15648 15649 case DTRACEHIOC_REMOVE: { 15650 mutex_enter(&dtrace_lock); 15651 rval = dtrace_helper_destroygen(arg); 15652 mutex_exit(&dtrace_lock); 15653 15654 return (rval); 15655 } 15656 15657 default: 15658 break; 15659 } 15660 15661 return (ENOTTY); 15662 } 15663 15664 /*ARGSUSED*/ 15665 static int 15666 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15667 { 15668 minor_t minor = getminor(dev); 15669 dtrace_state_t *state; 15670 int rval; 15671 15672 if (minor == DTRACEMNRN_HELPER) 15673 return (dtrace_ioctl_helper(cmd, arg, rv)); 15674 15675 state = ddi_get_soft_state(dtrace_softstate, minor); 15676 15677 if (state->dts_anon) { 15678 ASSERT(dtrace_anon.dta_state == NULL); 15679 state = state->dts_anon; 15680 } 15681 15682 switch (cmd) { 15683 case DTRACEIOC_PROVIDER: { 15684 dtrace_providerdesc_t pvd; 15685 dtrace_provider_t *pvp; 15686 15687 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15688 return (EFAULT); 15689 15690 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15691 mutex_enter(&dtrace_provider_lock); 15692 15693 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15694 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15695 break; 15696 } 15697 15698 mutex_exit(&dtrace_provider_lock); 15699 15700 if (pvp == NULL) 15701 return (ESRCH); 15702 15703 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15704 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15705 15706 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15707 return (EFAULT); 15708 15709 return (0); 15710 } 15711 15712 case DTRACEIOC_EPROBE: { 15713 dtrace_eprobedesc_t epdesc; 15714 dtrace_ecb_t *ecb; 15715 dtrace_action_t *act; 15716 void *buf; 15717 size_t size; 15718 uintptr_t dest; 15719 int nrecs; 15720 15721 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) 
!= 0) 15722 return (EFAULT); 15723 15724 mutex_enter(&dtrace_lock); 15725 15726 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15727 mutex_exit(&dtrace_lock); 15728 return (EINVAL); 15729 } 15730 15731 if (ecb->dte_probe == NULL) { 15732 mutex_exit(&dtrace_lock); 15733 return (EINVAL); 15734 } 15735 15736 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15737 epdesc.dtepd_uarg = ecb->dte_uarg; 15738 epdesc.dtepd_size = ecb->dte_size; 15739 15740 nrecs = epdesc.dtepd_nrecs; 15741 epdesc.dtepd_nrecs = 0; 15742 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15743 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15744 continue; 15745 15746 epdesc.dtepd_nrecs++; 15747 } 15748 15749 /* 15750 * Now that we have the size, we need to allocate a temporary 15751 * buffer in which to store the complete description. We need 15752 * the temporary buffer to be able to drop dtrace_lock() 15753 * across the copyout(), below. 15754 */ 15755 size = sizeof (dtrace_eprobedesc_t) + 15756 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15757 15758 buf = kmem_alloc(size, KM_SLEEP); 15759 dest = (uintptr_t)buf; 15760 15761 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15762 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15763 15764 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15765 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15766 continue; 15767 15768 if (nrecs-- == 0) 15769 break; 15770 15771 bcopy(&act->dta_rec, (void *)dest, 15772 sizeof (dtrace_recdesc_t)); 15773 dest += sizeof (dtrace_recdesc_t); 15774 } 15775 15776 mutex_exit(&dtrace_lock); 15777 15778 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15779 kmem_free(buf, size); 15780 return (EFAULT); 15781 } 15782 15783 kmem_free(buf, size); 15784 return (0); 15785 } 15786 15787 case DTRACEIOC_AGGDESC: { 15788 dtrace_aggdesc_t aggdesc; 15789 dtrace_action_t *act; 15790 dtrace_aggregation_t *agg; 15791 int nrecs; 15792 uint32_t offs; 15793 dtrace_recdesc_t *lrec; 15794 void *buf; 15795 size_t size; 15796 uintptr_t dest; 15797 15798 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 15799 return (EFAULT); 15800 15801 mutex_enter(&dtrace_lock); 15802 15803 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 15804 mutex_exit(&dtrace_lock); 15805 return (EINVAL); 15806 } 15807 15808 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 15809 15810 nrecs = aggdesc.dtagd_nrecs; 15811 aggdesc.dtagd_nrecs = 0; 15812 15813 offs = agg->dtag_base; 15814 lrec = &agg->dtag_action.dta_rec; 15815 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 15816 15817 for (act = agg->dtag_first; ; act = act->dta_next) { 15818 ASSERT(act->dta_intuple || 15819 DTRACEACT_ISAGG(act->dta_kind)); 15820 15821 /* 15822 * If this action has a record size of zero, it 15823 * denotes an argument to the aggregating action. 15824 * Because the presence of this record doesn't (or 15825 * shouldn't) affect the way the data is interpreted, 15826 * we don't copy it out to save user-level the 15827 * confusion of dealing with a zero-length record. 15828 */ 15829 if (act->dta_rec.dtrd_size == 0) { 15830 ASSERT(agg->dtag_hasarg); 15831 continue; 15832 } 15833 15834 aggdesc.dtagd_nrecs++; 15835 15836 if (act == &agg->dtag_action) 15837 break; 15838 } 15839 15840 /* 15841 * Now that we have the size, we need to allocate a temporary 15842 * buffer in which to store the complete description. 
We need 15843 * the temporary buffer to be able to drop dtrace_lock() 15844 * across the copyout(), below. 15845 */ 15846 size = sizeof (dtrace_aggdesc_t) + 15847 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 15848 15849 buf = kmem_alloc(size, KM_SLEEP); 15850 dest = (uintptr_t)buf; 15851 15852 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 15853 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 15854 15855 for (act = agg->dtag_first; ; act = act->dta_next) { 15856 dtrace_recdesc_t rec = act->dta_rec; 15857 15858 /* 15859 * See the comment in the above loop for why we pass 15860 * over zero-length records. 15861 */ 15862 if (rec.dtrd_size == 0) { 15863 ASSERT(agg->dtag_hasarg); 15864 continue; 15865 } 15866 15867 if (nrecs-- == 0) 15868 break; 15869 15870 rec.dtrd_offset -= offs; 15871 bcopy(&rec, (void *)dest, sizeof (rec)); 15872 dest += sizeof (dtrace_recdesc_t); 15873 15874 if (act == &agg->dtag_action) 15875 break; 15876 } 15877 15878 mutex_exit(&dtrace_lock); 15879 15880 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15881 kmem_free(buf, size); 15882 return (EFAULT); 15883 } 15884 15885 kmem_free(buf, size); 15886 return (0); 15887 } 15888 15889 case DTRACEIOC_ENABLE: { 15890 dof_hdr_t *dof; 15891 dtrace_enabling_t *enab = NULL; 15892 dtrace_vstate_t *vstate; 15893 int err = 0; 15894 15895 *rv = 0; 15896 15897 /* 15898 * If a NULL argument has been passed, we take this as our 15899 * cue to reevaluate our enablings. 15900 */ 15901 if (arg == NULL) { 15902 dtrace_enabling_matchall(); 15903 15904 return (0); 15905 } 15906 15907 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 15908 return (rval); 15909 15910 mutex_enter(&cpu_lock); 15911 mutex_enter(&dtrace_lock); 15912 vstate = &state->dts_vstate; 15913 15914 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 15915 mutex_exit(&dtrace_lock); 15916 mutex_exit(&cpu_lock); 15917 dtrace_dof_destroy(dof); 15918 return (EBUSY); 15919 } 15920 15921 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 15922 mutex_exit(&dtrace_lock); 15923 mutex_exit(&cpu_lock); 15924 dtrace_dof_destroy(dof); 15925 return (EINVAL); 15926 } 15927 15928 if ((rval = dtrace_dof_options(dof, state)) != 0) { 15929 dtrace_enabling_destroy(enab); 15930 mutex_exit(&dtrace_lock); 15931 mutex_exit(&cpu_lock); 15932 dtrace_dof_destroy(dof); 15933 return (rval); 15934 } 15935 15936 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 15937 err = dtrace_enabling_retain(enab); 15938 } else { 15939 dtrace_enabling_destroy(enab); 15940 } 15941 15942 mutex_exit(&cpu_lock); 15943 mutex_exit(&dtrace_lock); 15944 dtrace_dof_destroy(dof); 15945 15946 return (err); 15947 } 15948 15949 case DTRACEIOC_REPLICATE: { 15950 dtrace_repldesc_t desc; 15951 dtrace_probedesc_t *match = &desc.dtrpd_match; 15952 dtrace_probedesc_t *create = &desc.dtrpd_create; 15953 int err; 15954 15955 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15956 return (EFAULT); 15957 15958 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15959 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15960 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15961 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15962 15963 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15964 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15965 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15966 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15967 15968 mutex_enter(&dtrace_lock); 15969 err = dtrace_enabling_replicate(state, match, create); 15970 mutex_exit(&dtrace_lock); 15971 15972 return (err); 
15973 } 15974 15975 case DTRACEIOC_PROBEMATCH: 15976 case DTRACEIOC_PROBES: { 15977 dtrace_probe_t *probe = NULL; 15978 dtrace_probedesc_t desc; 15979 dtrace_probekey_t pkey; 15980 dtrace_id_t i; 15981 int m = 0; 15982 uint32_t priv; 15983 uid_t uid; 15984 zoneid_t zoneid; 15985 15986 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 15987 return (EFAULT); 15988 15989 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 15990 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 15991 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 15992 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 15993 15994 /* 15995 * Before we attempt to match this probe, we want to give 15996 * all providers the opportunity to provide it. 15997 */ 15998 if (desc.dtpd_id == DTRACE_IDNONE) { 15999 mutex_enter(&dtrace_provider_lock); 16000 dtrace_probe_provide(&desc, NULL); 16001 mutex_exit(&dtrace_provider_lock); 16002 desc.dtpd_id++; 16003 } 16004 16005 if (cmd == DTRACEIOC_PROBEMATCH) { 16006 dtrace_probekey(&desc, &pkey); 16007 pkey.dtpk_id = DTRACE_IDNONE; 16008 } 16009 16010 dtrace_cred2priv(cr, &priv, &uid, &zoneid); 16011 16012 mutex_enter(&dtrace_lock); 16013 16014 if (cmd == DTRACEIOC_PROBEMATCH) { 16015 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16016 if ((probe = dtrace_probes[i - 1]) != NULL && 16017 (m = dtrace_match_probe(probe, &pkey, 16018 priv, uid, zoneid)) != 0) 16019 break; 16020 } 16021 16022 if (m < 0) { 16023 mutex_exit(&dtrace_lock); 16024 return (EINVAL); 16025 } 16026 16027 } else { 16028 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) { 16029 if ((probe = dtrace_probes[i - 1]) != NULL && 16030 dtrace_match_priv(probe, priv, uid, zoneid)) 16031 break; 16032 } 16033 } 16034 16035 if (probe == NULL) { 16036 mutex_exit(&dtrace_lock); 16037 return (ESRCH); 16038 } 16039 16040 dtrace_probe_description(probe, &desc); 16041 mutex_exit(&dtrace_lock); 16042 16043 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16044 return (EFAULT); 16045 16046 return (0); 16047 } 16048 16049 case DTRACEIOC_PROBEARG: { 16050 dtrace_argdesc_t desc; 16051 dtrace_probe_t *probe; 16052 dtrace_provider_t *prov; 16053 16054 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16055 return (EFAULT); 16056 16057 if (desc.dtargd_id == DTRACE_IDNONE) 16058 return (EINVAL); 16059 16060 if (desc.dtargd_ndx == DTRACE_ARGNONE) 16061 return (EINVAL); 16062 16063 mutex_enter(&dtrace_provider_lock); 16064 mutex_enter(&mod_lock); 16065 mutex_enter(&dtrace_lock); 16066 16067 if (desc.dtargd_id > dtrace_nprobes) { 16068 mutex_exit(&dtrace_lock); 16069 mutex_exit(&mod_lock); 16070 mutex_exit(&dtrace_provider_lock); 16071 return (EINVAL); 16072 } 16073 16074 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) { 16075 mutex_exit(&dtrace_lock); 16076 mutex_exit(&mod_lock); 16077 mutex_exit(&dtrace_provider_lock); 16078 return (EINVAL); 16079 } 16080 16081 mutex_exit(&dtrace_lock); 16082 16083 prov = probe->dtpr_provider; 16084 16085 if (prov->dtpv_pops.dtps_getargdesc == NULL) { 16086 /* 16087 * There isn't any typed information for this probe. 16088 * Set the argument number to DTRACE_ARGNONE. 
16089 */ 16090 desc.dtargd_ndx = DTRACE_ARGNONE; 16091 } else { 16092 desc.dtargd_native[0] = '\0'; 16093 desc.dtargd_xlate[0] = '\0'; 16094 desc.dtargd_mapping = desc.dtargd_ndx; 16095 16096 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg, 16097 probe->dtpr_id, probe->dtpr_arg, &desc); 16098 } 16099 16100 mutex_exit(&mod_lock); 16101 mutex_exit(&dtrace_provider_lock); 16102 16103 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0) 16104 return (EFAULT); 16105 16106 return (0); 16107 } 16108 16109 case DTRACEIOC_GO: { 16110 processorid_t cpuid; 16111 rval = dtrace_state_go(state, &cpuid); 16112 16113 if (rval != 0) 16114 return (rval); 16115 16116 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16117 return (EFAULT); 16118 16119 return (0); 16120 } 16121 16122 case DTRACEIOC_STOP: { 16123 processorid_t cpuid; 16124 16125 mutex_enter(&dtrace_lock); 16126 rval = dtrace_state_stop(state, &cpuid); 16127 mutex_exit(&dtrace_lock); 16128 16129 if (rval != 0) 16130 return (rval); 16131 16132 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0) 16133 return (EFAULT); 16134 16135 return (0); 16136 } 16137 16138 case DTRACEIOC_DOFGET: { 16139 dof_hdr_t hdr, *dof; 16140 uint64_t len; 16141 16142 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0) 16143 return (EFAULT); 16144 16145 mutex_enter(&dtrace_lock); 16146 dof = dtrace_dof_create(state); 16147 mutex_exit(&dtrace_lock); 16148 16149 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz); 16150 rval = copyout(dof, (void *)arg, len); 16151 dtrace_dof_destroy(dof); 16152 16153 return (rval == 0 ? 0 : EFAULT); 16154 } 16155 16156 case DTRACEIOC_AGGSNAP: 16157 case DTRACEIOC_BUFSNAP: { 16158 dtrace_bufdesc_t desc; 16159 caddr_t cached; 16160 dtrace_buffer_t *buf; 16161 16162 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16163 return (EFAULT); 16164 16165 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU) 16166 return (EINVAL); 16167 16168 mutex_enter(&dtrace_lock); 16169 16170 if (cmd == DTRACEIOC_BUFSNAP) { 16171 buf = &state->dts_buffer[desc.dtbd_cpu]; 16172 } else { 16173 buf = &state->dts_aggbuffer[desc.dtbd_cpu]; 16174 } 16175 16176 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) { 16177 size_t sz = buf->dtb_offset; 16178 16179 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) { 16180 mutex_exit(&dtrace_lock); 16181 return (EBUSY); 16182 } 16183 16184 /* 16185 * If this buffer has already been consumed, we're 16186 * going to indicate that there's nothing left here 16187 * to consume. 16188 */ 16189 if (buf->dtb_flags & DTRACEBUF_CONSUMED) { 16190 mutex_exit(&dtrace_lock); 16191 16192 desc.dtbd_size = 0; 16193 desc.dtbd_drops = 0; 16194 desc.dtbd_errors = 0; 16195 desc.dtbd_oldest = 0; 16196 sz = sizeof (desc); 16197 16198 if (copyout(&desc, (void *)arg, sz) != 0) 16199 return (EFAULT); 16200 16201 return (0); 16202 } 16203 16204 /* 16205 * If this is a ring buffer that has wrapped, we want 16206 * to copy the whole thing out. 
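 * Polishing zeroes any gap between the current offset and the oldest
 * wrapped data, so it is safe to copy out the full dtb_size rather than
 * just the bytes up to dtb_offset.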

	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
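
	/*
	 * For reference, a consumer snapshots a principal buffer by issuing
	 * DTRACEIOC_BUFSNAP once per CPU.  The following userland sketch is
	 * illustrative only; it assumes "fd" is the control device, "cpu"
	 * is the CPU of interest and "data" points to at least as many
	 * bytes as the per-CPU principal buffer size configured for this
	 * state:
	 *
	 *	dtrace_bufdesc_t bd;
	 *
	 *	bzero(&bd, sizeof (bd));
	 *	bd.dtbd_cpu = cpu;
	 *	bd.dtbd_data = data;
	 *
	 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &bd) == 0) {
	 *		... consume the first bd.dtbd_size bytes of data ...
	 *	}
	 *
	 * ENOENT indicates that the CPU has no buffer or that the buffers
	 * could not be switched (e.g. the CPU is not in the ready set);
	 * for ring and fill buffers, EBUSY indicates that tracing has not
	 * yet stopped.
	 */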

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
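
	/*
	 * Note that DTRACEIOC_STATUS also serves as the consumer's
	 * keepalive: the dts_laststatus update above is what
	 * dtrace_state_deadman() consults to decide whether the consumer
	 * is still checking in.  A consumer that stops requesting status
	 * may therefore eventually find its state transitioned to
	 * DTRACE_ACTIVITY_KILLED by the deadman (see dtrace_deadman_user).
	 */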

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}

	default:
		break;
	}

	return (ENOTTY);
}
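
/*
 * The DTRACEIOC_FORMAT case above supports a two-pass lookup:  a consumer
 * first passes a dtfd_length of zero to learn the required length, then
 * allocates a buffer of that size and repeats the request.  The following
 * userland sketch is illustrative only; "fd" is assumed to be the control
 * device and "fmtndx" a format index taken from a record description:
 *
 *	dtrace_fmtdesc_t fmt;
 *
 *	bzero(&fmt, sizeof (fmt));
 *	fmt.dtfd_format = fmtndx;
 *
 *	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) != 0)
 *		return (-1);
 *
 *	if ((fmt.dtfd_string = malloc(fmt.dtfd_length)) == NULL)
 *		return (-1);
 *
 *	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) != 0)
 *		return (-1);
 */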

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now
		 * inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
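
/*
 * On Solaris, the framework is delivered as the "dtrace" pseudo driver
 * declared above: _init(), _info() and _fini() hand the modlinkage to the
 * module framework, which invokes dtrace_attach() and dtrace_detach()
 * through dtrace_ops and routes consumer requests to dtrace_open(),
 * dtrace_ioctl() and dtrace_close() through dtrace_cb_ops.
 */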
#else

#if 0
static d_ioctl_t	dtrace_ioctl;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int, struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
#endif

void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};
#endif
void	dtrace_invop_init(void);
void	dtrace_invop_uninit(void);

static void	dtrace_load(void *);
static int	dtrace_unload(void);

#include <dtrace_anon.c>
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

MODULE(MODULE_CLASS_MISC, dtrace, "solaris");

#if 0
DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif
#endif