/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Joyent, Inc.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#ifndef _SYS_DTRACE_IMPL_H
#define	_SYS_DTRACE_IMPL_H

#ifdef	__cplusplus
extern "C" {
#endif

/*
 * DTrace Dynamic Tracing Software: Kernel Implementation Interfaces
 *
 * Note: The contents of this file are private to the implementation of the
 * Solaris system and DTrace subsystem and are subject to change at any time
 * without notice.  Applications and drivers using these interfaces will fail
 * to run on future releases.  These interfaces should not be used for any
 * purpose except those expressly outlined in dtrace(7D) and libdtrace(3LIB).
 * Please refer to the "Solaris Dynamic Tracing Guide" for more information.
 */

#include <sys/dtrace.h>
#include <sys/file.h>

#ifndef illumos
#ifdef __sparcv9
typedef uint32_t pc_t;
#else
typedef uintptr_t pc_t;
#endif
typedef u_long greg_t;
#endif

/*
 * DTrace Implementation Constants and Typedefs
 */
#define	DTRACE_MAXPROPLEN		128
#define	DTRACE_DYNVAR_CHUNKSIZE		256

#ifdef __FreeBSD__
#define	NCPU		MAXCPU
#endif /* __FreeBSD__ */

struct dtrace_probe;
struct dtrace_ecb;
struct dtrace_predicate;
struct dtrace_action;
struct dtrace_provider;
struct dtrace_state;

typedef struct dtrace_probe dtrace_probe_t;
typedef struct dtrace_ecb dtrace_ecb_t;
typedef struct dtrace_predicate dtrace_predicate_t;
typedef struct dtrace_action dtrace_action_t;
typedef struct dtrace_provider dtrace_provider_t;
typedef struct dtrace_meta dtrace_meta_t;
typedef struct dtrace_state dtrace_state_t;
typedef uint32_t dtrace_optid_t;
typedef uint32_t dtrace_specid_t;
typedef uint64_t dtrace_genid_t;

/*
 * DTrace Probes
 *
 * The probe is the fundamental unit of the DTrace architecture.  Probes are
 * created by DTrace providers, and managed by the DTrace framework.  A probe
 * is identified by a unique <provider, module, function, name> tuple, and has
 * a unique probe identifier assigned to it.  (Some probes are not associated
 * with a specific point in text; these are called _unanchored probes_ and
 * have no module or function associated with them.)  Probes are represented
 * as a dtrace_probe structure.  To allow quick lookups based on each element
 * of the probe tuple, probes are hashed by each of provider, module,
 * function and name.
 * (If a lookup is performed based on a regular expression, a dtrace_probekey
 * is prepared, and a linear search is performed.)  Each probe is
 * additionally pointed to by a linear array indexed by its identifier.  The
 * identifier is the provider's mechanism for indicating to the DTrace
 * framework that a probe has fired:  the identifier is passed as the first
 * argument to dtrace_probe(), where it is then mapped into the corresponding
 * dtrace_probe structure.  From the dtrace_probe structure, dtrace_probe()
 * can iterate over the probe's list of enabling control blocks; see "DTrace
 * Enabling Control Blocks", below.
 */
struct dtrace_probe {
	dtrace_id_t dtpr_id;			/* probe identifier */
	dtrace_ecb_t *dtpr_ecb;			/* ECB list; see below */
	dtrace_ecb_t *dtpr_ecb_last;		/* last ECB in list */
	void *dtpr_arg;				/* provider argument */
	dtrace_cacheid_t dtpr_predcache;	/* predicate cache ID */
	int dtpr_aframes;			/* artificial frames */
	dtrace_provider_t *dtpr_provider;	/* pointer to provider */
	char *dtpr_mod;				/* probe's module name */
	char *dtpr_func;			/* probe's function name */
	char *dtpr_name;			/* probe's name */
	dtrace_probe_t *dtpr_nextmod;		/* next in module hash */
	dtrace_probe_t *dtpr_prevmod;		/* previous in module hash */
	dtrace_probe_t *dtpr_nextfunc;		/* next in function hash */
	dtrace_probe_t *dtpr_prevfunc;		/* previous in function hash */
	dtrace_probe_t *dtpr_nextname;		/* next in name hash */
	dtrace_probe_t *dtpr_prevname;		/* previous in name hash */
	dtrace_genid_t dtpr_gen;		/* probe generation ID */
};

typedef int dtrace_probekey_f(const char *, const char *, int);

typedef struct dtrace_probekey {
	char *dtpk_prov;			/* provider name to match */
	dtrace_probekey_f *dtpk_pmatch;		/* provider matching function */
	char *dtpk_mod;				/* module name to match */
	dtrace_probekey_f *dtpk_mmatch;		/* module matching function */
	char *dtpk_func;			/* func name to match */
	dtrace_probekey_f *dtpk_fmatch;		/* func matching function */
	char *dtpk_name;			/* name to match */
	dtrace_probekey_f *dtpk_nmatch;		/* name matching function */
	dtrace_id_t dtpk_id;			/* identifier to match */
} dtrace_probekey_t;

typedef struct dtrace_hashbucket {
	struct dtrace_hashbucket *dthb_next;	/* next on hash chain */
	dtrace_probe_t *dthb_chain;		/* chain of probes */
	int dthb_len;				/* number of probes here */
} dtrace_hashbucket_t;

typedef struct dtrace_hash {
	dtrace_hashbucket_t **dth_tab;		/* hash table */
	int dth_size;				/* size of hash table */
	int dth_mask;				/* mask to index into table */
	int dth_nbuckets;			/* total number of buckets */
	uintptr_t dth_nextoffs;			/* offset of next in probe */
	uintptr_t dth_prevoffs;			/* offset of prev in probe */
	uintptr_t dth_stroffs;			/* offset of str in probe */
} dtrace_hash_t;
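
/*
 * For illustration only -- a hedged sketch (not part of this header) of a
 * lookup against the offset-based hash above.  The helper names are
 * hypothetical; the real routines live in dtrace.c.  The point is that
 * dth_stroffs and dth_nextoffs let a single hash implementation walk the
 * dtpr_mod, dtpr_func or dtpr_name chains generically:
 *
 *	static char *
 *	example_hash_str(dtrace_hash_t *hash, dtrace_probe_t *probe)
 *	{
 *		// The hashed string sits dth_stroffs bytes into the probe.
 *		return (*(char **)((uintptr_t)probe + hash->dth_stroffs));
 *	}
 *
 *	static dtrace_probe_t *
 *	example_hash_lookup(dtrace_hash_t *hash, const char *str, uint_t hval)
 *	{
 *		dtrace_hashbucket_t *b;
 *
 *		// Each bucket chains probes that share the same string.
 *		for (b = hash->dth_tab[hval & hash->dth_mask];
 *		    b != NULL; b = b->dthb_next) {
 *			if (strcmp(example_hash_str(hash, b->dthb_chain),
 *			    str) == 0)
 *				return (b->dthb_chain);
 *		}
 *		return (NULL);
 *	}
 */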

/*
 * DTrace Enabling Control Blocks
 *
 * When a provider wishes to fire a probe, it calls into dtrace_probe(),
 * passing the probe identifier as the first argument.  As described above,
 * dtrace_probe() maps the identifier into a pointer to a dtrace_probe_t
 * structure.  This structure contains information about the probe, and a
 * pointer to the list of Enabling Control Blocks (ECBs).  Each ECB points to
 * DTrace consumer state, and contains an optional predicate, and a list of
 * actions.  (Shown schematically below.)  The ECB abstraction allows a
 * single probe to be multiplexed across disjoint consumers, or across
 * disjoint enablings of a single probe within one consumer.
 *
 *          Enabling Control Block
 *               dtrace_ecb_t
 * +------------------------+
 * | dtrace_epid_t ---------+--------------> Enabled Probe ID (EPID)
 * | dtrace_state_t * ------+--------------> State associated with this ECB
 * | dtrace_predicate_t * --+---------+
 * | dtrace_action_t * -----+----+    |
 * | dtrace_ecb_t * ---+    |    |    |       Predicate (if any)
 * +-------------------+----+    |    |       dtrace_predicate_t
 *                     |         |    +---> +--------------------+
 *                     |         |          | dtrace_difo_t * ---+----> DIFO
 *                     |         |          +--------------------+
 *                     |         |
 *            Next ECB |         |           Action
 *            (if any) |         |           dtrace_action_t
 *                     :         +--> +-------------------+
 *                     :              | dtrace_actkind_t -+------> kind
 *                     v              | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_recdesc_t -+------> record descr.
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               |           Action
 *                               |           dtrace_action_t
 *                               +--> +-------------------+
 *                                    | dtrace_actkind_t -+------> kind
 *                                    | dtrace_difo_t * --+------> DIFO (if any)
 *                                    | dtrace_action_t * +------+
 *                                    +-------------------+      |
 *                                                               | Next action
 *                               +-------------------------------+  (if any)
 *                               |
 *                               :
 *                               v
 *
 *
 * dtrace_probe() iterates over the ECB list.  If the ECB needs less space
 * than is available in the principal buffer, the ECB is processed:  if the
 * predicate is non-NULL, the DIF object is executed.  If the result is
 * non-zero, the action list is processed, with each action being executed
 * accordingly.  When the action list has been completely executed,
 * processing advances to the next ECB.  The ECB abstraction allows disjoint
 * consumers to multiplex on single probes.
 *
 * Execution of the ECB results in consuming dte_size bytes in the buffer
 * to record data.  During execution, dte_needed bytes must be available in
 * the buffer.  This space is used for both recorded data and tuple data.
 */
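
/*
 * For illustration only -- a hedged sketch of the loop described above, with
 * buffer-space accounting and fault handling elided.  The helper names
 * (example_eval_predicate(), example_do_action()) are hypothetical:
 *
 *	void
 *	example_probe_fire(dtrace_probe_t *probe)
 *	{
 *		dtrace_ecb_t *ecb;
 *		dtrace_action_t *act;
 *
 *		for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
 *			if (ecb->dte_predicate != NULL &&
 *			    !example_eval_predicate(ecb->dte_predicate))
 *				continue;	// predicate evaluated to zero
 *
 *			for (act = ecb->dte_action; act != NULL;
 *			    act = act->dta_next)
 *				example_do_action(ecb, act);
 *		}
 *	}
 */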

struct dtrace_ecb {
	dtrace_epid_t dte_epid;			/* enabled probe ID */
	uint32_t dte_alignment;			/* required alignment */
	size_t dte_needed;			/* space needed for execution */
	size_t dte_size;			/* size of recorded payload */
	dtrace_predicate_t *dte_predicate;	/* predicate, if any */
	dtrace_action_t *dte_action;		/* actions, if any */
	dtrace_ecb_t *dte_next;			/* next ECB on probe */
	dtrace_state_t *dte_state;		/* pointer to state */
	uint32_t dte_cond;			/* security condition */
	dtrace_probe_t *dte_probe;		/* pointer to probe */
	dtrace_action_t *dte_action_last;	/* last action on ECB */
	uint64_t dte_uarg;			/* library argument */
};

struct dtrace_predicate {
	dtrace_difo_t *dtp_difo;		/* DIF object */
	dtrace_cacheid_t dtp_cacheid;		/* cache identifier */
	int dtp_refcnt;				/* reference count */
};

struct dtrace_action {
	dtrace_actkind_t dta_kind;		/* kind of action */
	uint16_t dta_intuple;			/* boolean: in aggregation */
	uint32_t dta_refcnt;			/* reference count */
	dtrace_difo_t *dta_difo;		/* pointer to DIFO */
	dtrace_recdesc_t dta_rec;		/* record description */
	dtrace_action_t *dta_prev;		/* previous action */
	dtrace_action_t *dta_next;		/* next action */
};

typedef struct dtrace_aggregation {
	dtrace_action_t dtag_action;		/* action; must be first */
	dtrace_aggid_t dtag_id;			/* identifier */
	dtrace_ecb_t *dtag_ecb;			/* corresponding ECB */
	dtrace_action_t *dtag_first;		/* first action in tuple */
	uint32_t dtag_base;			/* base of aggregation */
	uint8_t dtag_hasarg;			/* boolean: has argument */
	uint64_t dtag_initial;			/* initial value */
	void (*dtag_aggregate)(uint64_t *, uint64_t, uint64_t);
} dtrace_aggregation_t;
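
/*
 * For illustration only -- dtag_aggregate above points to a folding function
 * with the signature (uint64_t *oval, uint64_t nval, uint64_t arg), where
 * *oval is the current value, nval the incoming value, and arg an optional
 * argument.  A sum() aggregation could plausibly be implemented as:
 *
 *	static void
 *	example_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
 *	{
 *		*oval += nval;		// arg is unused for sum()
 *	}
 *
 * dtag_initial seeds the value before the first fold (0 for sum(); a large
 * sentinel for min()).
 */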

/*
 * DTrace Buffers
 *
 * Principal buffers, aggregation buffers, and speculative buffers are all
 * managed with the dtrace_buffer structure.  By default, this structure
 * includes twin data buffers -- dtb_tomax and dtb_xamot -- that serve as the
 * active and passive buffers, respectively.  For speculative buffers,
 * dtb_xamot will be NULL; for "ring" and "fill" buffers, dtb_xamot will
 * point to a scratch buffer.  For all buffer types, the dtrace_buffer
 * structure is always allocated on a per-CPU basis; a single dtrace_buffer
 * structure is never shared among CPUs.  (That is, there is never true
 * sharing of the dtrace_buffer structure; to prevent false sharing of the
 * structure, it must always be aligned to the coherence granularity --
 * generally 64 bytes.)
 *
 * One of the critical design decisions of DTrace is that a given ECB always
 * stores the same quantity and type of data.  This is done to assure that
 * the only metadata required for an ECB's traced data is the EPID.  That is,
 * from the EPID, the consumer can determine the data layout.  (The data
 * buffer layout is shown schematically below.)  By assuring that one can
 * determine data layout from the EPID, the metadata stream can be separated
 * from the data stream -- simplifying the data stream enormously.  A record
 * header -- the dtrace_rechdr_t structure, which includes the EPID and a
 * high-resolution timestamp used for output ordering consistency -- always
 * precedes the recorded data.
 *
 *      base of data buffer --->  +--------+--------------------+--------+
 *                                | rechdr | data               | rechdr |
 *                                +--------+------+--------+----+--------+
 *                                | data          | rechdr | data        |
 *                                +---------------+--------+-------------+
 *                                | data, cont.                          |
 *                                +--------+--------------------+--------+
 *                                | rechdr | data               |        |
 *                                +--------+--------------------+        |
 *                                |                ||                    |
 *                                |                ||                    |
 *                                |                \/                    |
 *                                :                                      :
 *                                .                                      .
 *                                .                                      .
 *                                .                                      .
 *                                :                                      :
 *                                |                                      |
 *     limit of data buffer --->  +--------------------------------------+
 *
 * When evaluating an ECB, dtrace_probe() determines if the ECB's needs of
 * the principal buffer (both scratch and payload) exceed the available
 * space.  If the ECB's needs exceed available space (and if the principal
 * buffer policy is the default "switch" policy), the ECB is dropped, the
 * buffer's drop count is incremented, and processing advances to the next
 * ECB.  If the ECB's needs can be met with the available space, the ECB is
 * processed, but the offset in the principal buffer is only advanced if the
 * ECB completes processing without error.
 *
 * When a buffer is to be switched (either because the buffer is the
 * principal buffer with a "switch" policy or because it is an aggregation
 * buffer), a cross call is issued to the CPU associated with the buffer.  In
 * the cross call context, interrupts are disabled, and the active and the
 * inactive buffers are atomically switched.  This involves switching the
 * data pointers, copying the various state fields (offset, drops, errors,
 * etc.) into their inactive equivalents, and clearing the state fields.
 * Because interrupts are disabled during this procedure, the switch is
 * guaranteed to appear atomic to dtrace_probe().
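 *
 * For illustration only, a hedged sketch of that switch as performed in
 * cross call context (drop/error bookkeeping elided; dtb_tomax and friends
 * are fields of the dtrace_buffer structure defined below):
 *
 *	caddr_t tomax = buf->dtb_tomax;		// interrupts are disabled
 *	buf->dtb_tomax = buf->dtb_xamot;	// inactive becomes active
 *	buf->dtb_xamot = tomax;			// active becomes inactive
 *	buf->dtb_xamot_offset = buf->dtb_offset;
 *	buf->dtb_offset = 0;			// new active buffer is empty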
 *
 * DTrace Ring Buffering
 *
 * To process a ring buffer correctly, one must know the oldest valid record.
 * Processing starts at the oldest record in the buffer and continues until
 * the end of the buffer is reached.  Processing then resumes starting with
 * the record stored at offset 0 in the buffer, and continues until the
 * youngest record is processed.  If trace records are of a fixed-length,
 * determining the oldest record is trivial:
 *
 *	- If the ring buffer has not wrapped, the oldest record is the record
 *	  stored at offset 0.
 *
 *	- If the ring buffer has wrapped, the oldest record is the record
 *	  stored at the current offset.
 *
 * With variable length records, however, just knowing the current offset
 * doesn't suffice for determining the oldest valid record:  assuming that
 * one allows for arbitrary data, one has no way of searching forward from
 * the current offset to find the oldest valid record.  (That is, one has no
 * way of separating data from metadata.)  It would be possible to simply
 * refuse to process any data in the ring buffer between the current offset
 * and the limit, but this leaves (potentially) an enormous amount of
 * otherwise valid data unprocessed.
 *
 * To effect ring buffering, we track two offsets in the buffer:  the current
 * offset and the _wrapped_ offset.  If a request is made to reserve some
 * amount of data, and the buffer has wrapped, the wrapped offset is
 * incremented until the wrapped offset minus the current offset is greater
 * than or equal to the reserve request.  This is done by repeatedly looking
 * up the ECB corresponding to the EPID at the current wrapped offset, and
 * incrementing the wrapped offset by the size of the data payload
 * corresponding to that ECB.  If this offset is greater than or equal to the
 * limit of the data buffer, the wrapped offset is set to 0.  Thus, the
 * current offset effectively "chases" the wrapped offset around the buffer.
 * Schematically:
 *
 *      base of data buffer --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                | data, cont.                      |
 *                                +------+---------------------------+
 *                                | EPID | data                      |
 *           current offset --->  +------+---------------------------+
 *                                | invalid data                     |
 *           wrapped offset --->  +------+--------------------+------+
 *                                | EPID | data               | EPID |
 *                                +------+--------+------+----+------+
 *                                | data          | EPID | data      |
 *                                +---------------+------+-----------+
 *                                :                                  :
 *                                .                                  .
 *                                .        ... valid data ...        .
 *                                .                                  .
 *                                :                                  :
 *                                +------+-------------+------+------+
 *                                | EPID | data        | EPID | data |
 *                                +------+------------++------+------+
 *                                | data, cont.       | leftover     |
 *     limit of data buffer --->  +-------------------+--------------+
 *
 * If the amount of requested buffer space exceeds the amount of space
 * available between the current offset and the end of the buffer:
 *
 *	(1)  all words in the data buffer between the current offset and the
 *	     limit of the data buffer (marked "leftover", above) are set to
 *	     DTRACE_EPIDNONE
 *
 *	(2)  the wrapped offset is set to zero
 *
 *	(3)  the iteration process described above occurs until the wrapped
 *	     offset is greater than the amount of desired space.
 *
 * The wrapped offset is implemented by (re-)using the inactive offset.
 * In a "switch" buffer policy, the inactive offset stores the offset in
 * the inactive buffer; in a "ring" buffer policy, it stores the wrapped
 * offset.
 *
 * DTrace Scratch Buffering
 *
 * Some ECBs may wish to allocate dynamically-sized temporary scratch memory.
 * To accommodate such requests easily, scratch memory may be allocated in
 * the buffer beyond the current offset plus the needed memory of the current
 * ECB.  If there isn't sufficient room in the buffer for the requested
 * amount of scratch space, the allocation fails and an error is generated.
 * Scratch memory is tracked in the dtrace_mstate_t and is automatically
 * freed when the ECB ceases processing.  Note that ring buffers cannot
 * allocate their scratch from the principal buffer -- lest they needlessly
 * overwrite older, valid data.  Ring buffers therefore have their own
 * dedicated scratch buffer from which scratch is allocated.
 */
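
/*
 * For illustration only -- a hedged sketch of the ring-policy reservation
 * loop described above, for the common case in which the wrapped offset is
 * ahead of the current offset.  Alignment, records straddling the limit and
 * DTRACE_EPIDNONE padding are all elided, and example_size_at() is a
 * hypothetical helper returning the record size for the EPID stored at an
 * offset (the buffer fields are defined just below):
 *
 *	while (buf->dtb_xamot_offset - offset < needed) {
 *		// Retire the oldest record to widen the reservation.
 *		buf->dtb_xamot_offset +=
 *		    example_size_at(buf, buf->dtb_xamot_offset);
 *		if (buf->dtb_xamot_offset >= buf->dtb_size)
 *			buf->dtb_xamot_offset = 0;	// wrapped off the end
 *	}
 */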

#define	DTRACEBUF_RING		0x0001		/* bufpolicy set to "ring" */
#define	DTRACEBUF_FILL		0x0002		/* bufpolicy set to "fill" */
#define	DTRACEBUF_NOSWITCH	0x0004		/* do not switch buffer */
#define	DTRACEBUF_WRAPPED	0x0008		/* ring buffer has wrapped */
#define	DTRACEBUF_DROPPED	0x0010		/* drops occurred */
#define	DTRACEBUF_ERROR		0x0020		/* errors occurred */
#define	DTRACEBUF_FULL		0x0040		/* "fill" buffer is full */
#define	DTRACEBUF_CONSUMED	0x0080		/* buffer has been consumed */
#define	DTRACEBUF_INACTIVE	0x0100		/* buffer is not yet active */

typedef struct dtrace_buffer {
	uint64_t dtb_offset;			/* current offset in buffer */
	uint64_t dtb_size;			/* size of buffer */
	uint32_t dtb_flags;			/* flags */
	uint32_t dtb_drops;			/* number of drops */
	caddr_t dtb_tomax;			/* active buffer */
	caddr_t dtb_xamot;			/* inactive buffer */
	uint32_t dtb_xamot_flags;		/* inactive flags */
	uint32_t dtb_xamot_drops;		/* drops in inactive buffer */
	uint64_t dtb_xamot_offset;		/* offset in inactive buffer */
	uint32_t dtb_errors;			/* number of errors */
	uint32_t dtb_xamot_errors;		/* errors in inactive buffer */
#ifndef _LP64
	uint64_t dtb_pad1;			/* pad out to 64 bytes */
#endif
	uint64_t dtb_switched;			/* time of last switch */
	uint64_t dtb_interval;			/* observed switch interval */
	uint64_t dtb_pad2[6];			/* pad to avoid false sharing */
} dtrace_buffer_t;

/*
 * DTrace Aggregation Buffers
 *
 * Aggregation buffers use much of the same mechanism as described above
 * ("DTrace Buffers").  However, because an aggregation is fundamentally a
 * hash, there exists dynamic metadata associated with an aggregation buffer
 * that is not associated with other kinds of buffers.  This aggregation
 * metadata is _only_ relevant for the in-kernel implementation of
 * aggregations; it is not actually relevant to user-level consumers.  To do
 * this, we allocate dynamic aggregation data (hash keys and hash buckets)
 * starting below the _limit_ of the buffer, and we allocate data from the
 * _base_ of the buffer.  When the aggregation buffer is copied out, _only_
 * the data is copied out; the metadata is simply discarded.  Schematically,
 * aggregation buffers look like:
 *
 *      base of data buffer --->  +-------+------+-----------+-------+
 *                                | aggid | key  | value     | aggid |
 *                                +-------+------+-----------+-------+
 *                                | key                              |
 *                                +-------+-------+-----+------------+
 *                                | value | aggid | key | value      |
 *                                +-------+------++-----+------+-----+
 *                                | aggid | key  | value       |     |
 *                                +-------+------+-------------+     |
 *                                |                ||                |
 *                                |                ||                |
 *                                |                \/                |
 *                                :                                  :
 *                                .                                  .
 *                                .                                  .
 *                                .                                  .
 *                                :                                  :
 *                                |                /\                |
 *                                |                ||   +------------+
 *                                |                ||   |            |
 *                                +---------------------+            |
 *                                |      hash keys                   |
 *                                | (dtrace_aggkey structures)       |
 *                                |                                  |
 *                                +----------------------------------+
 *                                |      hash buckets                |
 *                                | (dtrace_aggbuffer structure)     |
 *                                |                                  |
 *     limit of data buffer --->  +----------------------------------+
 *
 *
 * As implied above, just as we assure that ECBs always store a constant
 * amount of data, we assure that a given aggregation -- identified by its
 * aggregation ID -- always stores data of a constant quantity and type.
 * As with EPIDs, this allows the aggregation ID to serve as the metadata for
 * a given record.
 *
 * Note that the size of the dtrace_aggkey structure must be sizeof
 * (uintptr_t) aligned.  (If the structure changes such that this becomes
 * false, an assertion will fail in dtrace_aggregate().)
 */
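
/*
 * For illustration only -- because the dtrace_aggbuffer structure (defined
 * below) sits at the top of an aggregation buffer, its metadata can
 * plausibly be located from the raw buffer like this (the real
 * initialization is more careful about alignment):
 *
 *	dtrace_aggbuffer_t *agb = (dtrace_aggbuffer_t *)
 *	    ((uintptr_t)buf->dtb_tomax + buf->dtb_size -
 *	    sizeof (dtrace_aggbuffer_t));
 */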

typedef struct dtrace_aggkey {
	uint32_t dtak_hashval;			/* hash value */
	uint32_t dtak_action:4;			/* action -- 4 bits */
	uint32_t dtak_size:28;			/* size -- 28 bits */
	caddr_t dtak_data;			/* data pointer */
	struct dtrace_aggkey *dtak_next;	/* next in hash chain */
} dtrace_aggkey_t;

typedef struct dtrace_aggbuffer {
	uintptr_t dtagb_hashsize;		/* number of buckets */
	uintptr_t dtagb_free;			/* free list of keys */
	dtrace_aggkey_t **dtagb_hash;		/* hash table */
} dtrace_aggbuffer_t;

/*
 * DTrace Speculations
 *
 * Speculations have a per-CPU buffer and a global state.  Once a speculation
 * buffer has been committed or discarded, it cannot be reused until all CPUs
 * have taken the same action (commit or discard) on their respective
 * speculative buffer.  However, because DTrace probes may execute in
 * arbitrary context, other CPUs cannot simply be cross-called at probe
 * firing time to perform the necessary commit or discard.  The speculation
 * states thus optimize for the case that a speculative buffer is only active
 * on one CPU at the time of a commit() or discard() -- for if this is the
 * case, other CPUs need not take action, and the speculation is immediately
 * available for reuse.  If the speculation is active on multiple CPUs, it
 * must be asynchronously cleaned -- potentially leading to a higher rate of
 * dirty speculative drops.  The speculation states are as follows:
 *
 *	DTRACESPEC_INACTIVE	  <= Initial state; inactive speculation
 *	DTRACESPEC_ACTIVE	  <= Allocated, but not yet speculatively
 *				     traced to
 *	DTRACESPEC_ACTIVEONE	  <= Speculatively traced to on one CPU
 *	DTRACESPEC_ACTIVEMANY	  <= Speculatively traced to on more than
 *				     one CPU
 *	DTRACESPEC_COMMITTING	  <= Currently being committed on one CPU
 *	DTRACESPEC_COMMITTINGMANY <= Currently being committed on many CPUs
 *	DTRACESPEC_DISCARDING	  <= Currently being discarded on many CPUs
 *
 * The state transition diagram is as follows:
 *
 *     +----------------------------------------------------------+
 *     |                                                          |
 *     |                      +------------+                      |
 *     |  +-------------------| COMMITTING |<-----------------+   |
 *     |  |                   +------------+                  |   |
 *     |  | copied spec.            ^           commit() on   |   | discard() on
 *     |  | into principal          |            active CPU   |   | active CPU
 *     |  |                         | commit()                |   |
 *     V  V                         |                         |   |
 * +----------+                 +--------+                +-----------+
 * | INACTIVE |---------------->| ACTIVE |--------------->| ACTIVEONE |
 * +----------+  speculation()  +--------+  speculate()   +-----------+
 *     ^  ^                         |                         |   |
 *     |  |                         | discard()               |   |
 *     |  | asynchronously          |          discard() on   |   | speculate()
 *     |  | cleaned                 V          inactive CPU   |   | on inactive
 *     |  |                   +------------+                  |   | CPU
 *     |  +-------------------| DISCARDING |<-----------------+   |
 *     |                      +------------+                      |
 *     | asynchronously            ^                              |
 *     | copied spec.              | discard()                    |
 *     | into principal            +------------------------+     |
 *     |                                                    |     V
 * +----------------+          commit()                 +------------+
 * | COMMITTINGMANY |<----------------------------------| ACTIVEMANY |
 * +----------------+                                   +------------+
 */

typedef enum dtrace_speculation_state {
	DTRACESPEC_INACTIVE = 0,
	DTRACESPEC_ACTIVE,
	DTRACESPEC_ACTIVEONE,
	DTRACESPEC_ACTIVEMANY,
	DTRACESPEC_COMMITTING,
	DTRACESPEC_COMMITTINGMANY,
	DTRACESPEC_DISCARDING
} dtrace_speculation_state_t;

typedef struct dtrace_speculation {
	dtrace_speculation_state_t dtsp_state;	/* current speculation state */
	int dtsp_cleaning;			/* non-zero if being cleaned */
	dtrace_buffer_t *dtsp_buffer;		/* speculative buffer */
} dtrace_speculation_t;
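
/*
 * For illustration only -- because the speculation state is a single word,
 * transitions such as ACTIVEONE -> COMMITTING can be attempted atomically
 * from probe context with dtrace_cas32() (declared at the bottom of this
 * header).  A hedged sketch; the real logic in dtrace_speculation_commit()
 * handles many more cases:
 *
 *	dtrace_speculation_state_t cur = spec->dtsp_state;
 *
 *	if (cur == DTRACESPEC_ACTIVEONE &&
 *	    dtrace_cas32((uint32_t *)&spec->dtsp_state, cur,
 *	    DTRACESPEC_COMMITTING) == cur) {
 *		// We won the transition; no other CPU need take action.
 *	}
 */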

/*
 * DTrace Dynamic Variables
 *
 * The dynamic variable problem is obviously decomposed into two subproblems:
 * allocating new dynamic storage, and freeing old dynamic storage.  The
 * presence of the second problem makes the first much more complicated -- or
 * rather, the absence of the second renders the first trivial.  This is the
 * case with aggregations, for which there is effectively no deallocation of
 * dynamic storage.  (Or more accurately, all dynamic storage is deallocated
 * when a snapshot is taken of the aggregation.)  As DTrace dynamic variables
 * allow for both dynamic allocation and dynamic deallocation, the
 * implementation of dynamic variables is quite a bit more complicated than
 * that of their aggregation kin.
 *
 * We observe that allocating new dynamic storage is tricky only because the
 * size can vary -- the allocation problem is much easier if allocation sizes
 * are uniform.  We further observe that in D, the size of dynamic variables
 * is actually _not_ dynamic -- dynamic variable sizes may be determined by
 * static analysis of DIF text.  (This is true even of putatively
 * dynamically-sized objects like strings and stacks, the sizes of which are
 * dictated by the "stringsize" and "stackframes" variables, respectively.)
 * We exploit this by performing this analysis on all DIF before enabling
 * any probes.  For each dynamic load or store, we calculate the
 * dynamically-allocated size plus the size of the dtrace_dynvar structure
 * plus the storage required to key the data.  For all DIF, we take the
 * largest value and dub it the _chunksize_.  We then divide dynamic memory
 * into two parts:  a hash table that is wide enough to have every chunk in
 * its own bucket, and a larger region of equal chunksize units.  Whenever we
 * wish to dynamically allocate a variable, we always allocate a single
 * chunk of memory.  Depending on the uniformity of allocation, this will
 * waste some amount of memory -- but it eliminates the non-determinism
 * inherent in traditional heap fragmentation.
 *
 * Dynamic objects are allocated by storing a non-zero value to them; they
 * are deallocated by storing a zero value to them.  Dynamic variables are
 * complicated enormously by being shared between CPUs.  In particular,
 * consider the following scenario:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   |                                 |
 *  | by storing the value 345 to it  |   |                                 |
 *  |                               --------->                              |
 *  |                                 |   | wishing to load from object     |
 *  |                                 |   | a[123], performs lookup in      |
 *  |                                 |   | dynamic variable space          |
 *  |                               <---------                              |
 *  | deallocates object a[123] by    |   |                                 |
 *  | storing 0 to it                 |   |                                 |
 *  |                                 |   |                                 |
 *  | allocates dynamic object b[567] |   | performs load from a[123]       |
 *  | by storing the value 789 to it  |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * This is obviously a race in the D program, but there are nonetheless only
 * two valid values for CPU B's load from a[123]:  345 or 0.  Most
 * importantly, CPU B may _not_ see the value 789 for a[123].
 *
 * There are essentially two ways to deal with this:
 *
 *	(1)  Explicitly spin-lock variables.  That is, if CPU B wishes to
 *	     load from a[123], it needs to lock a[123] and hold the lock for
 *	     the duration that it wishes to manipulate it.
 *
 *	(2)  Avoid reusing freed chunks until it is known that no CPU is
 *	     referring to them.
 *
 * The implementation of (1) is rife with complexity, because it requires the
 * user of a dynamic variable to explicitly decree when they are done using
 * it.  Were all variables by value, this perhaps wouldn't be debilitating --
 * but dynamic variables of non-scalar types are tracked by reference.  That
 * is, if a dynamic variable is, say, a string, and that variable is to be
 * traced to, say, the principal buffer, the DIF emulation code returns to
 * the main dtrace_probe() loop a pointer to the underlying storage, not the
 * contents of the storage.  Further, code calling on DIF emulation would
 * have to be aware that the DIF emulation has returned a reference to a
 * dynamic variable that has been potentially locked.  The variable would
 * have to be unlocked after the main dtrace_probe() loop is finished with
 * the variable, and the main dtrace_probe() loop would have to be careful
 * to not call any further DIF emulation while the variable is locked to
 * avoid deadlock.  More generally, if one were to implement (1), DIF
 * emulation code dealing with dynamic variables could only deal with one
 * dynamic variable at a time (lest deadlock result).  To sum, (1) exports
 * too much subtlety to the users of dynamic variables -- increasing
 * maintenance burden and imposing serious constraints on future DTrace
 * development.
 *
 * The implementation of (2) is also complex, but the complexity is more
 * manageable.  We need to be sure that when a variable is deallocated, it is
 * not placed on a traditional free list, but rather on a _dirty_ list.  Once
 * a variable is on a dirty list, it cannot be found by CPUs performing a
 * subsequent lookup of the variable -- but it may still be in use by other
 * CPUs.  To assure that all CPUs that may be seeing the old variable have
 * cleared out of probe context, a dtrace_sync() can be issued.  Once the
 * dtrace_sync() has completed, it can be known that all CPUs are done
 * manipulating the dynamic variable -- the dirty list can be atomically
 * appended to the free list.  Unfortunately, there's a slight hiccup in this
 * mechanism:  dtrace_sync() may not be issued from probe context.
 * The dtrace_sync() must therefore be issued asynchronously from non-probe
 * context.  For this we rely on the DTrace cleaner, a cyclic that runs at
 * the "cleanrate" frequency.  To ease this implementation, we define several
 * chunk lists:
 *
 *	- Dirty.  Deallocated chunks, not yet cleaned.  Not available.
 *
 *	- Rinsing.  Formerly dirty chunks that are currently being
 *	  asynchronously cleaned.  Not available, but will be shortly.
 *	  Dynamic variable allocation may not spin or block for
 *	  availability, however.
 *
 *	- Clean.  Clean chunks, ready for allocation -- but not on the free
 *	  list.
 *
 *	- Free.  Available for allocation.
 *
 * Moreover, to avoid absurd contention, _each_ of these lists is implemented
 * on a per-CPU basis.  This is only for performance, not correctness; chunks
 * may be allocated from another CPU's free list.  The algorithm for
 * allocation then is this:
 *
 *	(1)  Attempt to atomically allocate from current CPU's free list.  If
 *	     list is non-empty and allocation is successful, allocation is
 *	     complete.
 *
 *	(2)  If the clean list is non-empty, atomically move it to the free
 *	     list, and reattempt (1).
 *
 *	(3)  If the dynamic variable space is in the CLEAN state, look for
 *	     free and clean lists on other CPUs by setting the current CPU to
 *	     the next CPU, and reattempting (1).  If the next CPU is the
 *	     current CPU (that is, if all CPUs have been checked), atomically
 *	     switch the state of the dynamic variable space based on the
 *	     following:
 *
 *	     - If no free chunks were found and no dirty chunks were found,
 *	       atomically set the state to EMPTY.
 *
 *	     - If dirty chunks were found, atomically set the state to DIRTY.
 *
 *	     - If rinsing chunks were found, atomically set the state to
 *	       RINSING.
 *
 *	(4)  Based on state of dynamic variable space state, increment
 *	     appropriate counter to indicate dynamic drops (if in EMPTY
 *	     state) vs. dynamic dirty drops (if in DIRTY state) vs. dynamic
 *	     rinsing drops (if in RINSING state).  Fail the allocation.
 *
 * The cleaning cyclic operates with the following algorithm:  for all CPUs
 * with a non-empty dirty list, atomically move the dirty list to the
 * rinsing list.  Perform a dtrace_sync().  For all CPUs with a non-empty
 * rinsing list, atomically move the rinsing list to the clean list.  Perform
 * another dtrace_sync().  By this point, all CPUs have seen the new clean
 * list; the state of the dynamic variable space can be restored to CLEAN.
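 *
 * For illustration only, a hedged sketch of step (1) above -- popping the
 * head of a CPU's free list with a compare-and-swap (the retry and
 * state-transition logic of the real allocator, dtrace_dynvar(), is
 * elided; dcpu is the per-CPU state defined below):
 *
 *	dtrace_dynvar_t *free = dcpu->dtdsc_free;
 *
 *	if (free != NULL && dtrace_casptr(&dcpu->dtdsc_free,
 *	    free, free->dtdv_next) == free) {
 *		// The chunk was allocated from the free list.
 *	}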
 *
 * There exist two final races that merit explanation.  The first is a simple
 * allocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | allocates dynamic object a[123] |   | allocates dynamic object a[123] |
 *  | by storing the value 345 to it  |   | by storing the value 567 to it  |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Again, this is a race in the D program.  It can be resolved by having
 * a[123] hold the value 345 or a[123] hold the value 567 -- but it must be
 * true that a[123] have only _one_ of these values.  (That is, the racing
 * CPUs may not put the same element twice on the same hash chain.)  This is
 * resolved simply:  before the allocation is undertaken, the start of the
 * new chunk's hash chain is noted.  Later, after the allocation is complete,
 * the hash chain is atomically switched to point to the new element.  If
 * this fails (because of either concurrent allocations or an allocation
 * concurrent with a deletion), the newly allocated chunk is deallocated to
 * the dirty list, and the whole process of looking up (and potentially
 * allocating) the dynamic variable is reattempted.
 *
 * The final race is a simple deallocation race:
 *
 *                 CPU A                                 CPU B
 *  +---------------------------------+   +---------------------------------+
 *  |                                 |   |                                 |
 *  | deallocates dynamic object      |   | deallocates dynamic object      |
 *  | a[123] by storing the value 0   |   | a[123] by storing the value 0   |
 *  | to it                           |   | to it                           |
 *  |                                 |   |                                 |
 *  :                                 :   :                                 :
 *  .                                 .   .                                 .
 *
 * Once again, this is a race in the D program, but it is one that we must
 * handle without corrupting the underlying data structures.  Because
 * deallocations require the deletion of a chunk from the middle of a hash
 * chain, we cannot use a single-word atomic operation to remove it.  For
 * this, we add a spin lock to the hash buckets that is _only_ used for
 * deallocations (allocation races are handled as above).  Further, this
 * spin lock is _only_ held for the duration of the delete; before control
 * is returned to the DIF emulation code, the hash bucket is unlocked.
 */
typedef struct dtrace_key {
	uint64_t dttk_value;			/* data value or data pointer */
	uint64_t dttk_size;			/* 0 if by-val, >0 if by-ref */
} dtrace_key_t;

typedef struct dtrace_tuple {
	uint32_t dtt_nkeys;			/* number of keys in tuple */
	uint32_t dtt_pad;			/* padding */
	dtrace_key_t dtt_key[1];		/* array of tuple keys */
} dtrace_tuple_t;

typedef struct dtrace_dynvar {
	uint64_t dtdv_hashval;			/* hash value -- 0 if free */
	struct dtrace_dynvar *dtdv_next;	/* next on list or hash chain */
	void *dtdv_data;			/* pointer to data */
	dtrace_tuple_t dtdv_tuple;		/* tuple key */
} dtrace_dynvar_t;

typedef enum dtrace_dynvar_op {
	DTRACE_DYNVAR_ALLOC,
	DTRACE_DYNVAR_NOALLOC,
	DTRACE_DYNVAR_DEALLOC
} dtrace_dynvar_op_t;

typedef struct dtrace_dynhash {
	dtrace_dynvar_t *dtdh_chain;		/* hash chain for this bucket */
	uintptr_t dtdh_lock;			/* deallocation lock */
#ifdef _LP64
	uintptr_t dtdh_pad[6];			/* pad to avoid false sharing */
#else
	uintptr_t dtdh_pad[14];			/* pad to avoid false sharing */
#endif
} dtrace_dynhash_t;

typedef struct dtrace_dstate_percpu {
	dtrace_dynvar_t *dtdsc_free;		/* free list for this CPU */
	dtrace_dynvar_t *dtdsc_dirty;		/* dirty list for this CPU */
	dtrace_dynvar_t *dtdsc_rinsing;		/* rinsing list for this CPU */
	dtrace_dynvar_t *dtdsc_clean;		/* clean list for this CPU */
	uint64_t dtdsc_drops;			/* number of capacity drops */
	uint64_t dtdsc_dirty_drops;		/* number of dirty drops */
	uint64_t dtdsc_rinsing_drops;		/* number of rinsing drops */
#ifdef _LP64
	uint64_t dtdsc_pad;			/* pad to avoid false sharing */
#else
	uint64_t dtdsc_pad[2];			/* pad to avoid false sharing */
#endif
} dtrace_dstate_percpu_t;
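
/*
 * For illustration only -- deallocation pushes a chunk onto the owning CPU's
 * dirty list (dtdsc_dirty above) rather than onto a free list.  A hedged
 * sketch of that push, eliding the additional checks made by the real
 * implementation:
 *
 *	dtrace_dynvar_t *dirty;
 *
 *	do {
 *		dirty = dcpu->dtdsc_dirty;	// current dirty list head
 *		dvar->dtdv_next = dirty;
 *	} while (dtrace_casptr(&dcpu->dtdsc_dirty, dirty, dvar) != dirty);
 */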

typedef enum dtrace_dstate_state {
	DTRACE_DSTATE_CLEAN = 0,
	DTRACE_DSTATE_EMPTY,
	DTRACE_DSTATE_DIRTY,
	DTRACE_DSTATE_RINSING
} dtrace_dstate_state_t;

typedef struct dtrace_dstate {
	void *dtds_base;			/* base of dynamic var. space */
	size_t dtds_size;			/* size of dynamic var. space */
	size_t dtds_hashsize;			/* number of buckets in hash */
	size_t dtds_chunksize;			/* size of each chunk */
	dtrace_dynhash_t *dtds_hash;		/* pointer to hash table */
	dtrace_dstate_state_t dtds_state;	/* current dynamic var. state */
	dtrace_dstate_percpu_t *dtds_percpu;	/* per-CPU dyn. var. state */
} dtrace_dstate_t;

/*
 * DTrace Variable State
 *
 * The DTrace variable state tracks user-defined variables in its
 * dtrace_vstate structure.  Each DTrace consumer has exactly one
 * dtrace_vstate structure, but some dtrace_vstate structures may exist
 * without a corresponding DTrace consumer (see "DTrace Helpers", below).
 * As described in <sys/dtrace.h>, user-defined variables can have one of
 * three scopes:
 *
 *	DIFV_SCOPE_GLOBAL  =>  global scope
 *	DIFV_SCOPE_THREAD  =>  thread-local scope (i.e. "self->" variables)
 *	DIFV_SCOPE_LOCAL   =>  clause-local scope (i.e. "this->" variables)
 *
 * The variable state tracks variables by both their scope and their
 * allocation type:
 *
 *	- The dtvs_globals and dtvs_locals members each point to an array of
 *	  dtrace_statvar structures.  These structures contain both the
 *	  variable metadata (dtrace_difv structures) and the underlying
 *	  storage for all statically allocated variables, including
 *	  statically allocated DIFV_SCOPE_GLOBAL variables and all
 *	  DIFV_SCOPE_LOCAL variables.
 *
 *	- The dtvs_tlocals member points to an array of dtrace_difv
 *	  structures for DIFV_SCOPE_THREAD variables.  As such, this array
 *	  tracks _only_ the variable metadata for DIFV_SCOPE_THREAD
 *	  variables; the underlying storage is allocated out of the dynamic
 *	  variable space.
 *
 *	- The dtvs_dynvars member is the dynamic variable state associated
 *	  with the variable state.  The dynamic variable state (described in
 *	  "DTrace Dynamic Variables", above) tracks all DIFV_SCOPE_THREAD
 *	  variables and all dynamically-allocated DIFV_SCOPE_GLOBAL
 *	  variables.
 */
typedef struct dtrace_statvar {
	uint64_t dtsv_data;			/* data or pointer to it */
	size_t dtsv_size;			/* size of pointed-to data */
	int dtsv_refcnt;			/* reference count */
	dtrace_difv_t dtsv_var;			/* variable metadata */
} dtrace_statvar_t;

typedef struct dtrace_vstate {
	dtrace_state_t *dtvs_state;		/* back pointer to state */
	dtrace_statvar_t **dtvs_globals;	/* statically-allocated glbls */
	int dtvs_nglobals;			/* number of globals */
	dtrace_difv_t *dtvs_tlocals;		/* thread-local metadata */
	int dtvs_ntlocals;			/* number of thread-locals */
	dtrace_statvar_t **dtvs_locals;		/* clause-local data */
	int dtvs_nlocals;			/* number of clause-locals */
	dtrace_dstate_t dtvs_dynvars;		/* dynamic variable state */
} dtrace_vstate_t;

/*
 * DTrace Machine State
 *
 * In the process of processing a fired probe, DTrace needs to track and/or
 * cache some per-CPU state associated with that particular firing.  This is
 * state that is always discarded after the probe firing has completed, and
 * much of it is not specific to any DTrace consumer, remaining valid across
 * all ECBs.  This state is tracked in the dtrace_mstate structure.
 */
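
/*
 * For illustration only -- the DTRACE_MSTATE_* bits below record which
 * dtrace_mstate fields have already been computed for this firing, so that
 * expensive values are fetched at most once per probe firing.  The
 * `timestamp' D variable, for instance, is plausibly serviced like this:
 *
 *	if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
 *		mstate->dtms_timestamp = dtrace_gethrtime();
 *		mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
 *	}
 *	return (mstate->dtms_timestamp);
 */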

#define	DTRACE_MSTATE_ARGS		0x00000001
#define	DTRACE_MSTATE_PROBE		0x00000002
#define	DTRACE_MSTATE_EPID		0x00000004
#define	DTRACE_MSTATE_TIMESTAMP		0x00000008
#define	DTRACE_MSTATE_STACKDEPTH	0x00000010
#define	DTRACE_MSTATE_CALLER		0x00000020
#define	DTRACE_MSTATE_IPL		0x00000040
#define	DTRACE_MSTATE_FLTOFFS		0x00000080
#define	DTRACE_MSTATE_WALLTIMESTAMP	0x00000100
#define	DTRACE_MSTATE_USTACKDEPTH	0x00000200
#define	DTRACE_MSTATE_UCALLER		0x00000400

typedef struct dtrace_mstate {
	uintptr_t dtms_scratch_base;		/* base of scratch space */
	uintptr_t dtms_scratch_ptr;		/* current scratch pointer */
	size_t dtms_scratch_size;		/* scratch size */
	uint32_t dtms_present;			/* variables that are present */
	uint64_t dtms_arg[5];			/* cached arguments */
	dtrace_epid_t dtms_epid;		/* current EPID */
	uint64_t dtms_timestamp;		/* cached timestamp */
	hrtime_t dtms_walltimestamp;		/* cached wall timestamp */
	int dtms_stackdepth;			/* cached stackdepth */
	int dtms_ustackdepth;			/* cached ustackdepth */
	struct dtrace_probe *dtms_probe;	/* current probe */
	uintptr_t dtms_caller;			/* cached caller */
	uint64_t dtms_ucaller;			/* cached user-level caller */
	int dtms_ipl;				/* cached interrupt pri lev */
	int dtms_fltoffs;			/* faulting DIFO offset */
	uintptr_t dtms_strtok;			/* saved strtok() pointer */
	uintptr_t dtms_strtok_limit;		/* upper bound of strtok ptr */
	uint32_t dtms_access;			/* memory access rights */
	dtrace_difo_t *dtms_difo;		/* current dif object */
	file_t *dtms_getf;			/* cached rval of getf() */
} dtrace_mstate_t;

#define	DTRACE_COND_OWNER	0x1
#define	DTRACE_COND_USERMODE	0x2
#define	DTRACE_COND_ZONEOWNER	0x4

#define	DTRACE_PROBEKEY_MAXDEPTH	8	/* max glob recursion depth */

/*
 * Access flag used by dtrace_mstate.dtms_access.
 */
#define	DTRACE_ACCESS_KERNEL	0x1		/* the priv to read kmem */


/*
 * DTrace Activity
 *
 * Each DTrace consumer is in one of several states, which (for purposes of
 * avoiding yet-another overloading of the noun "state") we call the current
 * _activity_.  The activity transitions on dtrace_go() (from DTRACIOCGO), on
 * dtrace_stop() (from DTRACIOCSTOP) and on the exit() action.  Activities
 * may only transition in one direction; the activity transition diagram is
 * a directed acyclic graph.
 * The activity transition diagram is as follows:
 *
 *
 * +----------+                   +--------+                   +--------+
 * | INACTIVE |------------------>| WARMUP |------------------>| ACTIVE |
 * +----------+   dtrace_go(),    +--------+   dtrace_go(),    +--------+
 *                before BEGIN        |        after BEGIN      |  |  |
 *                                    |                         |  |  |
 *                      exit() action |                         |  |  |
 *                     from BEGIN ECB |                         |  |  |
 *                                    |                         |  |  |
 *                                    v                         |  |  |
 *                               +----------+    exit() action  |  |  |
 * +-----------------------------| DRAINING |<------------------+  |  |
 * |                             +----------+                      |  |
 * |                                  |                            |  |
 * |                   dtrace_stop(), |                            |  |
 * |                     before END   |                            |  |
 * |                                  |                            |  |
 * |                                  v                            |  |
 * |              +---------+                +----------+          |  |
 * |              | STOPPED |<---------------| COOLDOWN |<---------+  |
 * |              +---------+ dtrace_stop(), +----------+             |
 * |                            after END      dtrace_stop(),         |
 * |                                            before END            |
 * |                                                                  |
 * |                              +--------+                          |
 * +----------------------------->| KILLED |<-------------------------+
 *       deadman timeout or       +--------+      deadman timeout or
 *         killed consumer                          killed consumer
 *
 * Note that once a DTrace consumer has stopped tracing, there is no way to
 * restart it; if a DTrace consumer wishes to restart tracing, it must
 * reopen the DTrace pseudodevice.
 */
typedef enum dtrace_activity {
	DTRACE_ACTIVITY_INACTIVE = 0,		/* not yet running */
	DTRACE_ACTIVITY_WARMUP,			/* while starting */
	DTRACE_ACTIVITY_ACTIVE,			/* running */
	DTRACE_ACTIVITY_DRAINING,		/* before stopping */
	DTRACE_ACTIVITY_COOLDOWN,		/* while stopping */
	DTRACE_ACTIVITY_STOPPED,		/* after stopping */
	DTRACE_ACTIVITY_KILLED			/* killed */
} dtrace_activity_t;

/*
 * DTrace Helper Implementation
 *
 * A description of the helper architecture may be found in <sys/dtrace.h>.
 * Each process contains a pointer to its helpers in its p_dtrace_helpers
 * member.  This is a pointer to a dtrace_helpers structure, which contains
 * an array of pointers to dtrace_helper structures, helper variable state
 * (shared among a process's helpers) and a generation count.  (The
 * generation count is used to provide an identifier when a helper is added
 * so that it may be subsequently removed.)  The dtrace_helper structure is
 * self-explanatory, containing pointers to the objects needed to execute
 * the helper.  Note that helpers are _duplicated_ across fork(2), and
 * destroyed on exec(2).  No more than dtrace_helpers_max are allowed
 * per-process.
 */
#define	DTRACE_HELPER_ACTION_USTACK	0
#define	DTRACE_NHELPER_ACTIONS		1

typedef struct dtrace_helper_action {
	int dtha_generation;			/* helper action generation */
	int dtha_nactions;			/* number of actions */
	dtrace_difo_t *dtha_predicate;		/* helper action predicate */
	dtrace_difo_t **dtha_actions;		/* array of actions */
	struct dtrace_helper_action *dtha_next;	/* next helper action */
} dtrace_helper_action_t;

typedef struct dtrace_helper_provider {
	int dthp_generation;			/* helper provider generation */
	uint32_t dthp_ref;			/* reference count */
	dof_helper_t dthp_prov;			/* DOF w/ provider and probes */
} dtrace_helper_provider_t;

typedef struct dtrace_helpers {
	dtrace_helper_action_t **dthps_actions;	/* array of helper actions */
	dtrace_vstate_t dthps_vstate;		/* helper action var. state */
	dtrace_helper_provider_t **dthps_provs;	/* array of providers */
	uint_t dthps_nprovs;			/* count of providers */
	uint_t dthps_maxprovs;			/* provider array size */
	int dthps_generation;			/* current generation */
	pid_t dthps_pid;			/* pid of associated proc */
	int dthps_deferred;			/* helper in deferred list */
	struct dtrace_helpers *dthps_next;	/* next pointer */
	struct dtrace_helpers *dthps_prev;	/* prev pointer */
} dtrace_helpers_t;

/*
 * DTrace Helper Action Tracing
 *
 * Debugging helper actions can be arduous.  To ease the development and
 * debugging of helpers, DTrace contains a tracing-framework-within-a-
 * tracing-framework:  helper tracing.  If dtrace_helptrace_enabled is
 * non-zero (which it is by default on DEBUG kernels), all helper activity
 * will be traced to a global, in-kernel ring buffer.  Each entry includes a
 * pointer to the specific helper, the location within the helper, and a
 * trace of all local variables.  The ring buffer may be displayed in a
 * human-readable format with the ::dtrace_helptrace mdb(1) dcmd.
 */
#define	DTRACE_HELPTRACE_NEXT	(-1)
#define	DTRACE_HELPTRACE_DONE	(-2)
#define	DTRACE_HELPTRACE_ERR	(-3)

typedef struct dtrace_helptrace {
	dtrace_helper_action_t *dtht_helper;	/* helper action */
	int dtht_where;				/* where in helper action */
	int dtht_nlocals;			/* number of locals */
	int dtht_fault;				/* type of fault (if any) */
	int dtht_fltoffs;			/* DIF offset */
	uint64_t dtht_illval;			/* faulting value */
	uint64_t dtht_locals[1];		/* local variables */
} dtrace_helptrace_t;

/*
 * DTrace Credentials
 *
 * In probe context, we have limited flexibility to examine the credentials
 * of the DTrace consumer that created a particular enabling.  We use the
 * Least Privilege interfaces to cache the consumer's cred pointer and some
 * facts about that credential in a dtrace_cred_t structure.  These can
 * limit the consumer's breadth of visibility and what actions the consumer
 * may take.
 */
#define	DTRACE_CRV_ALLPROC		0x01
#define	DTRACE_CRV_KERNEL		0x02
#define	DTRACE_CRV_ALLZONE		0x04

#define	DTRACE_CRV_ALL		(DTRACE_CRV_ALLPROC | DTRACE_CRV_KERNEL | \
	DTRACE_CRV_ALLZONE)

#define	DTRACE_CRA_PROC				0x0001
#define	DTRACE_CRA_PROC_CONTROL			0x0002
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER	0x0004
#define	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE	0x0008
#define	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG	0x0010
#define	DTRACE_CRA_KERNEL			0x0020
#define	DTRACE_CRA_KERNEL_DESTRUCTIVE		0x0040

#define	DTRACE_CRA_ALL		(DTRACE_CRA_PROC | \
	DTRACE_CRA_PROC_CONTROL | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER | \
	DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE | \
	DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG | \
	DTRACE_CRA_KERNEL | \
	DTRACE_CRA_KERNEL_DESTRUCTIVE)

typedef struct dtrace_cred {
	cred_t *dcr_cred;
	uint8_t dcr_destructive;
	uint8_t dcr_visible;
	uint16_t dcr_action;
} dtrace_cred_t;
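
/*
 * For illustration only -- a hedged sketch of a visibility test against the
 * cached credential facts above (the real checks in dtrace.c also compare
 * the consumer's uid and zone against the process being examined; dts_cred
 * is a member of the dtrace_state structure defined below):
 *
 *	if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC)) {
 *		// The consumer may not see all processes; fall back to a
 *		// finer-grained ownership check before exposing this one.
 *	}
 */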

/*
 * DTrace Consumer State
 *
 * Each DTrace consumer has an associated dtrace_state structure that
 * contains its in-kernel DTrace state -- including options, credentials,
 * statistics and pointers to ECBs, buffers, speculations and formats.  A
 * dtrace_state structure is also allocated for anonymous enablings.  When
 * anonymous state is grabbed, the grabbing consumer's dts_anon pointer is
 * set to the grabbed dtrace_state structure.
 */
struct dtrace_state {
#ifdef illumos
	dev_t dts_dev;				/* device */
#else
	struct cdev *dts_dev;			/* device */
#endif
	int dts_necbs;				/* total number of ECBs */
	dtrace_ecb_t **dts_ecbs;		/* array of ECBs */
	dtrace_epid_t dts_epid;			/* next EPID to allocate */
	size_t dts_needed;			/* greatest needed space */
	struct dtrace_state *dts_anon;		/* anon. state, if grabbed */
	dtrace_activity_t dts_activity;		/* current activity */
	dtrace_vstate_t dts_vstate;		/* variable state */
	dtrace_buffer_t *dts_buffer;		/* principal buffer */
	dtrace_buffer_t *dts_aggbuffer;		/* aggregation buffer */
	dtrace_speculation_t *dts_speculations;	/* speculation array */
	int dts_nspeculations;			/* number of speculations */
	int dts_naggregations;			/* number of aggregations */
	dtrace_aggregation_t **dts_aggregations; /* aggregation array */
#ifdef illumos
	vmem_t *dts_aggid_arena;		/* arena for aggregation IDs */
#else
	struct unrhdr *dts_aggid_arena;		/* arena for aggregation IDs */
#endif
	uint64_t dts_errors;			/* total number of errors */
	uint32_t dts_speculations_busy;		/* number of spec. busy */
	uint32_t dts_speculations_unavail;	/* number of spec unavail */
	uint32_t dts_stkstroverflows;		/* stack string tab overflows */
	uint32_t dts_dblerrors;			/* errors in ERROR probes */
	uint32_t dts_reserve;			/* space reserved for END */
	hrtime_t dts_laststatus;		/* time of last status */
#ifdef illumos
	cyclic_id_t dts_cleaner;		/* cleaning cyclic */
	cyclic_id_t dts_deadman;		/* deadman cyclic */
#else
	struct callout dts_cleaner;		/* Cleaning callout. */
	struct callout dts_deadman;		/* Deadman callout. */
#endif
	hrtime_t dts_alive;			/* time last alive */
	char dts_speculates;			/* boolean: has speculations */
	char dts_destructive;			/* boolean: has dest. actions */
	int dts_nformats;			/* number of formats */
	char **dts_formats;			/* format string array */
	dtrace_optval_t dts_options[DTRACEOPT_MAX]; /* options */
	dtrace_cred_t dts_cred;			/* credentials */
	size_t dts_nretained;			/* number of retained enabs */
	int dts_getf;				/* number of getf() calls */
	uint64_t dts_rstate[NCPU][2];		/* per-CPU random state */
};

struct dtrace_provider {
	dtrace_pattr_t dtpv_attr;		/* provider attributes */
	dtrace_ppriv_t dtpv_priv;		/* provider privileges */
	dtrace_pops_t dtpv_pops;		/* provider operations */
	char *dtpv_name;			/* provider name */
	void *dtpv_arg;				/* provider argument */
	hrtime_t dtpv_defunct;			/* when made defunct */
	struct dtrace_provider *dtpv_next;	/* next provider */
};

struct dtrace_meta {
	dtrace_mops_t dtm_mops;			/* meta provider operations */
	char *dtm_name;				/* meta provider name */
	void *dtm_arg;				/* meta provider user arg */
	uint64_t dtm_count;			/* no. of associated provs. */
};

/*
 * DTrace Enablings
 *
 * A dtrace_enabling structure is used to track a collection of ECB
 * descriptions -- before they have been turned into actual ECBs.  This is
 * created as a result of DOF processing, and is generally used to generate
 * ECBs immediately thereafter.
 * However, enablings are also generally retained in case the probes they
 * describe are created at a later time; as each new module or provider
 * registers with the framework, the retained enablings are reevaluated,
 * with any new match resulting in new ECBs.  To prevent probes from being
 * matched more than once, the enabling tracks the last probe generation
 * matched, and only matches probes from subsequent generations.
 */
typedef struct dtrace_enabling {
	dtrace_ecbdesc_t **dten_desc;		/* all ECB descriptions */
	int dten_ndesc;				/* number of ECB descriptions */
	int dten_maxdesc;			/* size of ECB array */
	dtrace_vstate_t *dten_vstate;		/* associated variable state */
	dtrace_genid_t dten_probegen;		/* matched probe generation */
	dtrace_ecbdesc_t *dten_current;		/* current ECB description */
	int dten_error;				/* current error value */
	int dten_primed;			/* boolean: set if primed */
	struct dtrace_enabling *dten_prev;	/* previous enabling */
	struct dtrace_enabling *dten_next;	/* next enabling */
} dtrace_enabling_t;

/*
 * DTrace Anonymous Enablings
 *
 * Anonymous enablings are DTrace enablings that are not associated with a
 * controlling process, but rather derive their enabling from DOF stored as
 * properties in the dtrace.conf file.  If there is an anonymous enabling, a
 * DTrace consumer state and enabling are created on attach.  The state may
 * be subsequently grabbed by the first consumer specifying the "grabanon"
 * option.  As long as an anonymous DTrace enabling exists, dtrace(7D) will
 * refuse to unload.
 */
typedef struct dtrace_anon {
	dtrace_state_t *dta_state;		/* DTrace consumer state */
	dtrace_enabling_t *dta_enabling;	/* pointer to enabling */
	processorid_t dta_beganon;		/* which CPU BEGIN ran on */
} dtrace_anon_t;

/*
 * DTrace Error Debugging
 */
#ifdef DEBUG
#define	DTRACE_ERRDEBUG
#endif

#ifdef DTRACE_ERRDEBUG

typedef struct dtrace_errhash {
	const char *dter_msg;			/* error message */
	int dter_count;				/* number of times seen */
} dtrace_errhash_t;

#define	DTRACE_ERRHASHSZ	256	/* must be > number of err msgs */

#endif	/* DTRACE_ERRDEBUG */

/*
 * DTrace Toxic Ranges
 *
 * DTrace supports safe loads from probe context; if the address turns out
 * to be invalid, a bit will be set by the kernel indicating that DTrace
 * encountered a memory error, and DTrace will propagate the error to the
 * user accordingly.  However, there may exist some regions of memory in
 * which an arbitrary load can change system state, and from which it is
 * impossible to recover after such a load has been attempted.  Examples of
 * this may include memory in which programmable I/O registers are mapped
 * (for which a read may have some implications for the device) or (in the
 * specific case of UltraSPARC-I and -II) the virtual address hole.  The
 * platform is required to make DTrace aware of these toxic ranges; DTrace
 * will then check that target addresses are not in a toxic range before
 * attempting to issue a safe load.
 */
typedef struct dtrace_toxrange {
	uintptr_t dtt_base;			/* base of toxic range */
	uintptr_t dtt_limit;			/* limit of toxic range */
} dtrace_toxrange_t;
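
/*
 * For illustration only -- a hedged sketch of the check made before a safe
 * load, assuming a caller-supplied array of registered ranges (dtrace.c
 * maintains one much like it):
 *
 *	static int
 *	example_is_toxic(uintptr_t addr, size_t size,
 *	    const dtrace_toxrange_t *ranges, int nranges)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nranges; i++) {
 *			if (addr >= ranges[i].dtt_limit ||
 *			    addr + size <= ranges[i].dtt_base)
 *				continue;	// no overlap with this range
 *			return (1);		// overlaps a toxic range
 *		}
 *		return (0);
 *	}
 */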

#ifdef illumos
extern uint64_t dtrace_getarg(int, int);
#else
extern uint64_t __noinline dtrace_getarg(int, int);
#endif
extern greg_t dtrace_getfp(void);
extern int dtrace_getipl(void);
extern uintptr_t dtrace_caller(int);
extern uint32_t dtrace_cas32(uint32_t *, uint32_t, uint32_t);
extern void *dtrace_casptr(volatile void *, volatile void *, volatile void *);
extern void dtrace_copyin(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyinstr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyout(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
extern void dtrace_copyoutstr(uintptr_t, uintptr_t, size_t,
    volatile uint16_t *);
extern void dtrace_getpcstack(pc_t *, int, int, uint32_t *);
extern ulong_t dtrace_getreg(struct trapframe *, uint_t);
extern int dtrace_getstackdepth(int);
extern void dtrace_getupcstack(uint64_t *, int);
extern void dtrace_getufpstack(uint64_t *, uint64_t *, int);
extern int dtrace_getustackdepth(void);
extern uintptr_t dtrace_fulword(void *);
extern uint8_t dtrace_fuword8(void *);
extern uint16_t dtrace_fuword16(void *);
extern uint32_t dtrace_fuword32(void *);
extern uint64_t dtrace_fuword64(void *);
extern void dtrace_probe_error(dtrace_state_t *, dtrace_epid_t, int, int,
    int, uintptr_t);
extern int dtrace_assfail(const char *, const char *, int);
extern int dtrace_attached(void);
#ifdef illumos
extern hrtime_t dtrace_gethrestime(void);
#endif

#ifdef __sparc
extern void dtrace_flush_windows(void);
extern void dtrace_flush_user_windows(void);
extern uint_t dtrace_getotherwin(void);
extern uint_t dtrace_getfprs(void);
#else
extern void dtrace_copy(uintptr_t, uintptr_t, size_t);
extern void dtrace_copystr(uintptr_t, uintptr_t, size_t, volatile uint16_t *);
#endif

/*
 * DTrace Assertions
 *
 * DTrace calls ASSERT and VERIFY from probe context.  To assure that a
 * failed ASSERT or VERIFY does not induce a markedly more catastrophic
 * failure (e.g., one from which a dump cannot be gleaned), DTrace must
 * define its own ASSERT and VERIFY macros to be ones that may safely be
 * called from probe context.  This header file must thus be included by any
 * DTrace component that calls ASSERT and/or VERIFY from probe context, and
 * _only_ by those components.  (The only exception to this is kernel
 * debugging infrastructure at user-level that doesn't depend on calling
 * ASSERT.)
 */
#undef ASSERT
#undef VERIFY
#define	VERIFY(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#ifdef DEBUG
#define	ASSERT(EX)	((void)((EX) || \
			dtrace_assfail(#EX, __FILE__, __LINE__)))
#else
#define	ASSERT(X)	((void)0)
#endif

#ifdef	__cplusplus
}
#endif

#endif /* _SYS_DTRACE_IMPL_H */