1 //! ## Per-Layer Filtering
2 //!
3 //! Per-layer filters permit individual `Layer`s to have their own filter
4 //! configurations without interfering with other `Layer`s.
5 //!
6 //! This module is not public; the public APIs defined in this module are
7 //! re-exported in the top-level `filter` module. Therefore, this documentation
8 //! primarily concerns the internal implementation details. For the user-facing
9 //! public API documentation, see the individual public types in this module, as
//! well as the `Layer` trait documentation's [per-layer filtering
//! section][1].
12 //!
13 //! ## How does per-layer filtering work?
14 //!
15 //! As described in the API documentation, the [`Filter`] trait defines a
16 //! filtering strategy for a per-layer filter. We expect there will be a variety
17 //! of implementations of [`Filter`], both in `tracing-subscriber` and in user
18 //! code.
19 //!
20 //! To actually *use* a [`Filter`] implementation, it is combined with a
21 //! [`Layer`] by the [`Filtered`] struct defined in this module. [`Filtered`]
22 //! implements [`Layer`] by calling into the wrapped [`Layer`], or not, based on
23 //! the filtering strategy. While there will be a variety of types that implement
24 //! [`Filter`], all actual *uses* of per-layer filtering will occur through the
25 //! [`Filtered`] struct. Therefore, most of the implementation details live
26 //! there.
27 //!
28 //! [1]: crate::layer#per-layer-filtering
29 //! [`Filter`]: crate::layer::Filter
30 use crate::{
31 filter::LevelFilter,
32 layer::{self, Context, Layer},
33 registry,
34 };
35 use std::{
36 any::TypeId,
37 cell::{Cell, RefCell},
38 fmt,
39 marker::PhantomData,
40 sync::Arc,
41 thread_local,
42 };
43 use tracing_core::{
44 span,
45 subscriber::{Interest, Subscriber},
46 Event, Metadata,
47 };
48
/// A [`Layer`] that wraps an inner [`Layer`] and adds a [`Filter`] which
/// controls what spans and events are enabled for that layer.
///
/// This is returned by the [`Layer::with_filter`] method. See the
/// [documentation on per-layer filtering][plf] for details.
///
/// [`Filter`]: crate::layer::Filter
/// [plf]: crate::layer#per-layer-filtering
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
#[derive(Clone)]
pub struct Filtered<L, F, S> {
    /// The per-layer filter that decides which spans/events reach `layer`.
    filter: F,
    /// The wrapped inner layer.
    layer: L,
    /// The `FilterId` assigned by the subscriber in `on_layer`, wrapped in a
    /// marker type so it can be discovered via `downcast_raw`. Starts out as
    /// `FilterId::disabled()` until the layer is registered.
    id: MagicPlfDowncastMarker,
    /// Ties this type to the subscriber type `S` without owning an `S`
    /// (`PhantomData<fn(S)>` avoids imposing auto-trait requirements on `S`).
    _s: PhantomData<fn(S)>,
}
65
/// Uniquely identifies an individual [`Filter`] instance in the context of
/// a [`Subscriber`].
///
/// When adding a [`Filtered`] [`Layer`] to a [`Subscriber`], the [`Subscriber`]
/// generates a `FilterId` for that [`Filtered`] layer. The [`Filtered`] layer
/// will then use the generated ID to query whether a particular span was
/// previously enabled by that layer's [`Filter`].
///
/// **Note**: Currently, the [`Registry`] type provided by this crate is the
/// **only** [`Subscriber`] implementation capable of participating in per-layer
/// filtering. Therefore, the `FilterId` type cannot currently be constructed by
/// code outside of `tracing-subscriber`. In the future, new APIs will be added
/// to `tracing-subscriber` to allow non-Registry [`Subscriber`]s to also
/// participate in per-layer filtering. When those APIs are added, subscribers
/// will be responsible for generating and assigning `FilterId`s.
///
/// [`Filter`]: crate::layer::Filter
/// [`Subscriber`]: tracing_core::Subscriber
/// [`Layer`]: crate::layer::Layer
/// [`Registry`]: crate::registry::Registry
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
#[derive(Copy, Clone)]
// The `u64` is a one-bit mask (see `FilterId::new`), not a sequential index;
// storing the mask directly lets IDs be combined with a bitwise OR (see
// `FilterId::and`).
pub struct FilterId(u64);
90
/// A bitmap tracking which [`FilterId`]s have enabled a given span or
/// event.
///
/// This is currently a private type that's used exclusively by the
/// [`Registry`]. However, in the future, this may become a public API, in order
/// to allow user subscribers to host [`Filter`]s.
///
/// [`Registry`]: crate::Registry
/// [`Filter`]: crate::layer::Filter
#[derive(Default, Copy, Clone, Eq, PartialEq)]
pub(crate) struct FilterMap {
    // One bit per `FilterId` mask: a *set* bit means the corresponding filter
    // *disabled* the span or event (see `FilterMap::set`); the all-zero
    // default means everything is enabled.
    bits: u64,
}
104
/// The current state of `enabled` calls to per-layer filters on this
/// thread.
///
/// When `Filtered::enabled` is called, the filter will set the bit
/// corresponding to its ID if the filter will disable the event/span being
/// filtered. When the event or span is recorded, the per-layer filter will
/// check its bit to determine if it disabled that event or span, and skip
/// forwarding the event or span to the inner layer if the bit is set. Once
/// a span or event has been skipped by a per-layer filter, it unsets its
/// bit, so that the `FilterMap` has been cleared for the next set of
/// `enabled` calls.
///
/// FilterState is also read by the `Registry`, for two reasons:
///
/// 1. When filtering a span, the Registry must store the `FilterMap`
///    generated by `Filtered::enabled` calls for that span as part of the
///    span's per-span data. This allows `Filtered` layers to determine
///    whether they had previously disabled a given span, and avoid showing it
///    to the wrapped layer if it was disabled.
///
///    This allows `Filtered` layers to also filter out the spans they
///    disable from span traversals (such as iterating over parents, etc).
/// 2. If all the bits are set, then every per-layer filter has decided it
///    doesn't want to enable that span or event. In that case, the
///    `Registry`'s `enabled` method will return `false`, so that
///    recording a span or event can be skipped entirely.
#[derive(Debug)]
pub(crate) struct FilterState {
    // The per-thread `FilterMap` built up by `Filtered::enabled` calls for
    // the span or event currently being filtered.
    enabled: Cell<FilterMap>,
    // The combined `Interest` accumulated by `add_interest` during a
    // `register_callsite` pass; taken (and reset) by `take_interest`.
    //
    // TODO(eliza): `Interest`s should _probably_ be `Copy`. The only reason
    // they're not is our Obsessive Commitment to Forwards-Compatibility. If
    // this changes in `tracing-core`, we can make this a `Cell` rather than
    // `RefCell`...
    interest: RefCell<Option<Interest>>,

    // Debug-build-only counters backing the `debug_assert!`s that verify the
    // filter/interest pass protocol.
    #[cfg(debug_assertions)]
    counters: DebugCounters,
}
143
/// Extra counters added to `FilterState` used only to make debug assertions.
///
/// These are only compiled in debug builds (`cfg(debug_assertions)`).
#[cfg(debug_assertions)]
#[derive(Debug, Default)]
struct DebugCounters {
    /// How many per-layer filters have participated in the current `enabled`
    /// call?
    in_filter_pass: Cell<usize>,

    /// How many per-layer filters have participated in the current
    /// `register_callsite` call?
    in_interest_pass: Cell<usize>,
}
156
thread_local! {
    /// The thread-local [`FilterState`] shared between all `Filtered` layers
    /// and the `Registry` while filtering a single span or event on this
    /// thread.
    pub(crate) static FILTERING: FilterState = FilterState::new();
}
160
161 // === impl Filter ===
#[cfg(feature = "registry")]
#[cfg_attr(docsrs, doc(cfg(feature = "registry")))]
impl<S> layer::Filter<S> for LevelFilter {
    /// Enables the span or event when its level passes this `LevelFilter`
    /// (per `tracing-core`'s level ordering).
    fn enabled(&self, meta: &Metadata<'_>, _: &Context<'_, S>) -> bool {
        meta.level() <= self
    }

    /// A callsite's level either always or never passes a `LevelFilter`, so
    /// the returned `Interest` is never `sometimes`.
    fn callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest {
        match meta.level() <= self {
            true => Interest::always(),
            false => Interest::never(),
        }
    }

    /// The filter itself is the most verbose level it will ever enable.
    fn max_level_hint(&self) -> Option<LevelFilter> {
        Some(*self)
    }
}
181
182 impl<S> layer::Filter<S> for Arc<dyn layer::Filter<S> + Send + Sync + 'static> {
183 #[inline]
enabled(&self, meta: &Metadata<'_>, cx: &Context<'_, S>) -> bool184 fn enabled(&self, meta: &Metadata<'_>, cx: &Context<'_, S>) -> bool {
185 (**self).enabled(meta, cx)
186 }
187
188 #[inline]
callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest189 fn callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest {
190 (**self).callsite_enabled(meta)
191 }
192
193 #[inline]
max_level_hint(&self) -> Option<LevelFilter>194 fn max_level_hint(&self) -> Option<LevelFilter> {
195 (**self).max_level_hint()
196 }
197 }
198
199 impl<S> layer::Filter<S> for Box<dyn layer::Filter<S> + Send + Sync + 'static> {
200 #[inline]
enabled(&self, meta: &Metadata<'_>, cx: &Context<'_, S>) -> bool201 fn enabled(&self, meta: &Metadata<'_>, cx: &Context<'_, S>) -> bool {
202 (**self).enabled(meta, cx)
203 }
204
205 #[inline]
callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest206 fn callsite_enabled(&self, meta: &'static Metadata<'static>) -> Interest {
207 (**self).callsite_enabled(meta)
208 }
209
210 #[inline]
max_level_hint(&self) -> Option<LevelFilter>211 fn max_level_hint(&self) -> Option<LevelFilter> {
212 (**self).max_level_hint()
213 }
214 }
215
216 // === impl Filtered ===
217
218 impl<L, F, S> Filtered<L, F, S> {
219 /// Wraps the provided [`Layer`] so that it is filtered by the given
220 /// [`Filter`].
221 ///
222 /// This is equivalent to calling the [`Layer::with_filter`] method.
223 ///
224 /// See the [documentation on per-layer filtering][plf] for details.
225 ///
226 /// [`Filter`]: crate::layer::Filter
227 /// [plf]: crate::layer#per-layer-filtering
new(layer: L, filter: F) -> Self228 pub fn new(layer: L, filter: F) -> Self {
229 Self {
230 layer,
231 filter,
232 id: MagicPlfDowncastMarker(FilterId::disabled()),
233 _s: PhantomData,
234 }
235 }
236
237 #[inline(always)]
id(&self) -> FilterId238 fn id(&self) -> FilterId {
239 debug_assert!(
240 !self.id.0.is_disabled(),
241 "a `Filtered` layer was used, but it had no `FilterId`; \
242 was it registered with the subscriber?"
243 );
244 self.id.0
245 }
246
did_enable(&self, f: impl FnOnce())247 fn did_enable(&self, f: impl FnOnce()) {
248 FILTERING.with(|filtering| filtering.did_enable(self.id(), f))
249 }
250 }
251
impl<S, L, F> Layer<S> for Filtered<L, F, S>
where
    S: Subscriber + for<'span> registry::LookupSpan<'span> + 'static,
    F: layer::Filter<S> + 'static,
    L: Layer<S>,
{
    fn on_layer(&mut self, subscriber: &mut S) {
        // Register this layer's filter with the subscriber, which assigns the
        // `FilterId` used by every subsequent filtering decision.
        self.id = MagicPlfDowncastMarker(subscriber.register_filter());
        self.layer.on_layer(subscriber);
    }

    // TODO(eliza): can we figure out a nice way to make the `Filtered` layer
    // not call `is_enabled_for` in hooks that the inner layer doesn't actually
    // have real implementations of? probably not...
    //
    // it would be cool if there was some wild rust reflection way of checking
    // if a trait impl has the default impl of a trait method or not, but that's
    // almost certainly impossible...right?

    fn register_callsite(&self, metadata: &'static Metadata<'static>) -> Interest {
        let interest = self.filter.callsite_enabled(metadata);

        // If the filter didn't disable the callsite, allow the inner layer to
        // register it — since `register_callsite` is also used for purposes
        // such as reserving/caching per-callsite data, we want the inner layer
        // to be able to perform any other registration steps. However, we'll
        // ignore its `Interest`.
        if !interest.is_never() {
            self.layer.register_callsite(metadata);
        }

        // Add our `Interest` to the current sum of per-layer filter `Interest`s
        // for this callsite.
        FILTERING.with(|filtering| filtering.add_interest(interest));

        // don't short circuit! if the stack consists entirely of `Layer`s with
        // per-layer filters, the `Registry` will return the actual `Interest`
        // value that's the sum of all the `register_callsite` calls to those
        // per-layer filters. if we returned an actual `never` interest here, a
        // `Layered` layer would short-circuit and not allow any `Filtered`
        // layers below us if _they_ are interested in the callsite.
        Interest::always()
    }

    fn enabled(&self, metadata: &Metadata<'_>, cx: Context<'_, S>) -> bool {
        // Record this filter's decision in the thread-local `FilterState` so
        // that the `Registry` (and this layer's later callbacks, via
        // `did_enable`) can observe it.
        let cx = cx.with_filter(self.id());
        let enabled = self.filter.enabled(metadata, &cx);
        FILTERING.with(|filtering| filtering.set(self.id(), enabled));

        if enabled {
            // If the filter enabled this metadata, ask the wrapped layer if
            // _it_ wants it --- it might have a global filter.
            self.layer.enabled(metadata, cx)
        } else {
            // Otherwise, return `true`. The _per-layer_ filter disabled this
            // metadata, but returning `false` in `Layer::enabled` will
            // short-circuit and globally disable the span or event. This is
            // *not* what we want for per-layer filters, as other layers may
            // still want this event. Returning `true` here means we'll continue
            // asking the next layer in the stack.
            //
            // Once all per-layer filters have been evaluated, the `Registry`
            // at the root of the stack will return `false` from its `enabled`
            // method if *every* per-layer filter disabled this metadata.
            // Otherwise, the individual per-layer filters will skip the next
            // `new_span` or `on_event` call for their layer if *they* disabled
            // the span or event, but it was not globally disabled.
            true
        }
    }

    fn new_span(&self, attrs: &span::Attributes<'_>, id: &span::Id, cx: Context<'_, S>) {
        // Only forward the new span to the inner layer if this layer's filter
        // enabled it during the preceding `enabled` call.
        self.did_enable(|| {
            self.layer.new_span(attrs, id, cx.with_filter(self.id()));
        })
    }

    #[doc(hidden)]
    fn max_level_hint(&self) -> Option<LevelFilter> {
        self.filter.max_level_hint()
    }

    fn on_record(&self, span: &span::Id, values: &span::Record<'_>, cx: Context<'_, S>) {
        // Skip the callback entirely if this layer's filter disabled the span.
        if let Some(cx) = cx.if_enabled_for(span, self.id()) {
            self.layer.on_record(span, values, cx)
        }
    }

    fn on_follows_from(&self, span: &span::Id, follows: &span::Id, cx: Context<'_, S>) {
        // only call `on_follows_from` if both spans are enabled by us
        if cx.is_enabled_for(span, self.id()) && cx.is_enabled_for(follows, self.id()) {
            self.layer
                .on_follows_from(span, follows, cx.with_filter(self.id()))
        }
    }

    fn on_event(&self, event: &Event<'_>, cx: Context<'_, S>) {
        // Only forward the event to the inner layer if this layer's filter
        // enabled it during the preceding `enabled` call.
        self.did_enable(|| {
            self.layer.on_event(event, cx.with_filter(self.id()));
        })
    }

    fn on_enter(&self, id: &span::Id, cx: Context<'_, S>) {
        if let Some(cx) = cx.if_enabled_for(id, self.id()) {
            self.layer.on_enter(id, cx)
        }
    }

    fn on_exit(&self, id: &span::Id, cx: Context<'_, S>) {
        if let Some(cx) = cx.if_enabled_for(id, self.id()) {
            self.layer.on_exit(id, cx)
        }
    }

    fn on_close(&self, id: span::Id, cx: Context<'_, S>) {
        if let Some(cx) = cx.if_enabled_for(&id, self.id()) {
            self.layer.on_close(id, cx)
        }
    }

    // XXX(eliza): the existence of this method still makes me sad...
    fn on_id_change(&self, old: &span::Id, new: &span::Id, cx: Context<'_, S>) {
        if let Some(cx) = cx.if_enabled_for(old, self.id()) {
            self.layer.on_id_change(old, new, cx)
        }
    }

    #[doc(hidden)]
    #[inline]
    unsafe fn downcast_raw(&self, id: TypeId) -> Option<*const ()> {
        // Expose this layer, the inner layer, the filter, and the
        // `MagicPlfDowncastMarker` wrapping its `FilterId`; any other type ID
        // is delegated to the inner layer so nested layers can be found.
        match id {
            id if id == TypeId::of::<Self>() => Some(self as *const _ as *const ()),
            id if id == TypeId::of::<L>() => Some(&self.layer as *const _ as *const ()),
            id if id == TypeId::of::<F>() => Some(&self.filter as *const _ as *const ()),
            id if id == TypeId::of::<MagicPlfDowncastMarker>() => {
                Some(&self.id as *const _ as *const ())
            }
            _ => self.layer.downcast_raw(id),
        }
    }
}
393
394 impl<F, L, S> fmt::Debug for Filtered<F, L, S>
395 where
396 F: fmt::Debug,
397 L: fmt::Debug,
398 {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result399 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
400 f.debug_struct("Filtered")
401 .field("filter", &self.filter)
402 .field("layer", &self.layer)
403 .field("id", &self.id)
404 .finish()
405 }
406 }
407
408 // === impl FilterId ===
409
410 impl FilterId {
disabled() -> Self411 const fn disabled() -> Self {
412 Self(std::u64::MAX)
413 }
414
415 /// Returns a `FilterId` that will consider _all_ spans enabled.
none() -> Self416 pub(crate) const fn none() -> Self {
417 Self(0)
418 }
419
new(id: u8) -> Self420 pub(crate) fn new(id: u8) -> Self {
421 assert!(id < 64, "filter IDs may not be greater than 64");
422 Self(1 << id as usize)
423 }
424
425 /// Combines two `FilterId`s, returning a new `FilterId` that will match a
426 /// [`FilterMap`] where the span was disabled by _either_ this `FilterId`
427 /// *or* the combined `FilterId`.
428 ///
429 /// This method is called by [`Context`]s when adding the `FilterId` of a
430 /// [`Filtered`] layer to the context.
431 ///
432 /// This is necessary for cases where we have a tree of nested [`Filtered`]
433 /// layers, like this:
434 ///
435 /// ```text
436 /// Filtered {
437 /// filter1,
438 /// Layered {
439 /// layer1,
440 /// Filtered {
441 /// filter2,
442 /// layer2,
443 /// },
444 /// }
445 /// ```
446 ///
447 /// We want `layer2` to be affected by both `filter1` _and_ `filter2`.
448 /// Without combining `FilterId`s, this works fine when filtering
449 /// `on_event`/`new_span`, because the outer `Filtered` layer (`filter1`)
450 /// won't call the inner layer's `on_event` or `new_span` callbacks if it
451 /// disabled the event/span.
452 ///
453 /// However, it _doesn't_ work when filtering span lookups and traversals
454 /// (e.g. `scope`). This is because the [`Context`] passed to `layer2`
455 /// would set its filter ID to the filter ID of `filter2`, and would skip
456 /// spans that were disabled by `filter2`. However, what if a span was
457 /// disabled by `filter1`? We wouldn't see it in `new_span`, but we _would_
458 /// see it in lookups and traversals...which we don't want.
459 ///
460 /// When a [`Filtered`] layer adds its ID to a [`Context`], it _combines_ it
461 /// with any previous filter ID that the context had, rather than replacing
462 /// it. That way, `layer2`'s context will check if a span was disabled by
463 /// `filter1` _or_ `filter2`. The way we do this, instead of representing
464 /// `FilterId`s as a number number that we shift a 1 over by to get a mask,
465 /// we just store the actual mask,so we can combine them with a bitwise-OR.
466 ///
467 /// For example, if we consider the following case (pretending that the
468 /// masks are 8 bits instead of 64 just so i don't have to write out a bunch
469 /// of extra zeroes):
470 ///
471 /// - `filter1` has the filter id 1 (`0b0000_0001`)
472 /// - `filter2` has the filter id 2 (`0b0000_0010`)
473 ///
474 /// A span that gets disabled by filter 1 would have the [`FilterMap`] with
475 /// bits `0b0000_0001`.
476 ///
477 /// If the `FilterId` was internally represented as `(bits to shift + 1),
478 /// when `layer2`'s [`Context`] checked if it enabled the span, it would
479 /// make the mask `0b0000_0010` (`1 << 1`). That bit would not be set in the
480 /// [`FilterMap`], so it would see that it _didn't_ disable the span. Which
481 /// is *true*, it just doesn't reflect the tree-like shape of the actual
482 /// subscriber.
483 ///
484 /// By having the IDs be masks instead of shifts, though, when the
485 /// [`Filtered`] with `filter2` gets the [`Context`] with `filter1`'s filter ID,
486 /// instead of replacing it, it ors them together:
487 ///
488 /// ```ignore
489 /// 0b0000_0001 | 0b0000_0010 == 0b0000_0011;
490 /// ```
491 ///
492 /// We then test if the span was disabled by seeing if _any_ bits in the
493 /// mask are `1`:
494 ///
495 /// ```ignore
496 /// filtermap & mask != 0;
497 /// 0b0000_0001 & 0b0000_0011 != 0;
498 /// 0b0000_0001 != 0;
499 /// true;
500 /// ```
501 ///
502 /// [`Context`]: crate::layer::Context
and(self, FilterId(other): Self) -> Self503 pub(crate) fn and(self, FilterId(other): Self) -> Self {
504 // If this mask is disabled, just return the other --- otherwise, we
505 // would always see that every span is disabled.
506 if self.0 == Self::disabled().0 {
507 return Self(other);
508 }
509
510 Self(self.0 | other)
511 }
512
is_disabled(self) -> bool513 fn is_disabled(self) -> bool {
514 self.0 == Self::disabled().0
515 }
516 }
517
518 impl fmt::Debug for FilterId {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result519 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
520 // don't print a giant set of the numbers 0..63 if the filter ID is disabled.
521 if self.0 == Self::disabled().0 {
522 return f
523 .debug_tuple("FilterId")
524 .field(&format_args!("DISABLED"))
525 .finish();
526 }
527
528 if f.alternate() {
529 f.debug_struct("FilterId")
530 .field("ids", &format_args!("{:?}", FmtBitset(self.0)))
531 .field("bits", &format_args!("{:b}", self.0))
532 .finish()
533 } else {
534 f.debug_tuple("FilterId").field(&FmtBitset(self.0)).finish()
535 }
536 }
537 }
538
539 impl fmt::Binary for FilterId {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result540 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
541 f.debug_tuple("FilterId")
542 .field(&format_args!("{:b}", self.0))
543 .finish()
544 }
545 }
546
547 // === impl FilterMap ===
548
549 impl FilterMap {
set(self, FilterId(mask): FilterId, enabled: bool) -> Self550 pub(crate) fn set(self, FilterId(mask): FilterId, enabled: bool) -> Self {
551 if mask == std::u64::MAX {
552 return self;
553 }
554
555 if enabled {
556 Self {
557 bits: self.bits & (!mask),
558 }
559 } else {
560 Self {
561 bits: self.bits | mask,
562 }
563 }
564 }
565
566 #[inline]
is_enabled(self, FilterId(mask): FilterId) -> bool567 pub(crate) fn is_enabled(self, FilterId(mask): FilterId) -> bool {
568 self.bits & mask == 0
569 }
570
571 #[inline]
any_enabled(self) -> bool572 pub(crate) fn any_enabled(self) -> bool {
573 self.bits != std::u64::MAX
574 }
575 }
576
577 impl fmt::Debug for FilterMap {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result578 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
579 let alt = f.alternate();
580 let mut s = f.debug_struct("FilterMap");
581 s.field("disabled_by", &format_args!("{:?}", &FmtBitset(self.bits)));
582
583 if alt {
584 s.field("bits", &format_args!("{:b}", self.bits));
585 }
586
587 s.finish()
588 }
589 }
590
591 impl fmt::Binary for FilterMap {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result592 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
593 f.debug_struct("FilterMap")
594 .field("bits", &format_args!("{:b}", self.bits))
595 .finish()
596 }
597 }
598
599 // === impl FilterState ===
600
impl FilterState {
    /// Returns a new, empty `FilterState` (used to initialize the
    /// thread-local `FILTERING`).
    fn new() -> Self {
        Self {
            enabled: Cell::new(FilterMap::default()),
            interest: RefCell::new(None),

            #[cfg(debug_assertions)]
            counters: DebugCounters::default(),
        }
    }

    /// Records whether the per-layer filter with the provided `FilterId`
    /// enabled the span or event currently being filtered on this thread.
    fn set(&self, filter: FilterId, enabled: bool) {
        #[cfg(debug_assertions)]
        {
            let in_current_pass = self.counters.in_filter_pass.get();
            // The first filter to run in a pass must see a cleared map.
            if in_current_pass == 0 {
                debug_assert_eq!(self.enabled.get(), FilterMap::default());
            }
            self.counters.in_filter_pass.set(in_current_pass + 1);
            debug_assert_eq!(
                self.counters.in_interest_pass.get(),
                0,
                "if we are in or starting a filter pass, we must not be in an interest pass."
            )
        }

        self.enabled.set(self.enabled.get().set(filter, enabled))
    }

    /// Folds `interest` into the running sum of per-layer filter `Interest`s
    /// for the callsite currently being registered on this thread.
    fn add_interest(&self, interest: Interest) {
        let mut curr_interest = self.interest.borrow_mut();

        #[cfg(debug_assertions)]
        {
            let in_current_pass = self.counters.in_interest_pass.get();
            // The first filter to register interest in a pass must see an
            // empty accumulator.
            if in_current_pass == 0 {
                debug_assert!(curr_interest.is_none());
            }
            self.counters.in_interest_pass.set(in_current_pass + 1);
        }

        if let Some(curr_interest) = curr_interest.as_mut() {
            // Combining `always` with a non-`always` (or `never` with a
            // non-`never`) yields `sometimes`.
            if (curr_interest.is_always() && !interest.is_always())
                || (curr_interest.is_never() && !interest.is_never())
            {
                *curr_interest = Interest::sometimes();
            }
            // If the two interests are the same, do nothing. If the current
            // interest is `sometimes`, stay sometimes.
        } else {
            *curr_interest = Some(interest);
        }
    }

    /// Returns `true` if *any* per-layer filter enabled the event currently
    /// being filtered on this thread, or if the thread-local is unavailable
    /// (in which case we default to enabled).
    pub(crate) fn event_enabled() -> bool {
        FILTERING
            .try_with(|this| {
                let enabled = this.enabled.get().any_enabled();
                #[cfg(debug_assertions)]
                {
                    if this.counters.in_filter_pass.get() == 0 {
                        debug_assert_eq!(this.enabled.get(), FilterMap::default());
                    }

                    // Nothing enabled this event, we won't tick back down the
                    // counter in `did_enable`. Reset it.
                    if !enabled {
                        this.counters.in_filter_pass.set(0);
                    }
                }
                enabled
            })
            .unwrap_or(true)
    }

    /// Executes a closure if the filter with the provided ID did not disable
    /// the current span/event.
    ///
    /// This is used to implement the `on_event` and `new_span` methods for
    /// `Filtered`.
    fn did_enable(&self, filter: FilterId, f: impl FnOnce()) {
        let map = self.enabled.get();
        if map.is_enabled(filter) {
            // If the filter didn't disable the current span/event, run the
            // callback.
            f();
        } else {
            // Otherwise, if this filter _did_ disable the span or event
            // currently being processed, clear its bit from this thread's
            // `FilterState`. The bit has already been "consumed" by skipping
            // this callback, and we need to ensure that the `FilterMap` for
            // this thread is reset when the *next* `enabled` call occurs.
            self.enabled.set(map.set(filter, true));
        }
        #[cfg(debug_assertions)]
        {
            let in_current_pass = self.counters.in_filter_pass.get();
            // Once the last filter in the pass has consumed its bit, the map
            // should be completely cleared again.
            if in_current_pass <= 1 {
                debug_assert_eq!(self.enabled.get(), FilterMap::default());
            }
            self.counters
                .in_filter_pass
                .set(in_current_pass.saturating_sub(1));
            debug_assert_eq!(
                self.counters.in_interest_pass.get(),
                0,
                "if we are in a filter pass, we must not be in an interest pass."
            )
        }
    }

    /// Clears the current in-progress filter state.
    ///
    /// This resets the [`FilterMap`] and current [`Interest`] as well as
    /// clearing the debug counters.
    pub(crate) fn clear_enabled() {
        // Drop the `Result` returned by `try_with` --- if we are in the middle
        // of a panic and the thread-local has been torn down, that's fine;
        // just ignore it rather than panicking.
        let _ = FILTERING.try_with(|filtering| {
            filtering.enabled.set(FilterMap::default());

            #[cfg(debug_assertions)]
            filtering.counters.in_filter_pass.set(0);
        });
    }

    /// Takes the combined `Interest` accumulated by `add_interest` during the
    /// current `register_callsite` pass, resetting the accumulator (and, in
    /// debug builds, the interest-pass counter) for the next pass.
    pub(crate) fn take_interest() -> Option<Interest> {
        FILTERING
            .try_with(|filtering| {
                #[cfg(debug_assertions)]
                {
                    if filtering.counters.in_interest_pass.get() == 0 {
                        debug_assert!(filtering.interest.try_borrow().ok()?.is_none());
                    }
                    filtering.counters.in_interest_pass.set(0);
                }
                filtering.interest.try_borrow_mut().ok()?.take()
            })
            .ok()?
    }

    /// Returns a copy of this thread's current `FilterMap`.
    pub(crate) fn filter_map(&self) -> FilterMap {
        let map = self.enabled.get();
        #[cfg(debug_assertions)]
        {
            if self.counters.in_filter_pass.get() == 0 {
                debug_assert_eq!(map, FilterMap::default());
            }
        }

        map
    }
}
/// This is a horrible and bad abuse of the downcasting system to expose
/// *internally* whether a layer has per-layer filtering, within
/// `tracing-subscriber`, without exposing a public API for it.
///
/// If a `Layer` has per-layer filtering, it will downcast to a
/// `MagicPlfDowncastMarker`. Since layers which contain other layers permit
/// downcasting to recurse to their children, this will do the Right Thing with
/// layers like Reload, Option, etc.
///
/// Why is this a wrapper around the `FilterId`, you may ask? Because
/// downcasting works by returning a pointer, and we don't want to risk
/// introducing UB by constructing pointers that _don't_ point to a valid
/// instance of the type they claim to be. In this case, we don't _intend_ for
/// this pointer to be dereferenced, so it would actually be fine to return one
/// that isn't a valid pointer...but we can't guarantee that the caller won't
/// (accidentally) dereference it, so it's better to be safe than sorry. We
/// could, alternatively, add an additional field to the type that's used only
/// for returning pointers to as part of the evil downcasting hack, but I
/// thought it was nicer to just add a `repr(transparent)` wrapper to the
/// existing `FilterId` field, since it won't make the struct any bigger.
///
/// Don't worry, this isn't on the test. :)
#[derive(Clone, Copy)]
#[repr(transparent)]
struct MagicPlfDowncastMarker(FilterId);
impl fmt::Debug for MagicPlfDowncastMarker {
    /// Delegates directly to the wrapped `FilterId`'s `Debug` impl, so the
    /// marker wrapper is invisible in output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Just pretend that `MagicPlfDowncastMarker` doesn't exist for
        // `fmt::Debug` purposes...if no one *sees* it in their `Debug` output,
        // they don't have to know I thought this code would be a good idea.
        fmt::Debug::fmt(&self.0, f)
    }
}
788
is_plf_downcast_marker(type_id: TypeId) -> bool789 pub(crate) fn is_plf_downcast_marker(type_id: TypeId) -> bool {
790 type_id == TypeId::of::<MagicPlfDowncastMarker>()
791 }
792
793 /// Does a type implementing `Subscriber` contain any per-layer filters?
subscriber_has_plf<S>(subscriber: &S) -> bool where S: Subscriber,794 pub(crate) fn subscriber_has_plf<S>(subscriber: &S) -> bool
795 where
796 S: Subscriber,
797 {
798 (subscriber as &dyn Subscriber).is::<MagicPlfDowncastMarker>()
799 }
800
801 /// Does a type implementing `Layer` contain any per-layer filters?
layer_has_plf<L, S>(layer: &L) -> bool where L: Layer<S>, S: Subscriber,802 pub(crate) fn layer_has_plf<L, S>(layer: &L) -> bool
803 where
804 L: Layer<S>,
805 S: Subscriber,
806 {
807 unsafe {
808 // Safety: we're not actually *doing* anything with this pointer --- we
809 // only care about the `Option`, which we're turning into a `bool`. So
810 // even if the layer decides to be evil and give us some kind of invalid
811 // pointer, we don't ever dereference it, so this is always safe.
812 layer.downcast_raw(TypeId::of::<MagicPlfDowncastMarker>())
813 }
814 .is_some()
815 }
816
/// Formats a 64-bit bitmask as the set of bit indices that are set.
struct FmtBitset(u64);

impl fmt::Debug for FmtBitset {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as a set containing each index in 0..64 whose bit is `1`,
        // in ascending order.
        f.debug_set()
            .entries((0u64..64).filter(|&bit| self.0 & (1 << bit) != 0))
            .finish()
    }
}
831