/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at https://mozilla.org/MPL/2.0/. */

#![allow(unsafe_code)]

use crate::properties::Importance;
use crate::shared_lock::StylesheetGuards;
use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
use parking_lot::RwLock;
use smallvec::SmallVec;
use std::fmt;
use std::hash;
use std::io::Write;
use std::mem;
use std::ptr;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};

use super::map::{Entry, Map};
use super::unsafe_box::UnsafeBox;
use super::{CascadeLevel, StyleSource};

/// The rule tree, the structure servo uses to preserve the results of selector
/// matching.
///
/// This is organized as a tree of rules. When a node matches a set of rules,
/// they're inserted in order in the tree, starting with the least specific
/// one.
///
/// When a rule is inserted in the tree, other elements may share the path up
/// to a given rule. If that's the case, we don't duplicate child nodes, but
/// share them.
///
/// When a rule node's refcount drops to zero, it doesn't get freed
/// immediately. Instead, it is put on a free list and is only potentially
/// GC'd after a while.
///
/// That way, a rule node that represents a likely-to-match-again rule (like a
/// :hover rule) can be reused if we haven't GC'd it yet.
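///
/// # Path sharing (illustrative sketch)
///
/// The snippet below is a sketch rather than a doctest: `ensure_child` is
/// internal to the rule tree module, and `source_a`, `source_b` and
/// `source_c` are hypothetical `StyleSource` values standing in for matched
/// rules.
///
/// ```ignore
/// let tree = RuleTree::new();
/// // One element matches the rules [A, B], another matches [A, C].
/// let a = tree.root().ensure_child(tree.root(), source_a, CascadeLevel::UANormal);
/// let ab = a.ensure_child(tree.root(), source_b, CascadeLevel::UANormal);
/// let ac = a.ensure_child(tree.root(), source_c, CascadeLevel::UANormal);
/// // Both chains share the node for A; only B and C get nodes of their own.
/// ```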
#[derive(Debug)]
pub struct RuleTree {
    root: StrongRuleNode,
}

impl Drop for RuleTree {
    fn drop(&mut self) {
        unsafe { self.swap_free_list_and_gc(ptr::null_mut()) }
    }
}

impl MallocSizeOf for RuleTree {
    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
        let mut n = 0;
        let mut stack = SmallVec::<[_; 32]>::new();
        stack.push(self.root.clone());

        while let Some(node) = stack.pop() {
            n += unsafe { ops.malloc_size_of(&*node.p) };
            let children = node.p.children.read();
            // Also count the children map's own heap allocation.
            n += children.shallow_size_of(ops);
            for c in &*children {
                stack.push(unsafe { c.upgrade() });
            }
        }

        n
    }
}

#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
struct ChildKey(CascadeLevel, ptr::NonNull<()>);

// SAFETY: the `NonNull` in a `ChildKey` is only used as an opaque identity
// for comparison and hashing; it is never dereferenced, so sending or sharing
// the key across threads is fine.
unsafe impl Send for ChildKey {}
unsafe impl Sync for ChildKey {}

impl RuleTree {
    /// Construct a new rule tree.
    pub fn new() -> Self {
        RuleTree {
            root: StrongRuleNode::new(Box::new(RuleNode::root())),
        }
    }

    /// Get the root rule node.
    pub fn root(&self) -> &StrongRuleNode {
        &self.root
    }

    /// This can only be called when no other thread is accessing this tree.
    pub fn gc(&self) {
        unsafe { self.swap_free_list_and_gc(RuleNode::DANGLING_PTR) }
    }

    /// This can only be called when no other thread is accessing this tree.
    pub fn maybe_gc(&self) {
        #[cfg(debug_assertions)]
        self.maybe_dump_stats();

        if self.root.p.approximate_free_count.load(Ordering::Relaxed) > RULE_TREE_GC_INTERVAL {
            self.gc();
        }
    }

    #[cfg(debug_assertions)]
    fn maybe_dump_stats(&self) {
        use itertools::Itertools;
        use std::cell::Cell;
        use std::time::{Duration, Instant};

        if !log_enabled!(log::Level::Trace) {
            return;
        }

        const RULE_TREE_STATS_INTERVAL: Duration = Duration::from_secs(2);

        thread_local! {
            pub static LAST_STATS: Cell<Instant> = Cell::new(Instant::now());
        };

        let should_dump = LAST_STATS.with(|s| {
            let now = Instant::now();
            if now.duration_since(s.get()) < RULE_TREE_STATS_INTERVAL {
                return false;
            }
            s.set(now);
            true
        });

        if !should_dump {
            return;
        }

        let mut children_count = crate::hash::FxHashMap::default();

        let mut stack = SmallVec::<[_; 32]>::new();
        stack.push(self.root.clone());
        while let Some(node) = stack.pop() {
            let children = node.p.children.read();
            *children_count.entry(children.len()).or_insert(0) += 1;
            for c in &*children {
                stack.push(unsafe { c.upgrade() });
            }
        }

        trace!("Rule tree stats:");
        let counts = children_count.keys().sorted();
        for count in counts {
            trace!(" {} - {}", count, children_count[count]);
        }
    }

    /// Steals the free list and drops its contents.
    unsafe fn swap_free_list_and_gc(&self, ptr: *mut RuleNode) {
        let root = &self.root.p;

        debug_assert!(!root.next_free.load(Ordering::Relaxed).is_null());

        // Reset the approximate free count to zero, as we are going to steal
        // the free list.
        root.approximate_free_count.store(0, Ordering::Relaxed);

        // Steal the free list head. Memory loads on nodes while iterating it
        // must observe any prior changes that occurred, so this requires
        // acquire ordering, but there are no writes that need to be kept
        // before this swap so there is no need for release.
        let mut head = root.next_free.swap(ptr, Ordering::Acquire);

        while head != RuleNode::DANGLING_PTR {
            debug_assert!(!head.is_null());

            let mut node = UnsafeBox::from_raw(head);

            // The root node cannot go on the free list.
            debug_assert!(node.root.is_some());

            // The refcount of nodes on the free list never goes below 1.
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);

            // No one else is currently writing to that field. Get the address
            // of the next node in the free list and replace it with null:
            // other threads will now consider that this node is not on the
            // free list.
            head = node.next_free.swap(ptr::null_mut(), Ordering::Relaxed);

            // This release write synchronises with the acquire fence in
            // `WeakRuleNode::upgrade`, making sure that if `upgrade` observes
            // this decrement of the refcount to 0, it will also observe the
            // `node.next_free` swap to null above.
            if node.refcount.fetch_sub(1, Ordering::Release) == 1 {
                // And given such an `upgrade` observed the null swap above, it
                // will need `pretend_to_be_on_free_list` to finish its job,
                // writing `RuleNode::DANGLING_PTR` in `node.next_free`.
                RuleNode::pretend_to_be_on_free_list(&node);

                // Drop this node now that we just observed its refcount going
                // down to zero.
                RuleNode::drop_without_free_list(&mut node);
            }
        }
    }
}

/// The number of RuleNodes added to the free list before we will consider
/// doing a GC when calling maybe_gc(). (The value is copied from Gecko,
/// where it likely did not result from a rigorous performance analysis.)
const RULE_TREE_GC_INTERVAL: usize = 300;

/// Used for some size assertions.
pub const RULE_NODE_SIZE: usize = std::mem::size_of::<RuleNode>();

/// A node in the rule tree.
struct RuleNode {
    /// The root node. Only the root has no root pointer, for obvious reasons.
    root: Option<WeakRuleNode>,

    /// The parent rule node. Only the root has no parent.
    parent: Option<StrongRuleNode>,

    /// The actual style source, either coming from a selector in a StyleRule,
    /// or a raw property declaration block (like the style attribute).
    ///
    /// None for the root node.
    source: Option<StyleSource>,

    /// The cascade level this rule is positioned at.
    level: CascadeLevel,

    /// The refcount of this node.
    ///
    /// Starts at one. Incremented in `StrongRuleNode::clone` and
    /// `WeakRuleNode::upgrade`. Decremented in `StrongRuleNode::drop`
    /// and `RuleTree::swap_free_list_and_gc`.
    ///
    /// If a non-root node's refcount reaches zero, it is incremented back to
    /// at least one in `RuleNode::pretend_to_be_on_free_list` until the caller
    /// who observed it dropping to zero has had a chance to try to remove it
    /// from its parent's children list.
    ///
    /// The refcount should never be decremented to zero if the value in
    /// `next_free` is not null.
    refcount: AtomicUsize,

    /// Only used for the root, stores the number of free rule nodes that are
    /// around.
    approximate_free_count: AtomicUsize,

    /// The children of a given rule node. Children remove themselves from here
    /// when they go away.
    children: RwLock<Map<ChildKey, WeakRuleNode>>,

    /// This field has two different meanings depending on whether this is the
    /// root node or not.
    ///
    /// If it is the root, it represents the head of the free list. It may be
    /// null, which means the free list is gone because the tree was dropped,
    /// and it may be `RuleNode::DANGLING_PTR`, which means the free list is
    /// empty.
    ///
    /// If it is not the root node, this field is either null if the node is
    /// not on the free list, `RuleNode::DANGLING_PTR` if it is the last item
    /// on the free list or the node is pretending to be on the free list, or
    /// any valid non-null pointer representing the next item on the free list
    /// after this one.
    ///
    /// See `RuleNode::push_on_free_list`, `swap_free_list_and_gc`, and
    /// `WeakRuleNode::upgrade`.
    ///
    /// Two threads should never attempt to put the same node on the free list
    /// both at the same time.
    next_free: AtomicPtr<RuleNode>,
}
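
// Summary of a non-root node's free-list life cycle, as implemented below:
// when `StrongRuleNode::drop` observes the refcount hitting zero, it calls
// `pretend_to_be_on_free_list` and then `push_on_free_list`; a later
// `RuleTree::gc` (or dropping the tree itself) steals the whole list in
// `swap_free_list_and_gc`, removes the free-list reference from each node
// and, for nodes whose refcount thereby reaches zero, calls
// `drop_without_free_list`, which unlinks them from their parent's children
// map and frees them.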

// On Gecko builds, hook into the leak checking machinery.
#[cfg(feature = "gecko_refcount_logging")]
mod gecko_leak_checking {
    use super::RuleNode;
    use std::mem::size_of;
    use std::os::raw::{c_char, c_void};

    extern "C" {
        fn NS_LogCtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
        fn NS_LogDtor(aPtr: *mut c_void, aTypeName: *const c_char, aSize: u32);
    }
    static NAME: &'static [u8] = b"RuleNode\0";

    /// Logs the creation of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_ctor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogCtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }

    /// Logs the destruction of a heap-allocated object to Gecko's leak-checking machinery.
    pub(super) fn log_dtor(ptr: *const RuleNode) {
        let s = NAME as *const [u8] as *const u8 as *const c_char;
        unsafe {
            NS_LogDtor(ptr as *mut c_void, s, size_of::<RuleNode>() as u32);
        }
    }
}

#[inline(always)]
fn log_new(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_ctor(_ptr);
}

#[inline(always)]
fn log_drop(_ptr: *const RuleNode) {
    #[cfg(feature = "gecko_refcount_logging")]
    gecko_leak_checking::log_dtor(_ptr);
}

impl RuleNode {
    const DANGLING_PTR: *mut Self = ptr::NonNull::dangling().as_ptr();

    unsafe fn new(
        root: WeakRuleNode,
        parent: StrongRuleNode,
        source: StyleSource,
        level: CascadeLevel,
    ) -> Self {
        debug_assert!(root.p.parent.is_none());
        RuleNode {
            root: Some(root),
            parent: Some(parent),
            source: Some(source),
            level,
            refcount: AtomicUsize::new(1),
            children: Default::default(),
            approximate_free_count: AtomicUsize::new(0),
            next_free: AtomicPtr::new(ptr::null_mut()),
        }
    }

    fn root() -> Self {
        RuleNode {
            root: None,
            parent: None,
            source: None,
            level: CascadeLevel::UANormal,
            refcount: AtomicUsize::new(1),
            approximate_free_count: AtomicUsize::new(0),
            children: Default::default(),
            next_free: AtomicPtr::new(RuleNode::DANGLING_PTR),
        }
    }

    fn key(&self) -> ChildKey {
        ChildKey(
            self.level,
            self.source
                .as_ref()
                .expect("Called key() on the root node")
                .key(),
        )
    }

    /// Drops a node without ever putting it on the free list.
    ///
    /// Note that the node may not be dropped if we observe that its refcount
    /// isn't zero anymore when we write-lock its parent's children map to
    /// remove it.
    ///
    /// This loops over parents of dropped nodes if their own refcount reaches
    /// zero to avoid recursion when dropping deep hierarchies of nodes.
    ///
    /// For non-root nodes, this should always be preceded by a call of
    /// `RuleNode::pretend_to_be_on_free_list`.
    unsafe fn drop_without_free_list(this: &mut UnsafeBox<Self>) {
        // We clone the box and shadow the original one to be able to loop
        // over its ancestors if they also need to be dropped.
        let mut this = UnsafeBox::clone(this);
        loop {
            // If the node has a parent, we need to remove it from its parent's
            // children list.
            if let Some(parent) = this.parent.as_ref() {
                debug_assert!(!this.next_free.load(Ordering::Relaxed).is_null());

                // We lock the parent's children list, which means no other
                // thread will have any further opportunity to resurrect the
                // node.
                let mut children = parent.p.children.write();

                this.next_free.store(ptr::null_mut(), Ordering::Relaxed);

                // We decrement the counter to remove the "pretend to be
                // on the free list" reference.
                let old_refcount = this.refcount.fetch_sub(1, Ordering::Release);
                debug_assert!(old_refcount != 0);
                if old_refcount != 1 {
                    // Other threads resurrected this node and those references
                    // are still alive, we have nothing to do anymore.
                    return;
                }

                // We finally remove the node from its parent's children list;
                // there are now no other references to it and it cannot
                // be resurrected anymore, even after we unlock the list.
                debug!(
                    "Remove from child list: {:?}, parent: {:?}",
                    this.as_mut_ptr(),
                    this.parent.as_ref().map(|p| p.p.as_mut_ptr())
                );
                let weak = children.remove(&this.key(), |node| node.p.key()).unwrap();
                assert_eq!(weak.p.as_mut_ptr(), this.as_mut_ptr());
            } else {
                debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                debug_assert_eq!(this.refcount.load(Ordering::Relaxed), 0);
            }

            // We are going to drop this node for good this time; as per the
            // usual refcounting protocol, we need an acquire fence here before
            // we run the destructor.
            //
            // See https://github.com/rust-lang/rust/pull/41714#issuecomment-298996916
            // for why it doesn't matter whether this is a load or a fence.
            atomic::fence(Ordering::Acquire);

            // Remove the parent reference from the child to avoid
            // recursively dropping it and putting it on the free list.
            let parent = UnsafeBox::deref_mut(&mut this).parent.take();

            // We now drop the actual box and its contents; no one should
            // access the current value in `this` anymore.
            log_drop(&*this);
            UnsafeBox::drop(&mut this);

            if let Some(parent) = parent {
                // We will attempt to drop the node's parent without the free
                // list, so we clone the inner unsafe box and forget the
                // original parent to avoid running its `StrongRuleNode`
                // destructor, which would attempt to use the free list if it
                // still exists.
                this = UnsafeBox::clone(&parent.p);
                mem::forget(parent);
                if this.refcount.fetch_sub(1, Ordering::Release) == 1 {
                    debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
                    if this.root.is_some() {
                        RuleNode::pretend_to_be_on_free_list(&this);
                    }
                    // Parent also reached refcount zero, so we loop to drop it.
                    continue;
                }
            }

            return;
        }
    }

    /// Pushes this node on the tree's free list. Returns false if the free
    /// list is gone. Should only be called after we decremented a node's
    /// refcount to zero and pretended to be on the free list.
    unsafe fn push_on_free_list(this: &UnsafeBox<Self>) -> bool {
        let root = &this.root.as_ref().unwrap().p;

        debug_assert!(this.refcount.load(Ordering::Relaxed) > 0);
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), Self::DANGLING_PTR);

        // Increment the approximate free count by one.
        root.approximate_free_count.fetch_add(1, Ordering::Relaxed);

        // If the compare-exchange operation fails in the loop, we will retry
        // with the new head value, so this can be a relaxed load.
        let mut head = root.next_free.load(Ordering::Relaxed);

        while !head.is_null() {
            // Two threads can never attempt to push the same node on the free
            // list both at the same time, so whoever else pushed a node on the
            // free list cannot have done so with this node.
            debug_assert_ne!(head, this.as_mut_ptr());

            // Store the current head of the free list in this node.
            this.next_free.store(head, Ordering::Relaxed);

            // Any thread acquiring the free list must observe the previous
            // next_free changes that occurred, hence the release ordering
            // on success.
            match root.next_free.compare_exchange_weak(
                head,
                this.as_mut_ptr(),
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // This node is now on the free list; the caller should not
                    // use the node anymore.
                    return true;
                },
                Err(new_head) => head = new_head,
            }
        }

        // The tree was dropped and the free list has been destroyed. We did
        // not push this node on the free list, but we still pretend to be on
        // the free list to be ready to call `drop_without_free_list`.
        false
    }

    /// Makes the node pretend to be on the free list. This will increment the
    /// refcount by 1 and store `Self::DANGLING_PTR` in `next_free`. This
    /// method should only be called after the caller has decremented the
    /// refcount to zero, with the null pointer stored in `next_free`.
    unsafe fn pretend_to_be_on_free_list(this: &UnsafeBox<Self>) {
        debug_assert_eq!(this.next_free.load(Ordering::Relaxed), ptr::null_mut());
        this.refcount.fetch_add(1, Ordering::Relaxed);
        this.next_free.store(Self::DANGLING_PTR, Ordering::Release);
    }

    fn as_mut_ptr(&self) -> *mut RuleNode {
        self as *const RuleNode as *mut RuleNode
    }
}

/// A weak reference to a rule node.
pub(crate) struct WeakRuleNode {
    p: UnsafeBox<RuleNode>,
}

/// A strong reference to a rule node.
pub struct StrongRuleNode {
    p: UnsafeBox<RuleNode>,
}

#[cfg(feature = "servo")]
malloc_size_of_is_0!(StrongRuleNode);

impl StrongRuleNode {
    fn new(n: Box<RuleNode>) -> Self {
        debug_assert_eq!(n.parent.is_none(), n.source.is_none());

        log_new(&*n);

        debug!("Creating rule node: {:p}", &*n);

        Self {
            p: UnsafeBox::from_box(n),
        }
    }

    unsafe fn from_unsafe_box(p: UnsafeBox<RuleNode>) -> Self {
        Self { p }
    }

    unsafe fn downgrade(&self) -> WeakRuleNode {
        WeakRuleNode {
            p: UnsafeBox::clone(&self.p),
        }
    }

    /// Get the parent rule node of this rule node.
    pub fn parent(&self) -> Option<&StrongRuleNode> {
        self.p.parent.as_ref()
    }

    pub(super) fn ensure_child(
        &self,
        root: &StrongRuleNode,
        source: StyleSource,
        level: CascadeLevel,
    ) -> StrongRuleNode {
        use parking_lot::RwLockUpgradableReadGuard;

        debug_assert!(
            self.p.level <= level,
            "Should be ordered (instead {:?} > {:?}), from {:?} and {:?}",
            self.p.level,
            level,
            self.p.source,
            source,
        );

        let key = ChildKey(level, source.key());
        let children = self.p.children.upgradable_read();
        if let Some(child) = children.get(&key, |node| node.p.key()) {
            // Sound to call because we read-locked the parent's children.
            return unsafe { child.upgrade() };
        }
        let mut children = RwLockUpgradableReadGuard::upgrade(children);
        match children.entry(key, |node| node.p.key()) {
            Entry::Occupied(child) => {
                // Sound to call because we write-locked the parent's children.
                unsafe { child.upgrade() }
            },
            Entry::Vacant(entry) => unsafe {
                let node = StrongRuleNode::new(Box::new(RuleNode::new(
                    root.downgrade(),
                    self.clone(),
                    source,
                    level,
                )));
                // Sound to call because we still own a strong reference to
                // this node, through the `node` variable itself that we are
                // going to return to the caller.
                entry.insert(node.downgrade());
                node
            },
        }
    }

    /// Get the style source corresponding to this rule node. May return `None`
    /// if it's the root node, which means that the node hasn't matched any
    /// rules.
    pub fn style_source(&self) -> Option<&StyleSource> {
        self.p.source.as_ref()
    }

    /// The cascade level for this node.
    pub fn cascade_level(&self) -> CascadeLevel {
        self.p.level
    }

    /// Get the importance that this rule node represents.
    pub fn importance(&self) -> Importance {
        self.p.level.importance()
    }

    /// Returns whether this node has any children. Only intended for testing
    /// purposes.
    pub unsafe fn has_children_for_testing(&self) -> bool {
        !self.p.children.read().is_empty()
    }

    pub(super) fn dump<W: Write>(&self, guards: &StylesheetGuards, writer: &mut W, indent: usize) {
        const INDENT_INCREMENT: usize = 4;

        for _ in 0..indent {
            let _ = write!(writer, " ");
        }

        let _ = writeln!(
            writer,
            " - {:p} (ref: {:?}, parent: {:?})",
            &*self.p,
            self.p.refcount.load(Ordering::Relaxed),
            self.parent().map(|p| &*p.p as *const RuleNode)
        );

        for _ in 0..indent {
            let _ = write!(writer, " ");
        }

        if let Some(source) = self.style_source() {
            source.dump(self.cascade_level().guard(guards), writer);
        } else {
            if indent != 0 {
                warn!("How has this happened?");
            }
            let _ = write!(writer, "(root)");
        }

        let _ = write!(writer, "\n");
        for child in &*self.p.children.read() {
            unsafe {
                child
                    .upgrade()
                    .dump(guards, writer, indent + INDENT_INCREMENT);
            }
        }
    }
}

impl Clone for StrongRuleNode {
    fn clone(&self) -> Self {
        debug!(
            "{:p}: {:?}+",
            &*self.p,
            self.p.refcount.load(Ordering::Relaxed)
        );
        debug_assert!(self.p.refcount.load(Ordering::Relaxed) > 0);
        self.p.refcount.fetch_add(1, Ordering::Relaxed);
        unsafe { StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p)) }
    }
}

impl Drop for StrongRuleNode {
    #[cfg_attr(feature = "servo", allow(unused_mut))]
    fn drop(&mut self) {
        let node = &*self.p;
        debug!("{:p}: {:?}-", node, node.refcount.load(Ordering::Relaxed));
        debug!(
            "Dropping node: {:p}, root: {:?}, parent: {:?}",
            node,
            node.root.as_ref().map(|r| &*r.p as *const RuleNode),
            node.parent.as_ref().map(|p| &*p.p as *const RuleNode)
        );

        let should_drop = {
            debug_assert!(node.refcount.load(Ordering::Relaxed) > 0);
            node.refcount.fetch_sub(1, Ordering::Release) == 1
        };

        if !should_drop {
            // The refcount didn't drop to zero yet, so there is nothing for
            // us to do anymore.
            return;
        }

        unsafe {
            if node.root.is_some() {
                // This is a non-root node and we just observed the refcount
                // dropping to zero, so we need to pretend to be on the free
                // list to unblock any thread that tried to resurrect this
                // node first through `WeakRuleNode::upgrade`.
                RuleNode::pretend_to_be_on_free_list(&self.p);

                // Attempt to push the node on the free list. This may fail
                // if the free list is gone.
                if RuleNode::push_on_free_list(&self.p) {
                    return;
                }
            }

            // Either this was the last reference of the root node, or the
            // rule tree is gone and there is no free list anymore. Drop the
            // node.
            RuleNode::drop_without_free_list(&mut self.p);
        }
    }
}

impl WeakRuleNode {
    /// Upgrades this weak node reference, returning a strong one.
    ///
    /// Must only be called on items stored in a node's children list. The
    /// children list must at least be read-locked when this is called.
    unsafe fn upgrade(&self) -> StrongRuleNode {
        debug!("Upgrading weak node: {:p}", &*self.p);

        if self.p.refcount.fetch_add(1, Ordering::Relaxed) == 0 {
            // We observed a refcount of 0, so we need to wait for this node to
            // be put on the free list. Resetting the `next_free` pointer to
            // null is only done in `RuleNode::drop_without_free_list`, just
            // before a release refcount decrement, so this acquire fence here
            // makes sure that we observed the write to null before we loop
            // until there is a non-null value.
            atomic::fence(Ordering::Acquire);
            while self.p.next_free.load(Ordering::Relaxed).is_null() {}
        }
        StrongRuleNode::from_unsafe_box(UnsafeBox::clone(&self.p))
    }
}

impl fmt::Debug for StrongRuleNode {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        (&*self.p as *const RuleNode).fmt(f)
    }
}

impl Eq for StrongRuleNode {}
impl PartialEq for StrongRuleNode {
    fn eq(&self, other: &Self) -> bool {
        &*self.p as *const RuleNode == &*other.p
    }
}

impl hash::Hash for StrongRuleNode {
    fn hash<H>(&self, state: &mut H)
    where
        H: hash::Hasher,
    {
        (&*self.p as *const RuleNode).hash(state)
    }
}