1 //===-- sanitizer_stackdepot.cpp ------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is shared between AddressSanitizer and ThreadSanitizer
10 // run-time libraries.
11 //===----------------------------------------------------------------------===//
12 
13 #include "sanitizer_stackdepot.h"
14 
15 #include "sanitizer_common.h"
16 #include "sanitizer_hash.h"
17 #include "sanitizer_stackdepotbase.h"
18 
19 namespace __sanitizer {
20 
// One interned stack trace. Nodes live in the depot's hash table, chained
// per bucket via |link|, and are never deallocated. The trace frames are
// stored inline in the trailing |stack| array.
struct StackDepotNode {
  StackDepotNode *link;                // next node in the same hash bucket
  u32 id;                              // unique id assigned by StackDepotBase
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  u32 size;                            // number of frames in |stack|
  u32 tag;                             // user-supplied tag, must match on lookup
  uptr stack[1];  // [size]            // flexible-array idiom: real length is |size|

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  // NOTE: the "hash_bits : 12; use_count : 20" comment above matches the
  // non-Android layout; on Android the split is 16/16.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;

  typedef StackTrace args_type;
  // Returns true if this node holds exactly the trace described by |args|.
  // Only the upper hash bits are compared: the lower bits of the full hash
  // are implied by the bucket index (see comment on kUseCountBits).
  bool eq(u32 hash, const args_type &args) const {
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
  // Bytes needed to store a node for |args|; "- 1" accounts for the one
  // uptr already counted inside sizeof(StackDepotNode) via stack[1].
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  // Hashes the raw frame PCs (tag is deliberately not part of the hash;
  // it is checked separately in eq()).
  static u32 hash(const args_type &args) {
    MurMur2HashBuilder H(args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
    return H.get();
  }
  // A trace must be non-empty and have a frame buffer to be stored.
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  // Initializes a freshly allocated node. The use counter starts at zero
  // because only the masked hash bits are written.
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  // Returns a view of the stored trace; the returned StackTrace points
  // into this node's inline storage.
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};
73 
// Keep the externally visible limit in sync with the node's bit layout.
COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
75 
// Unique id of the referenced depot node.
u32 StackDepotHandle::id() { return node_->id; }
77 int StackDepotHandle::use_count() {
78   return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
79          StackDepotNode::kUseCountMask;
80 }
81 void StackDepotHandle::inc_use_count_unsafe() {
82   u32 prev =
83       atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
84       StackDepotNode::kUseCountMask;
85   CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
86 }
87 
// FIXME(dvyukov): this single reserved bit is used in TSan.
// The process-wide depot instance: a hash table of StackDepotNode with one
// reserved low bit per bucket pointer and 2^kTabSizeLog buckets.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
static StackDepot theDepot;
92 
// Returns the global depot's statistics (forwards to StackDepotBase).
StackDepotStats *StackDepotGetStats() {
  return theDepot.GetStats();
}
96 
97 u32 StackDepotPut(StackTrace stack) {
98   StackDepotHandle h = theDepot.Put(stack);
99   return h.valid() ? h.id() : 0;
100 }
101 
// Same as StackDepotPut, but returns the full handle so callers can
// access the use counter (the handle may be invalid for rejected traces).
StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
  return theDepot.Put(stack);
}
105 
// Retrieves a previously stored trace by id (forwards to StackDepotBase).
StackTrace StackDepotGet(u32 id) {
  return theDepot.Get(id);
}
109 
// Locks the whole depot, e.g. around fork(); pair with StackDepotUnlockAll.
void StackDepotLockAll() {
  theDepot.LockAll();
}
113 
// Releases the locks taken by StackDepotLockAll.
void StackDepotUnlockAll() {
  theDepot.UnlockAll();
}
117 
// Strict-weak ordering by id; used both for sorting map_ and for the
// binary search in StackDepotReverseMap::Get.
bool StackDepotReverseMap::IdDescPair::IdComparator(
    const StackDepotReverseMap::IdDescPair &a,
    const StackDepotReverseMap::IdDescPair &b) {
  return a.id < b.id;
}
123 
124 StackDepotReverseMap::StackDepotReverseMap() {
125   map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
126   for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
127     atomic_uintptr_t *p = &theDepot.tab[idx];
128     uptr v = atomic_load(p, memory_order_consume);
129     StackDepotNode *s = (StackDepotNode*)(v & ~1);
130     for (; s; s = s->link) {
131       IdDescPair pair = {s->id, s};
132       map_.push_back(pair);
133     }
134   }
135   Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
136 }
137 
138 StackTrace StackDepotReverseMap::Get(u32 id) {
139   if (!map_.size())
140     return StackTrace();
141   IdDescPair pair = {id, nullptr};
142   uptr idx =
143       InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
144   if (idx > map_.size() || map_[idx].id != id)
145     return StackTrace();
146   return map_[idx].desc->load();
147 }
148 
149 } // namespace __sanitizer
150