1*ee754c2dSkamil //===-- sanitizer_stackdepot.cc -------------------------------------------===//
2*ee754c2dSkamil //
3*ee754c2dSkamil // The LLVM Compiler Infrastructure
4*ee754c2dSkamil //
5*ee754c2dSkamil // This file is distributed under the University of Illinois Open Source
6*ee754c2dSkamil // License. See LICENSE.TXT for details.
7*ee754c2dSkamil //
8*ee754c2dSkamil //===----------------------------------------------------------------------===//
9*ee754c2dSkamil //
10*ee754c2dSkamil // This file is shared between AddressSanitizer and ThreadSanitizer
11*ee754c2dSkamil // run-time libraries.
12*ee754c2dSkamil //===----------------------------------------------------------------------===//
13*ee754c2dSkamil
14*ee754c2dSkamil #include "sanitizer_stackdepot.h"
15*ee754c2dSkamil
16*ee754c2dSkamil #include "sanitizer_common.h"
17*ee754c2dSkamil #include "sanitizer_stackdepotbase.h"
18*ee754c2dSkamil
19*ee754c2dSkamil namespace __sanitizer {
20*ee754c2dSkamil
// A node of the stack depot hash table. Each node stores one deduplicated
// stack trace in a trailing variable-length array and chains to the next
// node of its bucket through |link|. The trace hash and a per-trace use
// counter share the single atomic word |hash_and_use_count|.
struct StackDepotNode {
  StackDepotNode *link;  // Next node in the same hash bucket.
  u32 id;                // Trace id; not set in store() -- presumably
                         // assigned by the owning depot (StackDepotBase).
  atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
  // (The 12/20 split above holds for non-Android; Android uses 16/16 --
  // see kTabSizeLog below.)
  u32 size;              // Number of frames stored in stack[].
  u32 tag;               // Caller-supplied tag, compared in eq().
  uptr stack[1]; // [size]  -- variable-length trailing frame array.

  static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
  // Lower kTabSizeLog bits are equal for all items in one bucket.
  // We use these bits to store the per-stack use counter.
  static const u32 kUseCountBits = kTabSizeLog;
  static const u32 kMaxUseCount = 1 << kUseCountBits;  // Exclusive limit.
  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
  static const u32 kHashMask = ~kUseCountMask;

  typedef StackTrace args_type;
  // Returns true if this node holds exactly the trace described by |args|
  // and the stored hash bits match the corresponding bits of |hash|.
  bool eq(u32 hash, const args_type &args) const {
    // Only the hash bits not occupied by the use counter participate in
    // the comparison; a mismatch there (or in size/tag) rules the node
    // out before the per-frame scan.
    u32 hash_bits =
        atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
    if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
      return false;
    uptr i = 0;
    for (; i < size; i++) {
      if (stack[i] != args.trace[i]) return false;
    }
    return true;
  }
  // Bytes needed for a node holding |args.size| frames; the first frame
  // occupies the declared stack[1] element, hence the (size - 1).
  static uptr storage_size(const args_type &args) {
    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
  }
  // MurmurHash2 over the frame PCs. Note each uptr frame is truncated to
  // u32 before mixing.
  static u32 hash(const args_type &args) {
    // murmur2
    const u32 m = 0x5bd1e995;
    const u32 seed = 0x9747b28c;
    const u32 r = 24;
    u32 h = seed ^ (args.size * sizeof(uptr));
    for (uptr i = 0; i < args.size; i++) {
      u32 k = args.trace[i];
      k *= m;
      k ^= k >> r;
      k *= m;
      h *= m;
      h ^= k;
    }
    h ^= h >> 13;
    h *= m;
    h ^= h >> 15;
    return h;
  }
  // A trace is storable only if it is non-empty and has a frame array.
  static bool is_valid(const args_type &args) {
    return args.size > 0 && args.trace;
  }
  // Initializes this node from |args|; the use-counter bits of the packed
  // word start at zero because only (hash & kHashMask) is stored.
  void store(const args_type &args, u32 hash) {
    atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
    size = args.size;
    tag = args.tag;
    internal_memcpy(stack, args.trace, size * sizeof(uptr));
  }
  // Reconstructs an args_type view pointing into this node's own storage.
  args_type load() const {
    return args_type(&stack[0], size, tag);
  }
  StackDepotHandle get_handle() { return StackDepotHandle(this); }

  typedef StackDepotHandle handle_type;
};
87*ee754c2dSkamil
88*ee754c2dSkamil COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
89*ee754c2dSkamil
id()90*ee754c2dSkamil u32 StackDepotHandle::id() { return node_->id; }
use_count()91*ee754c2dSkamil int StackDepotHandle::use_count() {
92*ee754c2dSkamil return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
93*ee754c2dSkamil StackDepotNode::kUseCountMask;
94*ee754c2dSkamil }
inc_use_count_unsafe()95*ee754c2dSkamil void StackDepotHandle::inc_use_count_unsafe() {
96*ee754c2dSkamil u32 prev =
97*ee754c2dSkamil atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
98*ee754c2dSkamil StackDepotNode::kUseCountMask;
99*ee754c2dSkamil CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
100*ee754c2dSkamil }
101*ee754c2dSkamil
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
    StackDepot;
// The single global depot instance used by all free functions below.
static StackDepot theDepot;
106*ee754c2dSkamil
StackDepotGetStats()107*ee754c2dSkamil StackDepotStats *StackDepotGetStats() {
108*ee754c2dSkamil return theDepot.GetStats();
109*ee754c2dSkamil }
110*ee754c2dSkamil
StackDepotPut(StackTrace stack)111*ee754c2dSkamil u32 StackDepotPut(StackTrace stack) {
112*ee754c2dSkamil StackDepotHandle h = theDepot.Put(stack);
113*ee754c2dSkamil return h.valid() ? h.id() : 0;
114*ee754c2dSkamil }
115*ee754c2dSkamil
StackDepotPut_WithHandle(StackTrace stack)116*ee754c2dSkamil StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
117*ee754c2dSkamil return theDepot.Put(stack);
118*ee754c2dSkamil }
119*ee754c2dSkamil
StackDepotGet(u32 id)120*ee754c2dSkamil StackTrace StackDepotGet(u32 id) {
121*ee754c2dSkamil return theDepot.Get(id);
122*ee754c2dSkamil }
123*ee754c2dSkamil
StackDepotLockAll()124*ee754c2dSkamil void StackDepotLockAll() {
125*ee754c2dSkamil theDepot.LockAll();
126*ee754c2dSkamil }
127*ee754c2dSkamil
StackDepotUnlockAll()128*ee754c2dSkamil void StackDepotUnlockAll() {
129*ee754c2dSkamil theDepot.UnlockAll();
130*ee754c2dSkamil }
131*ee754c2dSkamil
IdComparator(const StackDepotReverseMap::IdDescPair & a,const StackDepotReverseMap::IdDescPair & b)132*ee754c2dSkamil bool StackDepotReverseMap::IdDescPair::IdComparator(
133*ee754c2dSkamil const StackDepotReverseMap::IdDescPair &a,
134*ee754c2dSkamil const StackDepotReverseMap::IdDescPair &b) {
135*ee754c2dSkamil return a.id < b.id;
136*ee754c2dSkamil }
137*ee754c2dSkamil
StackDepotReverseMap()138*ee754c2dSkamil StackDepotReverseMap::StackDepotReverseMap() {
139*ee754c2dSkamil map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
140*ee754c2dSkamil for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
141*ee754c2dSkamil atomic_uintptr_t *p = &theDepot.tab[idx];
142*ee754c2dSkamil uptr v = atomic_load(p, memory_order_consume);
143*ee754c2dSkamil StackDepotNode *s = (StackDepotNode*)(v & ~1);
144*ee754c2dSkamil for (; s; s = s->link) {
145*ee754c2dSkamil IdDescPair pair = {s->id, s};
146*ee754c2dSkamil map_.push_back(pair);
147*ee754c2dSkamil }
148*ee754c2dSkamil }
149*ee754c2dSkamil Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
150*ee754c2dSkamil }
151*ee754c2dSkamil
Get(u32 id)152*ee754c2dSkamil StackTrace StackDepotReverseMap::Get(u32 id) {
153*ee754c2dSkamil if (!map_.size())
154*ee754c2dSkamil return StackTrace();
155*ee754c2dSkamil IdDescPair pair = {id, nullptr};
156*ee754c2dSkamil uptr idx =
157*ee754c2dSkamil InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
158*ee754c2dSkamil if (idx > map_.size() || map_[idx].id != id)
159*ee754c2dSkamil return StackTrace();
160*ee754c2dSkamil return map_[idx].desc->load();
161*ee754c2dSkamil }
162*ee754c2dSkamil
163*ee754c2dSkamil } // namespace __sanitizer
164