1 //===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 //===----------------------------------------------------------------------===//
11 #ifndef TSAN_CLOCK_H
12 #define TSAN_CLOCK_H
13
14 #include "tsan_defs.h"
15 #include "tsan_dense_alloc.h"
16
17 namespace __tsan {
18
// Allocator and per-thread cache for clock storage blocks.
// Blocks are allocated from a dense slab allocator (see tsan_dense_alloc.h);
// each block is 512 bytes (see the SyncClock storage scheme below).
typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
21
// The clock that lives in sync variables (mutexes, atomics, etc).
// Storage is a (possibly two-level) table of ClockElem's allocated from
// ClockAlloc; the first-level block carries a reference counter, so the
// table can be shared between clocks (see Unshare/IsShared below).
class SyncClock {
 public:
  SyncClock();
  ~SyncClock();

  // Number of clock elements currently representable by this clock.
  uptr size() const;

  // These are used only in tests.
  u64 get(unsigned tid) const;
  u64 get_clean(unsigned tid) const;

  // Grows the clock so that it can hold at least nclk elements.
  void Resize(ClockCache *c, uptr nclk);
  // Releases all storage back to the cache and resets to the empty state.
  void Reset(ClockCache *c);

  void DebugDump(int(*printf)(const char *s, ...));

  // Clock element iterator.
  // Note: it iterates only over the table without regard to dirty entries.
  class Iter {
   public:
    explicit Iter(SyncClock* parent);
    Iter& operator++();
    bool operator!=(const Iter& other);
    ClockElem &operator*();

   private:
    SyncClock *parent_;
    // [pos_, end_) is the current contiguous range of clock elements.
    ClockElem *pos_;
    ClockElem *end_;
    int block_;  // Current number of second level block.

    // Advances to the next contiguous range of elements
    // (implemented in tsan_clock.cpp).
    NOINLINE void Next();
  };

  Iter begin();
  Iter end();

 private:
  friend class ThreadClock;
  friend class Iter;
  // Number of dirty (not yet flushed into tab_) per-thread entries.
  static const uptr kDirtyTids = 2;

  // A (tid, epoch) pair that overrides the corresponding table entry
  // until flushed by FlushDirty.
  struct Dirty {
    u64 epoch : kClkBits;
    u64 tid : 64 - kClkBits;  // kInvalidId if not active
  };

  unsigned release_store_tid_;
  unsigned release_store_reused_;
  Dirty dirty_[kDirtyTids];
  // If size_ is 0, tab_ is nullptr.
  // If size <= 64 (kClockCount), tab_ contains pointer to an array with
  // 64 ClockElem's (ClockBlock::clock).
  // Otherwise, tab_ points to an array with up to 127 u32 elements,
  // each pointing to the second-level 512b block with 64 ClockElem's.
  // Unused space in the first level ClockBlock is used to store additional
  // clock elements.
  // The last u32 element in the first level ClockBlock is always used as
  // reference counter.
  //
  // See the following scheme for details.
  // All memory blocks are 512 bytes (allocated from ClockAlloc).
  // Clock (clk) elements are 64 bits.
  // Idx and ref are 32 bits.
  //
  // tab_
  //  |
  //  \/
  //  +----------------------------------------------------+
  //  | clk128 | clk129 | ...unused... | idx1 | idx0 | ref  |
  //  +----------------------------------------------------+
  //  |                                 |
  //  |                                 \/
  //  |                                 +----------------+
  //  |                                 | clk0 ... clk63 |
  //  |                                 +----------------+
  //  \/
  //  +------------------+
  //  | clk64 ... clk127 |
  //  +------------------+
  //
  // Note: dirty entries, if active, always override what's stored in the clock.
  ClockBlock *tab_;
  u32 tab_idx_;
  u16 size_;
  u16 blocks_;  // Number of second level blocks.

  // Makes a private copy of a shared (ref > 1) table before mutation.
  void Unshare(ClockCache *c);
  bool IsShared() const;
  bool Cachable() const;
  void ResetImpl();
  // Writes active dirty entries back into the table.
  void FlushDirty();
  // Max number of elements the current table layout can hold.
  uptr capacity() const;
  u32 get_block(uptr bi) const;
  void append_block(u32 idx);
  // Reference to the element for thread `tid` in the table
  // (dirty entries are not consulted).
  ClockElem &elem(unsigned tid) const;
};
121
// The clock that lives in threads.
// A fixed-size vector clock; synchronizes with SyncClock's via
// acquire/release operations.
class ThreadClock {
 public:
  typedef DenseSlabAllocCache Cache;

  explicit ThreadClock(unsigned tid, unsigned reused = 0);

  u64 get(unsigned tid) const;
  void set(ClockCache *c, unsigned tid, u64 v);
  void set(u64 v);
  // Increments the current thread's own time.
  void tick();
  uptr size() const;

  // Happens-before synchronization with a sync variable's clock.
  void acquire(ClockCache *c, SyncClock *src);
  void release(ClockCache *c, SyncClock *dst);
  void acq_rel(ClockCache *c, SyncClock *dst);
  void ReleaseStore(ClockCache *c, SyncClock *dst);
  // Drops the cached SyncClock state (releases the held table reference).
  void ResetCached(ClockCache *c);

  void DebugReset();
  void DebugDump(int(*printf)(const char *s, ...));

 private:
  static const uptr kDirtyTids = SyncClock::kDirtyTids;
  // Index of the thread associated with the clock ("current thread").
  const unsigned tid_;
  const unsigned reused_;  // tid_ reuse count.
  // Current thread time when it acquired something from other threads.
  u64 last_acquire_;

  // Cached SyncClock (without dirty entries and release_store_tid_).
  // We reuse it for subsequent store-release operations without intervening
  // acquire operations. Since it is shared (and thus constant), clock value
  // for the current thread is then stored in dirty entries in the SyncClock.
  // We hold a reference to the table while it is cached here.
  u32 cached_idx_;
  u16 cached_size_;
  u16 cached_blocks_;

  // Number of active elements in the clk_ table (the rest is zeros).
  uptr nclk_;
  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.

  bool IsAlreadyAcquired(const SyncClock *src) const;
  void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
168
get(unsigned tid)169 ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
170 DCHECK_LT(tid, kMaxTidInClock);
171 return clk_[tid];
172 }
173
set(u64 v)174 ALWAYS_INLINE void ThreadClock::set(u64 v) {
175 DCHECK_GE(v, clk_[tid_]);
176 clk_[tid_] = v;
177 }
178
tick()179 ALWAYS_INLINE void ThreadClock::tick() {
180 clk_[tid_]++;
181 }
182
size()183 ALWAYS_INLINE uptr ThreadClock::size() const {
184 return nclk_;
185 }
186
begin()187 ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
188 return Iter(this);
189 }
190
end()191 ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
192 return Iter(nullptr);
193 }
194
size()195 ALWAYS_INLINE uptr SyncClock::size() const {
196 return size_;
197 }
198
Iter(SyncClock * parent)199 ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
200 : parent_(parent)
201 , pos_(nullptr)
202 , end_(nullptr)
203 , block_(-1) {
204 if (parent)
205 Next();
206 }
207
208 ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
209 pos_++;
210 if (UNLIKELY(pos_ >= end_))
211 Next();
212 return *this;
213 }
214
215 ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
216 return parent_ != other.parent_;
217 }
218
// Dereferences to the current clock element within the table.
ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
  return *pos_;
}
222 } // namespace __tsan
223
224 #endif // TSAN_CLOCK_H
225