//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_ilist.h"
#include "tsan_mutexset.h"
#include "tsan_stack_trace.h"

namespace __tsan {

const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
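
// For reference: kTracePartSize = 1 << 13 = 8192 events per part,
// kTraceParts = 2M / 8192 = 256 parts, and kTraceSize = 256 * 8192 = 2M
// events in total.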

// Must fit into 3 bits.
enum EventType {
  EventTypeMop,
  EventTypeFuncEnter,
  EventTypeFuncExit,
  EventTypeLock,
  EventTypeUnlock,
  EventTypeRLock,
  EventTypeRUnlock
};

// Represents a thread event (from most significant bit):
// u64 typ  : 3;   // EventType.
// u64 addr : 61;  // Associated pc.
typedef u64 Event;

const uptr kEventPCBits = 61;
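
// Illustrative sketch (hypothetical helpers, not part of the runtime):
// unpacking the packed representation documented above.
static inline EventType EventGetType(Event ev) {
  return (EventType)(ev >> kEventPCBits);
}
static inline uptr EventGetPC(Event ev) {
  return (uptr)(ev & ((1ull << kEventPCBits) - 1));
}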

struct TraceHeader {
#if !SANITIZER_GO
  BufferedStackTrace stack0;  // Start stack for the trace.
#else
  VarSizeStackTrace stack0;
#endif
  u64        epoch0;  // Start epoch for the trace.
  MutexSet   mset0;

  TraceHeader() : stack0(), epoch0() {}
};

struct Trace {
  Mutex mtx;
#if !SANITIZER_GO
  // Must be last to catch overflow as a paging fault.
  // The Go shadow stack is dynamically allocated.
  uptr shadow_stack[kShadowStackSize];
#endif
  // Must be the last field, because we unmap the unused part in
  // CreateThreadContext.
  TraceHeader headers[kTraceParts];

  Trace() : mtx(MutexTypeTrace) {}
};

namespace v3 {

enum class EventType : u64 {
  kAccessExt,
  kAccessRange,
  kLock,
  kRLock,
  kUnlock,
  kTime,
};

// "Base" type for all events, used for type dispatch.
struct Event {
  // We use a variable-length type encoding to give more bits to the event
  // types that need them. If is_access is set, this is EventAccess.
  // Otherwise, if is_func is set, this is EventFunc.
  // Otherwise, type denotes the event type.
  u64 is_access : 1;
  u64 is_func : 1;
  EventType type : 3;
  u64 _ : 59;
};
static_assert(sizeof(Event) == 8, "bad Event size");

// Nop event, used as padding; it does not affect state during replay.
static constexpr Event NopEvent = {1, 0, EventType::kAccessExt, 0};
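
// Illustrative sketch (hypothetical helper, not part of the runtime): a
// reader iterating a trace must first dispatch on the variable-length
// encoding above to learn how many words the current event occupies. The
// sizes follow from the static_asserts on the concrete event structs below.
static inline uptr EventWords(const Event& ev) {
  if (ev.is_access || ev.is_func)
    return 1;  // EventAccess and EventFunc are single-word events
  switch (ev.type) {
    case EventType::kAccessExt:
    case EventType::kAccessRange:
    case EventType::kLock:
    case EventType::kRLock:
      return 2;  // 16-byte events: EventAccessExt/EventAccessRange/EventLock
    case EventType::kUnlock:
    case EventType::kTime:
      return 1;  // 8-byte events: EventUnlock/EventTime
  }
  return 1;
}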

// The compressed memory access event can represent only accesses whose PC is
// close enough to the previous access PC. Otherwise we fall back to
// EventAccessExt.
struct EventAccess {
  static constexpr uptr kPCBits = 15;

  u64 is_access : 1;  // = 1
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 pc_delta : kPCBits;  // signed delta from the previous memory access PC
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventAccess) == 8, "bad EventAccess size");
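
// Illustrative sketch (hypothetical helper; the exact bias is an assumption):
// pc_delta holds a biased delta so that small negative offsets from the
// previous access PC fit into the unsigned kPCBits-wide field. A reader
// recovers the full PC like this:
static inline uptr RestoreAccessPC(uptr prev_pc, const EventAccess& ev) {
  return (uptr)(prev_pc + ev.pc_delta - (1ull << (EventAccess::kPCBits - 1)));
}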

// Function entry (pc != 0) or exit (pc == 0).
struct EventFunc {
  u64 is_access : 1;  // = 0
  u64 is_func : 1;    // = 1
  u64 pc : 62;
};
static_assert(sizeof(EventFunc) == 8, "bad EventFunc size");

// Extended memory access with full PC.
struct EventAccessExt {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessExt
  u64 is_read : 1;
  u64 is_atomic : 1;
  u64 size_log : 2;
  u64 _ : 11;
  u64 addr : kCompressedAddrBits;
  u64 pc;
};
static_assert(sizeof(EventAccessExt) == 16, "bad EventAccessExt size");

// Access to a memory range.
struct EventAccessRange {
  static constexpr uptr kSizeLoBits = 13;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kAccessRange
  u64 is_read : 1;
  u64 is_free : 1;
  u64 size_lo : kSizeLoBits;
  u64 pc : kCompressedAddrBits;
  u64 addr : kCompressedAddrBits;
  u64 size_hi : 64 - kCompressedAddrBits;
};
static_assert(sizeof(EventAccessRange) == 16, "bad EventAccessRange size");
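
// Illustrative sketch (hypothetical helper): the range size is split into
// size_lo/size_hi so that the two compressed addresses fit; a reader
// reassembles the full size as follows.
static inline uptr RestoreRangeSize(const EventAccessRange& ev) {
  return (uptr)((ev.size_hi << EventAccessRange::kSizeLoBits) | ev.size_lo);
}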

// Mutex lock.
struct EventLock {
  static constexpr uptr kStackIDLoBits = 15;

  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kLock or EventType::kRLock
  u64 pc : kCompressedAddrBits;
  u64 stack_lo : kStackIDLoBits;
  u64 stack_hi : sizeof(StackID) * kByteBits - kStackIDLoBits;
  u64 _ : 3;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventLock) == 16, "bad EventLock size");
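
// Illustrative sketch (hypothetical helper): the lock stack ID is likewise
// split into stack_lo/stack_hi around the PC field; a reader reassembles the
// full StackID as follows.
static inline StackID RestoreLockStackID(const EventLock& ev) {
  return (StackID)((ev.stack_hi << EventLock::kStackIDLoBits) | ev.stack_lo);
}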

// Mutex unlock.
struct EventUnlock {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kUnlock
  u64 _ : 15;
  u64 addr : kCompressedAddrBits;
};
static_assert(sizeof(EventUnlock) == 8, "bad EventUnlock size");

// Time change event.
struct EventTime {
  u64 is_access : 1;   // = 0
  u64 is_func : 1;     // = 0
  EventType type : 3;  // = EventType::kTime
  u64 sid : sizeof(Sid) * kByteBits;
  u64 epoch : kEpochBits;
  u64 _ : 64 - 5 - sizeof(Sid) * kByteBits - kEpochBits;
};
static_assert(sizeof(EventTime) == 8, "bad EventTime size");
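
// Illustrative sketch (hypothetical helper): the 5 control bits plus the
// sid/epoch fields account for the bit budget checked by the padding
// expression above; a reader simply extracts the pair.
static inline void RestoreTime(const EventTime& ev, u64* sid, u64* epoch) {
  *sid = ev.sid;
  *epoch = ev.epoch;
}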

struct Trace;

struct TraceHeader {
  Trace* trace = nullptr;  // back-pointer to the Trace containing this part
  INode trace_parts;       // in Trace::parts
};

struct TracePart : TraceHeader {
  static constexpr uptr kByteSize = 256 << 10;
  static constexpr uptr kSize =
      (kByteSize - sizeof(TraceHeader)) / sizeof(Event);
  // TraceAcquire does a fast event pointer overflow check by testing the
  // pointer into TracePart::events against the kAlignment mask. Since
  // TraceParts are allocated page-aligned, this check detects the end of the
  // array (it also has false positives in the middle, which are filtered out
  // separately); see the sketch after this struct. This also requires events
  // to be the last field.
  static constexpr uptr kAlignment = 0xff0;
  Event events[kSize];

  TracePart() {}
};
static_assert(sizeof(TracePart) == TracePart::kByteSize, "bad TracePart size");
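
// Illustrative sketch (hypothetical helper; the exact predicate is an
// assumption): the fast overflow check described in TracePart. Because parts
// are page-aligned and events is the last field, advancing past the final
// slot lands the position on a page boundary; positions whose low bits fall
// below the mask match too, and those are the false positives filtered out
// separately.
static inline bool TracePosMayBePartEnd(const Event* pos) {
  return (((uptr)(pos + 1)) & TracePart::kAlignment) == 0;
}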

struct Trace {
  Mutex mtx;
  IList<TraceHeader, &TraceHeader::trace_parts, TracePart> parts;
  // Final position in the last part for finished threads.
  Event* final_pos = nullptr;

  Trace() : mtx(MutexTypeTrace) {}
};

}  // namespace v3

}  // namespace __tsan

#endif  // TSAN_TRACE_H