//===-- tsan_fd.cpp -------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "tsan_fd.h"
#include "tsan_rtl.h"
#include <sanitizer_common/sanitizer_atomic.h>

namespace __tsan {

const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;

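// Synchronization object associated with an fd. rc is a reference count;
// the special value (u64)-1 marks the statically allocated globsync,
// filesync and socksync objects, which are never ref'ed or freed.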
struct FdSync {
  atomic_uint64_t rc;
};

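// Descriptor of a single fd: its synchronization object (if any) and the
// thread/stack that created it, reported back via FdLocation().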
struct FdDesc {
  FdSync *sync;
  int creation_tid;
  u32 creation_stack;
};

struct FdContext {
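  // Two-level table of descriptors: each slot lazily points to an array of
  // kTableSizeL2 FdDesc entries (see fddesc()).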
  atomic_uintptr_t tab[kTableSizeL1];
  // Addresses used for synchronization.
  FdSync globsync;
  FdSync filesync;
  FdSync socksync;
  u64 connectsync;
};

static FdContext fdctx;

static bool bogusfd(int fd) {
  // Apparently a bogus fd value.
  return fd < 0 || fd >= kTableSize;
}

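// Allocates a new FdSync in user memory with an initial reference count of 1.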
static FdSync *allocsync(ThreadState *thr, uptr pc) {
  FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
      kDefaultAlignment, false);
  atomic_store(&s->rc, 1, memory_order_relaxed);
  return s;
}

static FdSync *ref(FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
    atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
  return s;
}

static void unref(ThreadState *thr, uptr pc, FdSync *s) {
  if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
    if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
      CHECK_NE(s, &fdctx.globsync);
      CHECK_NE(s, &fdctx.filesync);
      CHECK_NE(s, &fdctx.socksync);
      user_free(thr, pc, s, false);
    }
  }
}

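// Returns the descriptor for fd, lazily allocating the second-level chunk of
// the table on first use.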
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
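    // Publish the new chunk; if another thread won the race, free ours and
    // use the already published one.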
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p, false);
  }
  FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
  return &fds[fd % kTableSizeL2];
}

// s must be already ref'ed.
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
    bool write = true) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
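  // io_sync == 0: no synchronization on fds; 1: synchronize via the
  // per-fd/per-object sync s; 2: synchronize all fds via the single
  // global object.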
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  if (write) {
    // To catch races between fd usage and open.
    MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
  } else {
    // See the dup-related comment in FdClose.
    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  }
}

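// Marks the static sync objects with rc == (u64)-1 so that ref/unref never
// free them.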
void FdInit() {
  atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
  atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
}

void FdOnFork(ThreadState *thr, uptr pc) {
  // On fork() we need to reset all fds, because the child is going to
  // close all of them, and that will cause races between the previous
  // read/write and the close.
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    for (int l2 = 0; l2 < kTableSizeL2; l2++) {
      FdDesc *d = &tab[l2];
      MemoryResetRange(thr, pc, (uptr)d, 8);
    }
  }
}

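// If addr falls into one of the descriptor tables, reports which fd the
// address belongs to and the thread/stack that created it.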
bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
  for (int l1 = 0; l1 < kTableSizeL1; l1++) {
    FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
    if (tab == 0)
      break;
    if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
      int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
      FdDesc *d = &tab[l2];
      *fd = l1 * kTableSizeL2 + l2;
      *tid = d->creation_tid;
      *stack = d->creation_stack;
      return true;
    }
  }
  return false;
}

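// Acquires the fd's sync object (if any); pairs with the Release done in
// FdRelease().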
void FdAcquire(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Acquire(thr, pc, (uptr)s);
}

void FdRelease(ThreadState *thr, uptr pc, int fd) {
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  FdSync *s = d->sync;
  DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  if (s)
    Release(thr, pc, (uptr)s);
}

void FdAccess(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  MemoryRead(thr, pc, (uptr)d, kSizeLog8);
}

void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
  DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  FdDesc *d = fddesc(thr, pc, fd);
  if (write) {
    // To catch races between fd usage and close.
    MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
  } else {
    // This path is used only by dup2/dup3 calls.
    // We do a read instead of a write because there are a number of
    // legitimate cases where a write would lead to false positives:
    // 1. Some software dups a closed pipe in place of a socket before closing
    //    the socket (to prevent races actually).
    // 2. Some daemons dup /dev/null in place of stdin/stdout.
    // On the other hand, we have not seen cases where a write here catches
    // real bugs.
    MemoryRead(thr, pc, (uptr)d, kSizeLog8);
  }
  // We need to clear it, because if we do not intercept some call out there
  // that creates an fd, we will hit false positives.
  MemoryResetRange(thr, pc, (uptr)d, 8);
  unref(thr, pc, d->sync);
  d->sync = 0;
  d->creation_tid = 0;
  d->creation_stack = 0;
}

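// File fds all share the single static filesync object, so with io_sync == 1
// operations on any file synchronize with each other.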
void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.filesync);
}

void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
  DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
  if (bogusfd(oldfd) || bogusfd(newfd))
    return;
  // Ignore the case when the user dups a not yet connected socket.
  FdDesc *od = fddesc(thr, pc, oldfd);
  MemoryRead(thr, pc, (uptr)od, kSizeLog8);
  FdClose(thr, pc, newfd, write);
  init(thr, pc, newfd, ref(od->sync), write);
}

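// Both pipe ends share one FdSync, so releases on one end synchronize with
// acquires on the other (write->read synchronization).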
void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
  DPrintf("#%d: FdPipeCreate(%d, %d)\n", thr->tid, rfd, wfd);
  FdSync *s = allocsync(thr, pc);
  init(thr, pc, rfd, ref(s));
  init(thr, pc, wfd, ref(s));
  unref(thr, pc, s);
}

void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, 0);
}

void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, allocsync(thr, pc));
}

void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // It can be a UDP socket.
  init(thr, pc, fd, &fdctx.socksync);
}

void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
  DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Acquire(thr, pc, (uptr)&fdctx.connectsync);
  init(thr, pc, newfd, &fdctx.socksync);
}

void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  // Synchronize connect->accept.
  Release(thr, pc, (uptr)&fdctx.connectsync);
}

void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
  DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
  if (bogusfd(fd))
    return;
  init(thr, pc, fd, &fdctx.socksync);
}

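// All paths are mapped to a single static address; per-path granularity is
// not implemented.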
uptr File2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

uptr Dir2addr(const char *path) {
  (void)path;
  static u64 addr;
  return (uptr)&addr;
}

}  // namespace __tsan