1 //===-- tsan_fd.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "tsan_fd.h"
14 
15 #include <sanitizer_common/sanitizer_atomic.h>
16 
17 #include "tsan_interceptors.h"
18 #include "tsan_rtl.h"
19 
20 namespace __tsan {
21 
// Two-level fd descriptor table: kTableSizeL1 lazily-allocated L2 chunks,
// each holding kTableSizeL2 FdDesc entries; tracks fds in [0, kTableSize).
const int kTableSizeL1 = 1024;
const int kTableSizeL2 = 1024;
const int kTableSize = kTableSizeL1 * kTableSizeL2;
25 
// Synchronization object associated with an fd (or shared by a group of
// fds). rc is a reference count; the special value (u64)-1 marks the
// immortal statically-allocated instances in FdContext, which ref()/unref()
// never modify or free.
struct FdSync {
  atomic_uint64_t rc;
};
29 
// Per-fd descriptor. The descriptor's own address doubles as the location
// for shadow accesses that model races on the fd itself (see init/FdClose).
struct FdDesc {
  FdSync *sync;            // sync object used for acquire/release on this fd
  Tid creation_tid;        // thread that created the fd (for reports)
  StackID creation_stack;  // stack at fd creation (for reports)
};
35 
// Global fd-tracking state (singleton fdctx below).
struct FdContext {
  atomic_uintptr_t tab[kTableSizeL1];  // L1 table; slots point to FdDesc[kTableSizeL2]
  // Addresses used for synchronization.
  FdSync globsync;  // shared by all fds when flags()->io_sync == 2
  FdSync filesync;  // shared by all file fds
  FdSync socksync;  // shared by all socket fds
  u64 connectsync;  // address used to synchronize connect() -> accept()
};

static FdContext fdctx;
46 
47 static bool bogusfd(int fd) {
48   // Apparently a bogus fd value.
49   return fd < 0 || fd >= kTableSize;
50 }
51 
52 static FdSync *allocsync(ThreadState *thr, uptr pc) {
53   FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
54       kDefaultAlignment, false);
55   atomic_store(&s->rc, 1, memory_order_relaxed);
56   return s;
57 }
58 
59 static FdSync *ref(FdSync *s) {
60   if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
61     atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
62   return s;
63 }
64 
65 static void unref(ThreadState *thr, uptr pc, FdSync *s) {
66   if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
67     if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
68       CHECK_NE(s, &fdctx.globsync);
69       CHECK_NE(s, &fdctx.filesync);
70       CHECK_NE(s, &fdctx.socksync);
71       user_free(thr, pc, s, false);
72     }
73   }
74 }
75 
// Returns the descriptor slot for fd, lazily allocating its L2 chunk.
// Lock-free: two threads may race to allocate the same chunk; the CAS
// loser frees its copy and uses the winner's.
static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
  CHECK_GE(fd, 0);
  CHECK_LT(fd, kTableSize);
  atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
  uptr l1 = atomic_load(pl1, memory_order_consume);
  if (l1 == 0) {
    uptr size = kTableSizeL2 * sizeof(FdDesc);
    // We need this to reside in user memory to properly catch races on it.
    void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
    internal_memset(p, 0, size);
    // NOTE(review): the address of this function is passed as the pc,
    // presumably as a recognizable synthetic caller tag -- confirm.
    MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
    // Publish the chunk; on CAS failure another thread won, so free ours.
    if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
      l1 = (uptr)p;
    else
      user_free(thr, pc, p, false);
  }
  FdDesc *fds = reinterpret_cast<FdDesc *>(l1);
  return &fds[fd % kTableSizeL2];
}
95 
// Binds fd's descriptor to sync object s. s must be already ref'ed by the
// caller; ownership of that reference is transferred to this function
// (it is either stored in the descriptor or unref'ed, depending on flags).
static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
    bool write = true) {
  FdDesc *d = fddesc(thr, pc, fd);
  // As a matter of fact, we don't intercept all close calls.
  // See e.g. libc __res_iclose().
  if (d->sync) {
    unref(thr, pc, d->sync);
    d->sync = 0;
  }
  // io_sync == 0: no fd synchronization; 1: per-fd sync object;
  // 2: all fds share the single global sync object.
  if (flags()->io_sync == 0) {
    unref(thr, pc, s);
  } else if (flags()->io_sync == 1) {
    d->sync = s;
  } else if (flags()->io_sync == 2) {
    unref(thr, pc, s);
    d->sync = &fdctx.globsync;
  }
  d->creation_tid = thr->tid;
  d->creation_stack = CurrentStackId(thr, pc);
  if (write) {
    // To catch races between fd usage and open.
    MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
  } else {
    // See the dup-related comment in FdClose.
    MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
  }
}
124 
125 void FdInit() {
126   atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
127   atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
128   atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
129 }
130 
131 void FdOnFork(ThreadState *thr, uptr pc) {
132   // On fork() we need to reset all fd's, because the child is going
133   // close all them, and that will cause races between previous read/write
134   // and the close.
135   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
136     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
137     if (tab == 0)
138       break;
139     for (int l2 = 0; l2 < kTableSizeL2; l2++) {
140       FdDesc *d = &tab[l2];
141       MemoryResetRange(thr, pc, (uptr)d, 8);
142     }
143   }
144 }
145 
146 bool FdLocation(uptr addr, int *fd, Tid *tid, StackID *stack) {
147   for (int l1 = 0; l1 < kTableSizeL1; l1++) {
148     FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
149     if (tab == 0)
150       break;
151     if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
152       int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
153       FdDesc *d = &tab[l2];
154       *fd = l1 * kTableSizeL1 + l2;
155       *tid = d->creation_tid;
156       *stack = d->creation_stack;
157       return true;
158     }
159   }
160   return false;
161 }
162 
163 void FdAcquire(ThreadState *thr, uptr pc, int fd) {
164   if (bogusfd(fd))
165     return;
166   FdDesc *d = fddesc(thr, pc, fd);
167   FdSync *s = d->sync;
168   DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
169   MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
170   if (s)
171     Acquire(thr, pc, (uptr)s);
172 }
173 
174 void FdRelease(ThreadState *thr, uptr pc, int fd) {
175   if (bogusfd(fd))
176     return;
177   FdDesc *d = fddesc(thr, pc, fd);
178   FdSync *s = d->sync;
179   DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
180   MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
181   if (s)
182     Release(thr, pc, (uptr)s);
183 }
184 
185 void FdAccess(ThreadState *thr, uptr pc, int fd) {
186   DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
187   if (bogusfd(fd))
188     return;
189   FdDesc *d = fddesc(thr, pc, fd);
190   MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
191 }
192 
193 void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
194   DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
195   if (bogusfd(fd))
196     return;
197   FdDesc *d = fddesc(thr, pc, fd);
198   if (!MustIgnoreInterceptor(thr)) {
199     if (write) {
200       // To catch races between fd usage and close.
201       MemoryAccess(thr, pc, (uptr)d, 8, kAccessWrite);
202     } else {
203       // This path is used only by dup2/dup3 calls.
204       // We do read instead of write because there is a number of legitimate
205       // cases where write would lead to false positives:
206       // 1. Some software dups a closed pipe in place of a socket before closing
207       //    the socket (to prevent races actually).
208       // 2. Some daemons dup /dev/null in place of stdin/stdout.
209       // On the other hand we have not seen cases when write here catches real
210       // bugs.
211       MemoryAccess(thr, pc, (uptr)d, 8, kAccessRead);
212     }
213   }
214   // We need to clear it, because if we do not intercept any call out there
215   // that creates fd, we will hit false postives.
216   MemoryResetRange(thr, pc, (uptr)d, 8);
217   unref(thr, pc, d->sync);
218   d->sync = 0;
219   d->creation_tid = kInvalidTid;
220   d->creation_stack = kInvalidStackID;
221 }
222 
223 void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
224   DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
225   if (bogusfd(fd))
226     return;
227   init(thr, pc, fd, &fdctx.filesync);
228 }
229 
230 void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
231   DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
232   if (bogusfd(oldfd) || bogusfd(newfd))
233     return;
234   // Ignore the case when user dups not yet connected socket.
235   FdDesc *od = fddesc(thr, pc, oldfd);
236   MemoryAccess(thr, pc, (uptr)od, 8, kAccessRead);
237   FdClose(thr, pc, newfd, write);
238   init(thr, pc, newfd, ref(od->sync), write);
239 }
240 
241 void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
242   DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
243   FdSync *s = allocsync(thr, pc);
244   init(thr, pc, rfd, ref(s));
245   init(thr, pc, wfd, ref(s));
246   unref(thr, pc, s);
247 }
248 
249 void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
250   DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
251   if (bogusfd(fd))
252     return;
253   init(thr, pc, fd, allocsync(thr, pc));
254 }
255 
256 void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
257   DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
258   if (bogusfd(fd))
259     return;
260   init(thr, pc, fd, 0);
261 }
262 
263 void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
264   DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
265   if (bogusfd(fd))
266     return;
267   init(thr, pc, fd, 0);
268 }
269 
270 void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
271   DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
272   if (bogusfd(fd))
273     return;
274   init(thr, pc, fd, allocsync(thr, pc));
275 }
276 
277 void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
278   DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
279   if (bogusfd(fd))
280     return;
281   // It can be a UDP socket.
282   init(thr, pc, fd, &fdctx.socksync);
283 }
284 
285 void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
286   DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
287   if (bogusfd(fd))
288     return;
289   // Synchronize connect->accept.
290   Acquire(thr, pc, (uptr)&fdctx.connectsync);
291   init(thr, pc, newfd, &fdctx.socksync);
292 }
293 
294 void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
295   DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
296   if (bogusfd(fd))
297     return;
298   // Synchronize connect->accept.
299   Release(thr, pc, (uptr)&fdctx.connectsync);
300 }
301 
302 void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
303   DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
304   if (bogusfd(fd))
305     return;
306   init(thr, pc, fd, &fdctx.socksync);
307 }
308 
309 uptr File2addr(const char *path) {
310   (void)path;
311   static u64 addr;
312   return (uptr)&addr;
313 }
314 
315 uptr Dir2addr(const char *path) {
316   (void)path;
317   static u64 addr;
318   return (uptr)&addr;
319 }
320 
321 }  //  namespace __tsan
322