1 //===- ThreadSafety.cpp ---------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
11 //
12 // See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13 // for more information.
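//
// As a brief illustrative example (using the macros described in that
// document), the analysis warns when annotated data is accessed without
// holding the required capability:
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void Bad()  { Data = 42; }                          // warning: requires Mu
//   void Good() { Mu.Lock(); Data = 42; Mu.Unlock(); }  // no warning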
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "clang/Analysis/Analyses/ThreadSafety.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/Decl.h"
20 #include "clang/AST/DeclCXX.h"
21 #include "clang/AST/DeclGroup.h"
22 #include "clang/AST/Expr.h"
23 #include "clang/AST/ExprCXX.h"
24 #include "clang/AST/OperationKinds.h"
25 #include "clang/AST/Stmt.h"
26 #include "clang/AST/StmtVisitor.h"
27 #include "clang/AST/Type.h"
28 #include "clang/Analysis/Analyses/PostOrderCFGView.h"
29 #include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
30 #include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
31 #include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
32 #include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
33 #include "clang/Analysis/AnalysisDeclContext.h"
34 #include "clang/Analysis/CFG.h"
35 #include "clang/Basic/Builtins.h"
36 #include "clang/Basic/LLVM.h"
37 #include "clang/Basic/OperatorKinds.h"
38 #include "clang/Basic/SourceLocation.h"
39 #include "clang/Basic/Specifiers.h"
40 #include "llvm/ADT/ArrayRef.h"
41 #include "llvm/ADT/DenseMap.h"
42 #include "llvm/ADT/ImmutableMap.h"
43 #include "llvm/ADT/STLExtras.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/Support/Allocator.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <functional>
53 #include <iterator>
54 #include <memory>
55 #include <optional>
56 #include <string>
57 #include <type_traits>
58 #include <utility>
59 #include <vector>
60
61 using namespace clang;
62 using namespace threadSafety;
63
64 // Key method definition
65 ThreadSafetyHandler::~ThreadSafetyHandler() = default;
66
67 /// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
69 const Expr *MutexExp, const NamedDecl *D,
70 const Expr *DeclExp, StringRef Kind) {
71 SourceLocation Loc;
72 if (DeclExp)
73 Loc = DeclExp->getExprLoc();
74
75 // FIXME: add a note about the attribute location in MutexExp or D
76 if (Loc.isValid())
77 Handler.handleInvalidLockExp(Loc);
78 }
79
80 namespace {
81
82 /// A set of CapabilityExpr objects, which are compiled from thread safety
83 /// attributes on a function.
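///
/// For example (a sketch using the REQUIRES macro from the documentation), a
/// declaration such as
///
///   void transfer() REQUIRES(Mu1, Mu2);
///
/// is compiled into a CapExprSet holding one CapabilityExpr for Mu1 and one
/// for Mu2.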
84 class CapExprSet : public SmallVector<CapabilityExpr, 4> {
85 public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
88 if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
89 return CapE.equals(CapE2);
90 }))
91 push_back(CapE);
92 }
93 };
94
95 class FactManager;
96 class FactSet;
97
98 /// This is a helper class that stores a fact that is known at a
99 /// particular point in program execution. Currently, a fact is a capability,
100 /// along with additional information, such as where it was acquired, whether
101 /// it is exclusive or shared, etc.
102 ///
103 /// FIXME: this analysis does not currently support re-entrant locking.
104 class FactEntry : public CapabilityExpr {
105 public:
106 /// Where a fact comes from.
107 enum SourceKind {
108 Acquired, ///< The fact has been directly acquired.
109 Asserted, ///< The fact has been asserted to be held.
110 Declared, ///< The fact is assumed to be held by callers.
111 Managed, ///< The fact has been acquired through a scoped capability.
112 };
113
114 private:
115 /// Exclusive or shared.
116 LockKind LKind : 8;
117
  /// How it was acquired.
119 SourceKind Source : 8;
120
121 /// Where it was acquired.
122 SourceLocation AcquireLoc;
123
124 public:
  FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
126 SourceKind Src)
127 : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
128 virtual ~FactEntry() = default;
129
  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
132
  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }
136
137 virtual void
138 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
139 SourceLocation JoinLoc, LockErrorKind LEK,
140 ThreadSafetyHandler &Handler) const = 0;
141 virtual void handleLock(FactSet &FSet, FactManager &FactMan,
142 const FactEntry &entry,
143 ThreadSafetyHandler &Handler) const = 0;
144 virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
145 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
146 bool FullyRemove,
147 ThreadSafetyHandler &Handler) const = 0;
148
149 // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
151 return (LKind == LK_Exclusive) || (LK == LK_Shared);
152 }
153 };
154
155 using FactID = unsigned short;
156
157 /// FactManager manages the memory for all facts that are created during
158 /// the analysis of a single routine.
159 class FactManager {
160 private:
161 std::vector<std::unique_ptr<const FactEntry>> Facts;
162
163 public:
  FactID newFact(std::unique_ptr<FactEntry> Entry) {
165 Facts.push_back(std::move(Entry));
166 return static_cast<unsigned short>(Facts.size() - 1);
167 }
168
  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
170 };
171
172 /// A FactSet is the set of facts that are known to be true at a
173 /// particular program point. FactSets must be small, because they are
174 /// frequently copied, and are thus implemented as a set of indices into a
175 /// table maintained by a FactManager. A typical FactSet only holds 1 or 2
176 /// locks, so we can get away with doing a linear search for lookup. Note
177 /// that a hashtable or map is inappropriate in this case, because lookups
178 /// may involve partial pattern matches, rather than exact matches.
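///
/// As an illustrative example of a partial match: a lookup for 'otherObj->Mu'
/// may need to be matched against a held fact for 'thisObj->Mu' (same member,
/// different base object), which is why lookups use predicate-based matching
/// (matches / matchesUniv / partiallyMatches) over a small vector rather than
/// exact-key hashing.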
179 class FactSet {
180 private:
181 using FactVec = SmallVector<FactID, 4>;
182
183 FactVec FactIDs;
184
185 public:
186 using iterator = FactVec::iterator;
187 using const_iterator = FactVec::const_iterator;
188
  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }
191
  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }
194
  bool isEmpty() const { return FactIDs.size() == 0; }
196
197 // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
199 for (const auto FID : *this) {
200 if (!FactMan[FID].negative())
201 return false;
202 }
203 return true;
204 }
205
  void addLockByID(FactID ID) { FactIDs.push_back(ID); }
207
  FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
209 FactID F = FM.newFact(std::move(Entry));
210 FactIDs.push_back(F);
211 return F;
212 }
213
  bool removeLock(FactManager &FM, const CapabilityExpr &CapE) {
215 unsigned n = FactIDs.size();
216 if (n == 0)
217 return false;
218
219 for (unsigned i = 0; i < n-1; ++i) {
220 if (FM[FactIDs[i]].matches(CapE)) {
221 FactIDs[i] = FactIDs[n-1];
222 FactIDs.pop_back();
223 return true;
224 }
225 }
226 if (FM[FactIDs[n-1]].matches(CapE)) {
227 FactIDs.pop_back();
228 return true;
229 }
230 return false;
231 }
232
  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
234 return std::find_if(begin(), end(), [&](FactID ID) {
235 return FM[ID].matches(CapE);
236 });
237 }
238
  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
240 auto I = std::find_if(begin(), end(), [&](FactID ID) {
241 return FM[ID].matches(CapE);
242 });
243 return I != end() ? &FM[*I] : nullptr;
244 }
245
  const FactEntry *findLockUniv(FactManager &FM,
247 const CapabilityExpr &CapE) const {
248 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
249 return FM[ID].matchesUniv(CapE);
250 });
251 return I != end() ? &FM[*I] : nullptr;
252 }
253
  const FactEntry *findPartialMatch(FactManager &FM,
255 const CapabilityExpr &CapE) const {
256 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
257 return FM[ID].partiallyMatches(CapE);
258 });
259 return I != end() ? &FM[*I] : nullptr;
260 }
261
  bool containsMutexDecl(FactManager &FM, const ValueDecl *Vd) const {
263 auto I = std::find_if(begin(), end(), [&](FactID ID) -> bool {
264 return FM[ID].valueDecl() == Vd;
265 });
266 return I != end();
267 }
268 };
269
270 class ThreadSafetyAnalyzer;
271
272 } // namespace
273
274 namespace clang {
275 namespace threadSafety {
276
277 class BeforeSet {
278 private:
279 using BeforeVect = SmallVector<const ValueDecl *, 4>;
280
281 struct BeforeInfo {
282 BeforeVect Vect;
283 int Visited = 0;
284
285 BeforeInfo() = default;
286 BeforeInfo(BeforeInfo &&) = default;
287 };
288
289 using BeforeMap =
290 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
291 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
292
293 public:
294 BeforeSet() = default;
295
296 BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
297 ThreadSafetyAnalyzer& Analyzer);
298
299 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
300 ThreadSafetyAnalyzer &Analyzer);
301
302 void checkBeforeAfter(const ValueDecl* Vd,
303 const FactSet& FSet,
304 ThreadSafetyAnalyzer& Analyzer,
305 SourceLocation Loc, StringRef CapKind);
306
307 private:
308 BeforeMap BMap;
309 CycleMap CycMap;
310 };
311
312 } // namespace threadSafety
313 } // namespace clang
314
315 namespace {
316
317 class LocalVariableMap;
318
319 using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
320
321 /// A side (entry or exit) of a CFG node.
322 enum CFGBlockSide { CBS_Entry, CBS_Exit };
323
324 /// CFGBlockInfo is a struct which contains all the information that is
325 /// maintained for each block in the CFG. See LocalVariableMap for more
326 /// information about the contexts.
327 struct CFGBlockInfo {
328 // Lockset held at entry to block
329 FactSet EntrySet;
330
331 // Lockset held at exit from block
332 FactSet ExitSet;
333
334 // Context held at entry to block
335 LocalVarContext EntryContext;
336
337 // Context held at exit from block
338 LocalVarContext ExitContext;
339
340 // Location of first statement in block
341 SourceLocation EntryLoc;
342
343 // Location of last statement in block.
344 SourceLocation ExitLoc;
345
346 // Used to replay contexts later
347 unsigned EntryIndex;
348
349 // Is this block reachable?
350 bool Reachable = false;
351
  const FactSet &getSet(CFGBlockSide Side) const {
353 return Side == CBS_Entry ? EntrySet : ExitSet;
354 }
355
  SourceLocation getLocation(CFGBlockSide Side) const {
357 return Side == CBS_Entry ? EntryLoc : ExitLoc;
358 }
359
360 private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
362 : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
363
364 public:
365 static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
366 };
367
368 // A LocalVariableMap maintains a map from local variables to their currently
369 // valid definitions. It provides SSA-like functionality when traversing the
370 // CFG. Like SSA, each definition or assignment to a variable is assigned a
371 // unique name (an integer), which acts as the SSA name for that definition.
372 // The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
374 // with their SSA-names. Instead, we compute a Context for each point in the
375 // code, which maps local variables to the appropriate SSA-name. This map
376 // changes with each assignment.
377 //
378 // The map is computed in a single pass over the CFG. Subsequent analyses can
379 // then query the map to find the appropriate Context for a statement, and use
380 // that Context to look up the definitions of variables.
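//
// A small sketch of the idea (see traverseCFG below for a fuller example):
//
//   int x = 1;    // Context: { x -> x1 }, VarDefinitions: x1 = '1'
//   x = y + 1;    // Context: { x -> x2 }, x2 = 'y + 1' in the previous context
//
// A later analysis can take the Context saved for a statement and look up the
// defining expression for each variable that is still statically known.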
381 class LocalVariableMap {
382 public:
383 using Context = LocalVarContext;
384
385 /// A VarDefinition consists of an expression, representing the value of the
386 /// variable, along with the context in which that expression should be
387 /// interpreted. A reference VarDefinition does not itself contain this
388 /// information, but instead contains a pointer to a previous VarDefinition.
389 struct VarDefinition {
390 public:
391 friend class LocalVariableMap;
392
393 // The original declaration for this variable.
394 const NamedDecl *Dec;
395
396 // The expression for this variable, OR
397 const Expr *Exp = nullptr;
398
399 // Reference to another VarDefinition
400 unsigned Ref = 0;
401
402 // The map with which Exp should be interpreted.
403 Context Ctx;
404
    bool isReference() { return !Exp; }
406
407 private:
408 // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
410 : Dec(D), Exp(E), Ctx(C) {}
411
412 // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
414 : Dec(D), Ref(R), Ctx(C) {}
415 };
416
417 private:
418 Context::Factory ContextFactory;
419 std::vector<VarDefinition> VarDefinitions;
420 std::vector<std::pair<const Stmt *, Context>> SavedContexts;
421
422 public:
  LocalVariableMap() {
424 // index 0 is a placeholder for undefined variables (aka phi-nodes).
425 VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
426 }
427
428 /// Look up a definition, within the given context.
  const VarDefinition *lookup(const NamedDecl *D, Context Ctx) {
430 const unsigned *i = Ctx.lookup(D);
431 if (!i)
432 return nullptr;
433 assert(*i < VarDefinitions.size());
434 return &VarDefinitions[*i];
435 }
436
437 /// Look up the definition for D within the given context. Returns
438 /// NULL if the expression is not statically known. If successful, also
439 /// modifies Ctx to hold the context of the return Expr.
  const Expr *lookupExpr(const NamedDecl *D, Context &Ctx) {
441 const unsigned *P = Ctx.lookup(D);
442 if (!P)
443 return nullptr;
444
445 unsigned i = *P;
446 while (i > 0) {
447 if (VarDefinitions[i].Exp) {
448 Ctx = VarDefinitions[i].Ctx;
449 return VarDefinitions[i].Exp;
450 }
451 i = VarDefinitions[i].Ref;
452 }
453 return nullptr;
454 }
455
  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
457
458 /// Return the next context after processing S. This function is used by
459 /// clients of the class to get the appropriate context when traversing the
460 /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
462 if (SavedContexts[CtxIndex+1].first == S) {
463 CtxIndex++;
464 Context Result = SavedContexts[CtxIndex].second;
465 return Result;
466 }
467 return C;
468 }
469
  void dumpVarDefinitionName(unsigned i) {
471 if (i == 0) {
472 llvm::errs() << "Undefined";
473 return;
474 }
475 const NamedDecl *Dec = VarDefinitions[i].Dec;
476 if (!Dec) {
477 llvm::errs() << "<<NULL>>";
478 return;
479 }
480 Dec->printName(llvm::errs());
481 llvm::errs() << "." << i << " " << ((const void*) Dec);
482 }
483
484 /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
486 for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
487 const Expr *Exp = VarDefinitions[i].Exp;
488 unsigned Ref = VarDefinitions[i].Ref;
489
490 dumpVarDefinitionName(i);
491 llvm::errs() << " = ";
492 if (Exp) Exp->dump();
493 else {
494 dumpVarDefinitionName(Ref);
495 llvm::errs() << "\n";
496 }
497 }
498 }
499
500 /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
502 for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
503 const NamedDecl *D = I.getKey();
504 D->printName(llvm::errs());
505 const unsigned *i = C.lookup(D);
506 llvm::errs() << " -> ";
507 dumpVarDefinitionName(*i);
508 llvm::errs() << "\n";
509 }
510 }
511
512 /// Builds the variable map.
513 void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
514 std::vector<CFGBlockInfo> &BlockInfo);
515
516 protected:
517 friend class VarMapBuilder;
518
519 // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size() - 1; }
521
522 // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
524 SavedContexts.push_back(std::make_pair(S, C));
525 }
526
527 // Adds a new definition to the given context, and returns a new context.
528 // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
530 assert(!Ctx.contains(D));
531 unsigned newID = VarDefinitions.size();
532 Context NewCtx = ContextFactory.add(Ctx, D, newID);
533 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
534 return NewCtx;
535 }
536
537 // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
539 unsigned newID = VarDefinitions.size();
540 Context NewCtx = ContextFactory.add(Ctx, D, newID);
541 VarDefinitions.push_back(VarDefinition(D, i, Ctx));
542 return NewCtx;
543 }
544
545 // Updates a definition only if that definition is already in the map.
546 // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
548 if (Ctx.contains(D)) {
549 unsigned newID = VarDefinitions.size();
550 Context NewCtx = ContextFactory.remove(Ctx, D);
551 NewCtx = ContextFactory.add(NewCtx, D, newID);
552 VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
553 return NewCtx;
554 }
555 return Ctx;
556 }
557
558 // Removes a definition from the context, but keeps the variable name
559 // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
561 Context NewCtx = Ctx;
562 if (NewCtx.contains(D)) {
563 NewCtx = ContextFactory.remove(NewCtx, D);
564 NewCtx = ContextFactory.add(NewCtx, D, 0);
565 }
566 return NewCtx;
567 }
568
  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
571 Context NewCtx = Ctx;
572 if (NewCtx.contains(D)) {
573 NewCtx = ContextFactory.remove(NewCtx, D);
574 }
575 return NewCtx;
576 }
577
578 Context intersectContexts(Context C1, Context C2);
579 Context createReferenceContext(Context C);
580 void intersectBackEdge(Context C1, Context C2);
581 };
582
583 } // namespace
584
585 // This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
587 return CFGBlockInfo(M.getEmptyContext());
588 }
589
590 namespace {
591
592 /// Visitor which builds a LocalVariableMap
593 class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
594 public:
595 LocalVariableMap* VMap;
596 LocalVariableMap::Context Ctx;
597
  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
599 : VMap(VM), Ctx(C) {}
600
601 void VisitDeclStmt(const DeclStmt *S);
602 void VisitBinaryOperator(const BinaryOperator *BO);
603 };
604
605 } // namespace
606
607 // Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
609 bool modifiedCtx = false;
610 const DeclGroupRef DGrp = S->getDeclGroup();
611 for (const auto *D : DGrp) {
612 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
613 const Expr *E = VD->getInit();
614
615 // Add local variables with trivial type to the variable map
616 QualType T = VD->getType();
617 if (T.isTrivialType(VD->getASTContext())) {
618 Ctx = VMap->addDefinition(VD, E, Ctx);
619 modifiedCtx = true;
620 }
621 }
622 }
623 if (modifiedCtx)
624 VMap->saveContext(S, Ctx);
625 }
626
627 // Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
629 if (!BO->isAssignmentOp())
630 return;
631
632 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
633
634 // Update the variable map and current context.
635 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
636 const ValueDecl *VDec = DRE->getDecl();
637 if (Ctx.lookup(VDec)) {
638 if (BO->getOpcode() == BO_Assign)
639 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
640 else
641 // FIXME -- handle compound assignment operators
642 Ctx = VMap->clearDefinition(VDec, Ctx);
643 VMap->saveContext(BO, Ctx);
644 }
645 }
646 }
647
648 // Computes the intersection of two contexts. The intersection is the
649 // set of variables which have the same definition in both contexts;
650 // variables with different definitions are discarded.
651 LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
653 Context Result = C1;
654 for (const auto &P : C1) {
655 const NamedDecl *Dec = P.first;
656 const unsigned *i2 = C2.lookup(Dec);
657 if (!i2) // variable doesn't exist on second path
658 Result = removeDefinition(Dec, Result);
659 else if (*i2 != P.second) // variable exists, but has different definition
660 Result = clearDefinition(Dec, Result);
661 }
662 return Result;
663 }
664
665 // For every variable in C, create a new variable that refers to the
666 // definition in C. Return a new context that contains these new variables.
667 // (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
669 Context Result = getEmptyContext();
670 for (const auto &P : C)
671 Result = addReference(P.first, P.second, Result);
672 return Result;
673 }
674
675 // This routine also takes the intersection of C1 and C2, but it does so by
676 // altering the VarDefinitions. C1 must be the result of an earlier call to
677 // createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
679 for (const auto &P : C1) {
680 unsigned i1 = P.second;
681 VarDefinition *VDef = &VarDefinitions[i1];
682 assert(VDef->isReference());
683
684 const unsigned *i2 = C2.lookup(P.first);
685 if (!i2 || (*i2 != i1))
686 VDef->Ref = 0; // Mark this variable as undefined
687 }
688 }
689
690 // Traverse the CFG in topological order, so all predecessors of a block
691 // (excluding back-edges) are visited before the block itself. At
692 // each point in the code, we calculate a Context, which holds the set of
693 // variable definitions which are visible at that point in execution.
694 // Visible variables are mapped to their definitions using an array that
695 // contains all definitions.
696 //
697 // At join points in the CFG, the set is computed as the intersection of
698 // the incoming sets along each edge, E.g.
699 //
700 // { Context | VarDefinitions }
701 // int x = 0; { x -> x1 | x1 = 0 }
702 // int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
703 // if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
704 // else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
705 // ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
706 //
707 // This is essentially a simpler and more naive version of the standard SSA
708 // algorithm. Those definitions that remain in the intersection are from blocks
709 // that strictly dominate the current block. We do not bother to insert proper
710 // phi nodes, because they are not used in our analysis; instead, wherever
711 // a phi node would be required, we simply remove that definition from the
712 // context (E.g. x above).
713 //
714 // The initial traversal does not capture back-edges, so those need to be
715 // handled on a separate pass. Whenever the first pass encounters an
716 // incoming back edge, it duplicates the context, creating new definitions
717 // that refer back to the originals. (These correspond to places where SSA
718 // might have to insert a phi node.) On the second pass, these definitions are
719 // set to NULL if the variable has changed on the back-edge (i.e. a phi
720 // node was actually required.) E.g.
721 //
722 // { Context | VarDefinitions }
723 // int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
724 // while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
725 // x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
726 // ... { y -> y1 | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
728 const PostOrderCFGView *SortedGraph,
729 std::vector<CFGBlockInfo> &BlockInfo) {
730 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
731
732 for (const auto *CurrBlock : *SortedGraph) {
733 unsigned CurrBlockID = CurrBlock->getBlockID();
734 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
735
736 VisitedBlocks.insert(CurrBlock);
737
738 // Calculate the entry context for the current block
739 bool HasBackEdges = false;
740 bool CtxInit = true;
741 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
742 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
744 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
745 HasBackEdges = true;
746 continue;
747 }
748
749 unsigned PrevBlockID = (*PI)->getBlockID();
750 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
751
752 if (CtxInit) {
753 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
754 CtxInit = false;
755 }
756 else {
757 CurrBlockInfo->EntryContext =
758 intersectContexts(CurrBlockInfo->EntryContext,
759 PrevBlockInfo->ExitContext);
760 }
761 }
762
763 // Duplicate the context if we have back-edges, so we can call
764 // intersectBackEdges later.
765 if (HasBackEdges)
766 CurrBlockInfo->EntryContext =
767 createReferenceContext(CurrBlockInfo->EntryContext);
768
769 // Create a starting context index for the current block
770 saveContext(nullptr, CurrBlockInfo->EntryContext);
771 CurrBlockInfo->EntryIndex = getContextIndex();
772
773 // Visit all the statements in the basic block.
774 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
775 for (const auto &BI : *CurrBlock) {
776 switch (BI.getKind()) {
777 case CFGElement::Statement: {
778 CFGStmt CS = BI.castAs<CFGStmt>();
779 VMapBuilder.Visit(CS.getStmt());
780 break;
781 }
782 default:
783 break;
784 }
785 }
786 CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
787
788 // Mark variables on back edges as "unknown" if they've been changed.
789 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
790 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // If CurrBlock -> *SI is *not* a back edge, skip it.
792 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
793 continue;
794
795 CFGBlock *FirstLoopBlock = *SI;
796 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
797 Context LoopEnd = CurrBlockInfo->ExitContext;
798 intersectBackEdge(LoopBegin, LoopEnd);
799 }
800 }
801
802 // Put an extra entry at the end of the indexed context array
803 unsigned exitID = CFGraph->getExit().getBlockID();
804 saveContext(nullptr, BlockInfo[exitID].ExitContext);
805 }
806
807 /// Find the appropriate source locations to use when producing diagnostics for
808 /// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
810 const PostOrderCFGView *SortedGraph,
811 std::vector<CFGBlockInfo> &BlockInfo) {
812 for (const auto *CurrBlock : *SortedGraph) {
813 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
814
815 // Find the source location of the last statement in the block, if the
816 // block is not empty.
817 if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
818 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
819 } else {
820 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
821 BE = CurrBlock->rend(); BI != BE; ++BI) {
822 // FIXME: Handle other CFGElement kinds.
823 if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
824 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
825 break;
826 }
827 }
828 }
829
830 if (CurrBlockInfo->ExitLoc.isValid()) {
831 // This block contains at least one statement. Find the source location
832 // of the first statement in the block.
833 for (const auto &BI : *CurrBlock) {
834 // FIXME: Handle other CFGElement kinds.
835 if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
836 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
837 break;
838 }
839 }
840 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
841 CurrBlock != &CFGraph->getExit()) {
842 // The block is empty, and has a single predecessor. Use its exit
843 // location.
844 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
845 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
846 } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
847 // The block is empty, and has a single successor. Use its entry
848 // location.
849 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
850 BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
851 }
852 }
853 }
854
855 namespace {
856
857 class LockableFactEntry : public FactEntry {
858 public:
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
860 SourceKind Src = Acquired)
861 : FactEntry(CE, LK, Loc, Src) {}
862
863 void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
865 SourceLocation JoinLoc, LockErrorKind LEK,
866 ThreadSafetyHandler &Handler) const override {
867 if (!asserted() && !negative() && !isUniversal()) {
868 Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
869 LEK);
870 }
871 }
872
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
874 ThreadSafetyHandler &Handler) const override {
875 Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
876 entry.loc());
877 }
878
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
880 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
881 bool FullyRemove,
882 ThreadSafetyHandler &Handler) const override {
883 FSet.removeLock(FactMan, Cp);
884 if (!Cp.negative()) {
885 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
886 !Cp, LK_Exclusive, UnlockLoc));
887 }
888 }
889 };
890
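/// A fact entry for a scoped capability object (e.g. a RAII guard declared
/// with the documented SCOPED_CAPABILITY attribute). A sketch of the user
/// code this models:
///
///   void f() {
///     MutexLock Scope(&Mu);  // acquires Mu and records a fact for 'Scope'
///     Data = 42;             // ok: Mu is held via the scoped object
///   }                        // Scope's destructor releases Mu
///
/// UnderlyingMutexes records which capabilities the scoped object manages and
/// whether it acquired or released them.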
891 class ScopedLockableFactEntry : public FactEntry {
892 private:
893 enum UnderlyingCapabilityKind {
894 UCK_Acquired, ///< Any kind of acquired capability.
895 UCK_ReleasedShared, ///< Shared capability that was released.
896 UCK_ReleasedExclusive, ///< Exclusive capability that was released.
897 };
898
899 struct UnderlyingCapability {
900 CapabilityExpr Cap;
901 UnderlyingCapabilityKind Kind;
902 };
903
904 SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
905
906 public:
  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
908 : FactEntry(CE, LK_Exclusive, Loc, Acquired) {}
909
  void addLock(const CapabilityExpr &M) {
911 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
912 }
913
  void addExclusiveUnlock(const CapabilityExpr &M) {
915 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
916 }
917
  void addSharedUnlock(const CapabilityExpr &M) {
919 UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
920 }
921
922 void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
924 SourceLocation JoinLoc, LockErrorKind LEK,
925 ThreadSafetyHandler &Handler) const override {
926 for (const auto &UnderlyingMutex : UnderlyingMutexes) {
927 const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
928 if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
929 (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and the underlying mutex
        // is still held (for an acquired one) or no longer held (for a
        // released one), then warn about the underlying mutex.
932 Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
933 UnderlyingMutex.Cap.toString(), loc(),
934 JoinLoc, LEK);
935 }
936 }
937 }
938
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
940 ThreadSafetyHandler &Handler) const override {
941 for (const auto &UnderlyingMutex : UnderlyingMutexes) {
942 if (UnderlyingMutex.Kind == UCK_Acquired)
943 lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
944 &Handler);
945 else
946 unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
947 }
948 }
949
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
951 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
952 bool FullyRemove,
953 ThreadSafetyHandler &Handler) const override {
954 assert(!Cp.negative() && "Managing object cannot be negative.");
955 for (const auto &UnderlyingMutex : UnderlyingMutexes) {
956 // Remove/lock the underlying mutex if it exists/is still unlocked; warn
957 // on double unlocking/locking if we're not destroying the scoped object.
958 ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
959 if (UnderlyingMutex.Kind == UCK_Acquired) {
960 unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
961 } else {
962 LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
963 ? LK_Shared
964 : LK_Exclusive;
965 lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
966 }
967 }
968 if (FullyRemove)
969 FSet.removeLock(FactMan, Cp);
970 }
971
972 private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
974 LockKind kind, SourceLocation loc,
975 ThreadSafetyHandler *Handler) const {
976 if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
977 if (Handler)
978 Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
979 loc);
980 } else {
981 FSet.removeLock(FactMan, !Cp);
982 FSet.addLock(FactMan,
983 std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
984 }
985 }
986
  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
988 SourceLocation loc, ThreadSafetyHandler *Handler) const {
989 if (FSet.findLock(FactMan, Cp)) {
990 FSet.removeLock(FactMan, Cp);
991 FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
992 !Cp, LK_Exclusive, loc));
993 } else if (Handler) {
994 SourceLocation PrevLoc;
995 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
996 PrevLoc = Neg->loc();
997 Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
998 }
999 }
1000 };
1001
1002 /// Class which implements the core thread safety analysis routines.
1003 class ThreadSafetyAnalyzer {
1004 friend class BuildLockset;
1005 friend class threadSafety::BeforeSet;
1006
1007 llvm::BumpPtrAllocator Bpa;
1008 threadSafety::til::MemRegionRef Arena;
1009 threadSafety::SExprBuilder SxBuilder;
1010
1011 ThreadSafetyHandler &Handler;
1012 const CXXMethodDecl *CurrentMethod;
1013 LocalVariableMap LocalVarMap;
1014 FactManager FactMan;
1015 std::vector<CFGBlockInfo> BlockInfo;
1016
1017 BeforeSet *GlobalBeforeSet;
1018
1019 public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
1021 : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
1022
1023 bool inCurrentScope(const CapabilityExpr &CapE);
1024
1025 void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
1026 bool ReqAttr = false);
1027 void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
1028 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
1029
1030 template <typename AttrType>
1031 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1032 const NamedDecl *D, til::SExpr *Self = nullptr);
1033
1034 template <class AttrType>
1035 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1036 const NamedDecl *D,
1037 const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
1038 Expr *BrE, bool Neg);
1039
1040 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
1041 bool &Negate);
1042
1043 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
1044 const CFGBlock* PredBlock,
1045 const CFGBlock *CurrBlock);
1046
1047 bool join(const FactEntry &a, const FactEntry &b, bool CanModify);
1048
1049 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1050 SourceLocation JoinLoc, LockErrorKind EntryLEK,
1051 LockErrorKind ExitLEK);
1052
  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1054 SourceLocation JoinLoc, LockErrorKind LEK) {
1055 intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
1056 }
1057
1058 void runAnalysis(AnalysisDeclContext &AC);
1059 };
1060
1061 } // namespace
1062
1063 /// Process acquired_before and acquired_after attributes on Vd.
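/// For example (a sketch using the documented macros), given
///
///   Mutex M1;
///   Mutex M2 ACQUIRED_AFTER(M1);
///
/// the analysis records that M1 must be acquired before M2, and will later
/// warn if M1 is acquired while M2 is already held.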
BeforeSet::BeforeInfo *BeforeSet::insertAttrExprs(const ValueDecl *Vd,
1065 ThreadSafetyAnalyzer& Analyzer) {
1066 // Create a new entry for Vd.
1067 BeforeInfo *Info = nullptr;
1068 {
1069 // Keep InfoPtr in its own scope in case BMap is modified later and the
1070 // reference becomes invalid.
1071 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
1072 if (!InfoPtr)
1073 InfoPtr.reset(new BeforeInfo());
1074 Info = InfoPtr.get();
1075 }
1076
1077 for (const auto *At : Vd->attrs()) {
1078 switch (At->getKind()) {
1079 case attr::AcquiredBefore: {
1080 const auto *A = cast<AcquiredBeforeAttr>(At);
1081
1082 // Read exprs from the attribute, and add them to BeforeVect.
1083 for (const auto *Arg : A->args()) {
1084 CapabilityExpr Cp =
1085 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1086 if (const ValueDecl *Cpvd = Cp.valueDecl()) {
1087 Info->Vect.push_back(Cpvd);
1088 const auto It = BMap.find(Cpvd);
1089 if (It == BMap.end())
1090 insertAttrExprs(Cpvd, Analyzer);
1091 }
1092 }
1093 break;
1094 }
1095 case attr::AcquiredAfter: {
1096 const auto *A = cast<AcquiredAfterAttr>(At);
1097
1098 // Read exprs from the attribute, and add them to BeforeVect.
1099 for (const auto *Arg : A->args()) {
1100 CapabilityExpr Cp =
1101 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1102 if (const ValueDecl *ArgVd = Cp.valueDecl()) {
1103 // Get entry for mutex listed in attribute
1104 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
1105 ArgInfo->Vect.push_back(Vd);
1106 }
1107 }
1108 break;
1109 }
1110 default:
1111 break;
1112 }
1113 }
1114
1115 return Info;
1116 }
1117
1118 BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
1120 ThreadSafetyAnalyzer &Analyzer) {
1121 auto It = BMap.find(Vd);
1122 BeforeInfo *Info = nullptr;
1123 if (It == BMap.end())
1124 Info = insertAttrExprs(Vd, Analyzer);
1125 else
1126 Info = It->second.get();
1127 assert(Info && "BMap contained nullptr?");
1128 return Info;
1129 }
1130
/// Warn if any mutexes in FSet are in the acquired_before set of StartVd.
void BeforeSet::checkBeforeAfter(const ValueDecl *StartVd,
1133 const FactSet& FSet,
1134 ThreadSafetyAnalyzer& Analyzer,
1135 SourceLocation Loc, StringRef CapKind) {
1136 SmallVector<BeforeInfo*, 8> InfoVect;
1137
1138 // Do a depth-first traversal of Vd.
1139 // Return true if there are cycles.
1140 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
1141 if (!Vd)
1142 return false;
1143
1144 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
1145
1146 if (Info->Visited == 1)
1147 return true;
1148
1149 if (Info->Visited == 2)
1150 return false;
1151
1152 if (Info->Vect.empty())
1153 return false;
1154
1155 InfoVect.push_back(Info);
1156 Info->Visited = 1;
1157 for (const auto *Vdb : Info->Vect) {
1158 // Exclude mutexes in our immediate before set.
1159 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
1160 StringRef L1 = StartVd->getName();
1161 StringRef L2 = Vdb->getName();
1162 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
1163 }
1164 // Transitively search other before sets, and warn on cycles.
1165 if (traverse(Vdb)) {
1166 if (CycMap.find(Vd) == CycMap.end()) {
1167 CycMap.insert(std::make_pair(Vd, true));
1168 StringRef L1 = Vd->getName();
1169 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
1170 }
1171 }
1172 }
1173 Info->Visited = 2;
1174 return false;
1175 };
1176
1177 traverse(StartVd);
1178
1179 for (auto *Info : InfoVect)
1180 Info->Visited = 0;
1181 }
1182
1183 /// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
1185 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1186 return getValueDecl(CE->getSubExpr());
1187
1188 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1189 return DR->getDecl();
1190
1191 if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1192 return ME->getMemberDecl();
1193
1194 return nullptr;
1195 }
1196
1197 namespace {
1198
1199 template <typename Ty>
1200 class has_arg_iterator_range {
1201 using yes = char[1];
1202 using no = char[2];
1203
1204 template <typename Inner>
1205 static yes& test(Inner *I, decltype(I->args()) * = nullptr);
1206
1207 template <typename>
1208 static no& test(...);
1209
1210 public:
1211 static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
1212 };
1213
1214 } // namespace
1215
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1217 const threadSafety::til::SExpr *SExp = CapE.sexpr();
1218 assert(SExp && "Null expressions should be ignored");
1219
1220 if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
1221 const ValueDecl *VD = LP->clangDecl();
1222 // Variables defined in a function are always inaccessible.
1223 if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1224 return false;
1225 // For now we consider static class members to be inaccessible.
1226 if (isa<CXXRecordDecl>(VD->getDeclContext()))
1227 return false;
1228 // Global variables are always in scope.
1229 return true;
1230 }
1231
1232 // Members are in scope from methods of the same class.
1233 if (const auto *P = dyn_cast<til::Project>(SExp)) {
1234 if (!CurrentMethod)
1235 return false;
1236 const ValueDecl *VD = P->clangDecl();
1237 return VD->getDeclContext() == CurrentMethod->getDeclContext();
1238 }
1239
1240 return false;
1241 }
1242
1243 /// Add a new lock to the lockset, warning if the lock is already there.
1244 /// \param ReqAttr -- true if this is part of an initial Requires attribute.
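///
/// A sketch of the negative-capability handling done here:
///
///   Mu.Unlock();   // the lockset now holds the negative capability '!Mu'
///   Mu.Lock();     // '!Mu' is found and removed; no warning
///
/// Acquiring a mutex in the current scope when '!Mu' is not in the lockset
/// (and the acquisition is not asserted) calls handleNegativeNotHeld, which
/// backs the -Wthread-safety-negative diagnostics.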
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
1246 std::unique_ptr<FactEntry> Entry,
1247 bool ReqAttr) {
1248 if (Entry->shouldIgnore())
1249 return;
1250
1251 if (!ReqAttr && !Entry->negative()) {
1252 // look for the negative capability, and remove it from the fact set.
1253 CapabilityExpr NegC = !*Entry;
1254 const FactEntry *Nen = FSet.findLock(FactMan, NegC);
1255 if (Nen) {
1256 FSet.removeLock(FactMan, NegC);
1257 }
1258 else {
1259 if (inCurrentScope(*Entry) && !Entry->asserted())
1260 Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
1261 NegC.toString(), Entry->loc());
1262 }
1263 }
1264
1265 // Check before/after constraints
1266 if (Handler.issueBetaWarnings() &&
1267 !Entry->asserted() && !Entry->declared()) {
1268 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
1269 Entry->loc(), Entry->getKind());
1270 }
1271
1272 // FIXME: Don't always warn when we have support for reentrant locks.
1273 if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
1274 if (!Entry->asserted())
1275 Cp->handleLock(FSet, FactMan, *Entry, Handler);
1276 } else {
1277 FSet.addLock(FactMan, std::move(Entry));
1278 }
1279 }
1280
1281 /// Remove a lock from the lockset, warning if the lock is not there.
1282 /// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1284 SourceLocation UnlockLoc,
1285 bool FullyRemove, LockKind ReceivedKind) {
1286 if (Cp.shouldIgnore())
1287 return;
1288
1289 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1290 if (!LDat) {
1291 SourceLocation PrevLoc;
1292 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
1293 PrevLoc = Neg->loc();
1294 Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
1295 PrevLoc);
1296 return;
1297 }
1298
1299 // Generic lock removal doesn't care about lock kind mismatches, but
1300 // otherwise diagnose when the lock kinds are mismatched.
1301 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1302 Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
1303 ReceivedKind, LDat->loc(), UnlockLoc);
1304 }
1305
1306 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
1307 }
1308
1309 /// Extract the list of mutexIDs from the attribute on an expression,
1310 /// and push them onto Mtxs, discarding any duplicates.
1311 template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1313 const Expr *Exp, const NamedDecl *D,
1314 til::SExpr *Self) {
1315 if (Attr->args_size() == 0) {
1316 // The mutex held is the "this" object.
1317 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
1318 if (Cp.isInvalid()) {
1319 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1320 return;
1321 }
1322 //else
1323 if (!Cp.shouldIgnore())
1324 Mtxs.push_back_nodup(Cp);
1325 return;
1326 }
1327
1328 for (const auto *Arg : Attr->args()) {
1329 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1330 if (Cp.isInvalid()) {
1331 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1332 continue;
1333 }
1334 //else
1335 if (!Cp.shouldIgnore())
1336 Mtxs.push_back_nodup(Cp);
1337 }
1338 }
1339
1340 /// Extract the list of mutexIDs from a trylock attribute. If the
1341 /// trylock applies to the given edge, then push them onto Mtxs, discarding
1342 /// any duplicates.
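///
/// For example (a sketch using the documented TRY_ACQUIRE annotation), given
/// a method 'bool Mutex::TryLock() TRY_ACQUIRE(true)', in
///
///   if (Mu.TryLock()) { ... }
///
/// the successor block on the success branch gets 'Mu' added to its lockset,
/// while the failure branch does not.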
1343 template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1345 const Expr *Exp, const NamedDecl *D,
1346 const CFGBlock *PredBlock,
1347 const CFGBlock *CurrBlock,
1348 Expr *BrE, bool Neg) {
1349 // Find out which branch has the lock
1350 bool branch = false;
1351 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1352 branch = BLE->getValue();
1353 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1354 branch = ILE->getValue().getBoolValue();
1355
1356 int branchnum = branch ? 0 : 1;
1357 if (Neg)
1358 branchnum = !branchnum;
1359
1360 // If we've taken the trylock branch, then add the lock
1361 int i = 0;
1362 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1363 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1364 if (*SI == CurrBlock && i == branchnum)
1365 getMutexIDs(Mtxs, Attr, Exp, D);
1366 }
1367 }
1368
static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1370 if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
1371 TCond = false;
1372 return true;
1373 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1374 TCond = BLE->getValue();
1375 return true;
1376 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1377 TCond = ILE->getValue().getBoolValue();
1378 return true;
1379 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1380 return getStaticBooleanValue(CE->getSubExpr(), TCond);
1381 return false;
1382 }
1383
1384 // If Cond can be traced back to a function call, return the call expression.
// Negate should be initialized to false; it will be set to true
1386 // if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr *ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
1388 LocalVarContext C,
1389 bool &Negate) {
1390 if (!Cond)
1391 return nullptr;
1392
1393 if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
1394 if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
1395 return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
1396 return CallExp;
1397 }
1398 else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
1399 return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
1400 else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
1401 return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
1402 else if (const auto *FE = dyn_cast<FullExpr>(Cond))
1403 return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
1404 else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
1405 const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
1406 return getTrylockCallExpr(E, C, Negate);
1407 }
1408 else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
1409 if (UOP->getOpcode() == UO_LNot) {
1410 Negate = !Negate;
1411 return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
1412 }
1413 return nullptr;
1414 }
1415 else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
1416 if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
1417 if (BOP->getOpcode() == BO_NE)
1418 Negate = !Negate;
1419
1420 bool TCond = false;
1421 if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
1422 if (!TCond) Negate = !Negate;
1423 return getTrylockCallExpr(BOP->getLHS(), C, Negate);
1424 }
1425 TCond = false;
1426 if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
1427 if (!TCond) Negate = !Negate;
1428 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1429 }
1430 return nullptr;
1431 }
1432 if (BOP->getOpcode() == BO_LAnd) {
1433 // LHS must have been evaluated in a different block.
1434 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1435 }
1436 if (BOP->getOpcode() == BO_LOr)
1437 return getTrylockCallExpr(BOP->getRHS(), C, Negate);
1438 return nullptr;
1439 } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
1440 bool TCond, FCond;
1441 if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
1442 getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
1443 if (TCond && !FCond)
1444 return getTrylockCallExpr(COP->getCond(), C, Negate);
1445 if (!TCond && FCond) {
1446 Negate = !Negate;
1447 return getTrylockCallExpr(COP->getCond(), C, Negate);
1448 }
1449 }
1450 }
1451 return nullptr;
1452 }
1453
1454 /// Find the lockset that holds on the edge between PredBlock
1455 /// and CurrBlock. The edge set is the exit set of PredBlock (passed
1456 /// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet &Result,
1458 const FactSet &ExitSet,
1459 const CFGBlock *PredBlock,
1460 const CFGBlock *CurrBlock) {
1461 Result = ExitSet;
1462
1463 const Stmt *Cond = PredBlock->getTerminatorCondition();
1464 // We don't acquire try-locks on ?: branches, only when its result is used.
1465 if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
1466 return;
1467
1468 bool Negate = false;
1469 const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
1470 const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
1471
1472 const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
1473 if (!Exp)
1474 return;
1475
1476 auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
1477 if(!FunDecl || !FunDecl->hasAttrs())
1478 return;
1479
1480 CapExprSet ExclusiveLocksToAdd;
1481 CapExprSet SharedLocksToAdd;
1482
1483 // If the condition is a call to a Trylock function, then grab the attributes
1484 for (const auto *Attr : FunDecl->attrs()) {
1485 switch (Attr->getKind()) {
1486 case attr::TryAcquireCapability: {
1487 auto *A = cast<TryAcquireCapabilityAttr>(Attr);
1488 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
1489 Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
1490 Negate);
1491 break;
1492 };
1493 case attr::ExclusiveTrylockFunction: {
1494 const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
1495 getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
1496 A->getSuccessValue(), Negate);
1497 break;
1498 }
1499 case attr::SharedTrylockFunction: {
1500 const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
1501 getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
1502 A->getSuccessValue(), Negate);
1503 break;
1504 }
1505 default:
1506 break;
1507 }
1508 }
1509
1510 // Add and remove locks.
1511 SourceLocation Loc = Exp->getExprLoc();
1512 for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
1513 addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
1514 LK_Exclusive, Loc));
1515 for (const auto &SharedLockToAdd : SharedLocksToAdd)
1516 addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
1517 LK_Shared, Loc));
1518 }
1519
1520 namespace {
1521
1522 /// We use this class to visit different types of expressions in
1523 /// CFGBlocks, and build up the lockset.
1524 /// An expression may cause us to add or remove locks from the lockset, or else
1525 /// output error messages related to missing locks.
1526 /// FIXME: In future, we may be able to not inherit from a visitor.
1527 class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1528 friend class ThreadSafetyAnalyzer;
1529
1530 ThreadSafetyAnalyzer *Analyzer;
1531 FactSet FSet;
1532 /// Maps constructed objects to `this` placeholder prior to initialization.
1533 llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
1534 LocalVariableMap::Context LVarCtx;
1535 unsigned CtxIndex;
1536
1537 // helper functions
1538 void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
1539 Expr *MutexExp, ProtectedOperationKind POK,
1540 til::LiteralPtr *Self, SourceLocation Loc);
1541 void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
1542 til::LiteralPtr *Self, SourceLocation Loc);
1543
1544 void checkAccess(const Expr *Exp, AccessKind AK,
1545 ProtectedOperationKind POK = POK_VarAccess);
1546 void checkPtAccess(const Expr *Exp, AccessKind AK,
1547 ProtectedOperationKind POK = POK_VarAccess);
1548
1549 void handleCall(const Expr *Exp, const NamedDecl *D,
1550 til::LiteralPtr *Self = nullptr,
1551 SourceLocation Loc = SourceLocation());
1552 void examineArguments(const FunctionDecl *FD,
1553 CallExpr::const_arg_iterator ArgBegin,
1554 CallExpr::const_arg_iterator ArgEnd,
1555 bool SkipFirstParam = false);
1556
1557 public:
1558 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
1559 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1560 LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
1561
1562 void VisitUnaryOperator(const UnaryOperator *UO);
1563 void VisitBinaryOperator(const BinaryOperator *BO);
1564 void VisitCastExpr(const CastExpr *CE);
1565 void VisitCallExpr(const CallExpr *Exp);
1566 void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1567 void VisitDeclStmt(const DeclStmt *S);
1568 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1569 };
1570
1571 } // namespace
1572
1573 /// Warn if the LSet does not contain a lock sufficient to protect access
1574 /// of at least the passed in AccessKind.
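///
/// A minimal illustration (hypothetical declarations, not from this file):
///   Mutex mu;
///   int data GUARDED_BY(mu);
///   void f() { data = 42; }     // warns: writing requires 'mu' exclusively
///   void g() { int x = data; }  // warns: reading requires at least a shared lock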
1575 void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
1576 AccessKind AK, Expr *MutexExp,
1577 ProtectedOperationKind POK,
1578 til::LiteralPtr *Self,
1579 SourceLocation Loc) {
1580 LockKind LK = getLockKindFromAccessKind(AK);
1581
1582 CapabilityExpr Cp =
1583 Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1584 if (Cp.isInvalid()) {
1585 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, Cp.getKind());
1586 return;
1587 } else if (Cp.shouldIgnore()) {
1588 return;
1589 }
1590
1591 if (Cp.negative()) {
1592 // Negative capabilities act like excluded locks.
1593 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
1594 if (LDat) {
1595 Analyzer->Handler.handleFunExcludesLock(
1596 Cp.getKind(), D->getNameAsString(), (!Cp).toString(), Loc);
1597 return;
1598 }
1599
1600 // If this does not refer to a negative capability in the same class,
1601 // then stop here.
1602 if (!Analyzer->inCurrentScope(Cp))
1603 return;
1604
1605 // Otherwise the negative requirement must be propagated to the caller.
1606 LDat = FSet.findLock(Analyzer->FactMan, Cp);
1607 if (!LDat) {
1608 Analyzer->Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1609 }
1610 return;
1611 }
1612
1613 const FactEntry *LDat = FSet.findLockUniv(Analyzer->FactMan, Cp);
1614 bool NoError = true;
1615 if (!LDat) {
1616 // No exact match found. Look for a partial match.
1617 LDat = FSet.findPartialMatch(Analyzer->FactMan, Cp);
1618 if (LDat) {
1619 // Warn that there's no precise match.
1620 std::string PartMatchStr = LDat->toString();
1621 StringRef PartMatchName(PartMatchStr);
1622 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
1623 LK, Loc, &PartMatchName);
1624 } else {
1625 // Warn that there's no match at all.
1626 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
1627 LK, Loc);
1628 }
1629 NoError = false;
1630 }
1631 // Make sure the mutex we found is the right kind.
1632 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1633 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
1634 LK, Loc);
1635 }
1636 }
1637
1638 /// Warn if the LSet contains the given lock.
1639 void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
1640 Expr *MutexExp, til::LiteralPtr *Self,
1641 SourceLocation Loc) {
1642 CapabilityExpr Cp =
1643 Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1644 if (Cp.isInvalid()) {
1645 warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, Cp.getKind());
1646 return;
1647 } else if (Cp.shouldIgnore()) {
1648 return;
1649 }
1650
1651 const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
1652 if (LDat) {
1653 Analyzer->Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1654 Cp.toString(), Loc);
1655 }
1656 }
1657
1658 /// Checks guarded_by and pt_guarded_by attributes.
1659 /// Whenever we identify an access (read or write) to a DeclRefExpr that is
1660 /// marked with guarded_by, we must ensure the appropriate mutexes are held.
1661 /// Similarly, we check if the access is to an expression that dereferences
1662 /// a pointer marked with pt_guarded_by.
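///
/// A minimal sketch of the distinction (hypothetical declarations):
///   int v GUARDED_BY(mu);      // checkAccess guards reads and writes of 'v'
///   int *p PT_GUARDED_BY(mu);  // checkPtAccess guards '*p'; 'p' itself is not guarded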
1663 void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
1664 ProtectedOperationKind POK) {
1665 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1666
1667 SourceLocation Loc = Exp->getExprLoc();
1668
1669 // Local variables of reference type cannot be re-assigned;
1670 // map them to their initializer.
1671 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1672 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1673 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1674 if (const auto *E = VD->getInit()) {
1675 // Guard against self-initialization. e.g., int &i = i;
1676 if (E == Exp)
1677 break;
1678 Exp = E;
1679 continue;
1680 }
1681 }
1682 break;
1683 }
1684
1685 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1686 // For dereferences
1687 if (UO->getOpcode() == UO_Deref)
1688 checkPtAccess(UO->getSubExpr(), AK, POK);
1689 return;
1690 }
1691
1692 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1693 switch (BO->getOpcode()) {
1694 case BO_PtrMemD: // .*
1695 return checkAccess(BO->getLHS(), AK, POK);
1696 case BO_PtrMemI: // ->*
1697 return checkPtAccess(BO->getLHS(), AK, POK);
1698 default:
1699 return;
1700 }
1701 }
1702
1703 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1704 checkPtAccess(AE->getLHS(), AK, POK);
1705 return;
1706 }
1707
1708 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1709 if (ME->isArrow())
1710 checkPtAccess(ME->getBase(), AK, POK);
1711 else
1712 checkAccess(ME->getBase(), AK, POK);
1713 }
1714
1715 const ValueDecl *D = getValueDecl(Exp);
1716 if (!D || !D->hasAttrs())
1717 return;
1718
1719 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
1720 Analyzer->Handler.handleNoMutexHeld(D, POK, AK, Loc);
1721 }
1722
1723 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1724 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1725 }
1726
1727 /// Checks pt_guarded_by and pt_guarded_var attributes.
1728 /// POK is the same operationKind that was passed to checkAccess.
1729 void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
1730 ProtectedOperationKind POK) {
1731 while (true) {
1732 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1733 Exp = PE->getSubExpr();
1734 continue;
1735 }
1736 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1737 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1738 // If it's an actual array, and not a pointer, then its elements
1739 // are protected by GUARDED_BY, not PT_GUARDED_BY.
1740 checkAccess(CE->getSubExpr(), AK, POK);
1741 return;
1742 }
1743 Exp = CE->getSubExpr();
1744 continue;
1745 }
1746 break;
1747 }
1748
1749 // Pass by reference warnings are under a different flag.
1750 ProtectedOperationKind PtPOK = POK_VarDereference;
1751 if (POK == POK_PassByRef) PtPOK = POK_PtPassByRef;
1752
1753 const ValueDecl *D = getValueDecl(Exp);
1754 if (!D || !D->hasAttrs())
1755 return;
1756
1757 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
1758 Analyzer->Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1759
1760 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1761 warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK, nullptr,
1762 Exp->getExprLoc());
1763 }
1764
1765 /// Process a function call, method call, constructor call,
1766 /// or destructor call. This involves looking at the attributes on the
1767 /// corresponding function/method/constructor/destructor, issuing warnings,
1768 /// and updating the locksets accordingly.
1769 ///
1770 /// FIXME: For classes annotated with one of the guarded annotations, we need
1771 /// to treat const method calls as reads and non-const method calls as writes,
1772 /// and check that the appropriate locks are held. Non-const method calls with
1773 /// the same signature as const method calls can be also treated as reads.
1774 ///
1775 /// \param Exp The call expression.
1776 /// \param D The callee declaration.
1777 /// \param Self If \p Exp == nullptr, the implicit this argument.
1778 /// \param Loc If \p Exp == nullptr, the location.
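///
/// Illustrative example of the scoped-lockable path (a hypothetical RAII type
/// 'MutexLock' marked SCOPED_LOCKABLE, not from this file):
///   MutexLock guard(&mu);  // adds 'mu' to the lockset plus a scoped fact for 'guard'
/// When 'guard' is later destroyed, the scoped fact releases 'mu' again.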
1779 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
1780 til::LiteralPtr *Self, SourceLocation Loc) {
1781 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
1782 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
1783 CapExprSet ScopedReqsAndExcludes;
1784
1785 // Figure out if we're constructing an object of a scoped lockable class.
1786 CapabilityExpr Scp;
1787 if (Exp) {
1788 assert(!Self);
1789 const auto *TagT = Exp->getType()->getAs<TagType>();
1790 if (TagT && Exp->isPRValue()) {
1791 std::pair<til::LiteralPtr *, StringRef> Placeholder =
1792 Analyzer->SxBuilder.createThisPlaceholder(Exp);
1793 [[maybe_unused]] auto inserted =
1794 ConstructedObjects.insert({Exp, Placeholder.first});
1795 assert(inserted.second && "Are we visiting the same expression again?");
1796 if (isa<CXXConstructExpr>(Exp))
1797 Self = Placeholder.first;
1798 if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
1799 Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false);
1800 }
1801
1802 assert(Loc.isInvalid());
1803 Loc = Exp->getExprLoc();
1804 }
1805
1806 for (const Attr *At : D->attrs()) {
1807 switch (At->getKind()) {
1808 // When we encounter a lock function, we need to add the lock to our
1809 // lockset.
1810 case attr::AcquireCapability: {
1811 const auto *A = cast<AcquireCapabilityAttr>(At);
1812 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
1813 : ExclusiveLocksToAdd,
1814 A, Exp, D, Self);
1815 break;
1816 }
1817
1818 // An assert will add a lock to the lockset, but will not generate
1819 // a warning if it is already there, and will not generate a warning
1820 // if it is not removed.
1821 case attr::AssertExclusiveLock: {
1822 const auto *A = cast<AssertExclusiveLockAttr>(At);
1823
1824 CapExprSet AssertLocks;
1825 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
1826 for (const auto &AssertLock : AssertLocks)
1827 Analyzer->addLock(
1828 FSet, std::make_unique<LockableFactEntry>(
1829 AssertLock, LK_Exclusive, Loc, FactEntry::Asserted));
1830 break;
1831 }
1832 case attr::AssertSharedLock: {
1833 const auto *A = cast<AssertSharedLockAttr>(At);
1834
1835 CapExprSet AssertLocks;
1836 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
1837 for (const auto &AssertLock : AssertLocks)
1838 Analyzer->addLock(
1839 FSet, std::make_unique<LockableFactEntry>(
1840 AssertLock, LK_Shared, Loc, FactEntry::Asserted));
1841 break;
1842 }
1843
1844 case attr::AssertCapability: {
1845 const auto *A = cast<AssertCapabilityAttr>(At);
1846 CapExprSet AssertLocks;
1847 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
1848 for (const auto &AssertLock : AssertLocks)
1849 Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
1850 AssertLock,
1851 A->isShared() ? LK_Shared : LK_Exclusive,
1852 Loc, FactEntry::Asserted));
1853 break;
1854 }
1855
1856 // When we encounter an unlock function, we need to remove unlocked
1857 // mutexes from the lockset, and flag a warning if they are not there.
1858 case attr::ReleaseCapability: {
1859 const auto *A = cast<ReleaseCapabilityAttr>(At);
1860 if (A->isGeneric())
1861 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
1862 else if (A->isShared())
1863 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
1864 else
1865 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
1866 break;
1867 }
1868
1869 case attr::RequiresCapability: {
1870 const auto *A = cast<RequiresCapabilityAttr>(At);
1871 for (auto *Arg : A->args()) {
1872 warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
1873 POK_FunctionCall, Self, Loc);
1874 // use for adopting a lock
1875 if (!Scp.shouldIgnore())
1876 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
1877 }
1878 break;
1879 }
1880
1881 case attr::LocksExcluded: {
1882 const auto *A = cast<LocksExcludedAttr>(At);
1883 for (auto *Arg : A->args()) {
1884 warnIfMutexHeld(D, Exp, Arg, Self, Loc);
1885 // use for deferring a lock
1886 if (!Scp.shouldIgnore())
1887 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
1888 }
1889 break;
1890 }
1891
1892 // Ignore attributes unrelated to thread-safety
1893 default:
1894 break;
1895 }
1896 }
1897
1898 // Remove locks first to allow lock upgrading/downgrading.
1899 // FIXME -- should only fully remove if the attribute refers to 'this'.
1900 bool Dtor = isa<CXXDestructorDecl>(D);
1901 for (const auto &M : ExclusiveLocksToRemove)
1902 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
1903 for (const auto &M : SharedLocksToRemove)
1904 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
1905 for (const auto &M : GenericLocksToRemove)
1906 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
1907
1908 // Add locks.
1909 FactEntry::SourceKind Source =
1910 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
1911 for (const auto &M : ExclusiveLocksToAdd)
1912 Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
1913 Loc, Source));
1914 for (const auto &M : SharedLocksToAdd)
1915 Analyzer->addLock(
1916 FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));
1917
1918 if (!Scp.shouldIgnore()) {
1919 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
1920 auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(Scp, Loc);
1921 for (const auto &M : ExclusiveLocksToAdd)
1922 ScopedEntry->addLock(M);
1923 for (const auto &M : SharedLocksToAdd)
1924 ScopedEntry->addLock(M);
1925 for (const auto &M : ScopedReqsAndExcludes)
1926 ScopedEntry->addLock(M);
1927 for (const auto &M : ExclusiveLocksToRemove)
1928 ScopedEntry->addExclusiveUnlock(M);
1929 for (const auto &M : SharedLocksToRemove)
1930 ScopedEntry->addSharedUnlock(M);
1931 Analyzer->addLock(FSet, std::move(ScopedEntry));
1932 }
1933 }
1934
1935 /// For unary operations which read and write a variable, we need to
1936 /// check whether we hold any required mutexes. Reads are checked in
1937 /// VisitCastExpr.
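/// For example (illustrative): '++count' on a hypothetical
/// 'int count GUARDED_BY(mu)' counts as a write, so 'mu' must be held
/// exclusively.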
1938 void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
1939 switch (UO->getOpcode()) {
1940 case UO_PostDec:
1941 case UO_PostInc:
1942 case UO_PreDec:
1943 case UO_PreInc:
1944 checkAccess(UO->getSubExpr(), AK_Written);
1945 break;
1946 default:
1947 break;
1948 }
1949 }
1950
1951 /// For binary operations which assign to a variable (writes), we need to check
1952 /// whether we hold any required mutexes.
1953 /// FIXME: Deal with non-primitive types.
1954 void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
1955 if (!BO->isAssignmentOp())
1956 return;
1957
1958 // adjust the context
1959 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
1960
1961 checkAccess(BO->getLHS(), AK_Written);
1962 }
1963
1964 /// Whenever we do an lvalue-to-rvalue cast, we are reading a variable and
1965 /// need to ensure we hold any required mutexes.
1966 /// FIXME: Deal with non-primitive types.
1967 void BuildLockset::VisitCastExpr(const CastExpr *CE) {
1968 if (CE->getCastKind() != CK_LValueToRValue)
1969 return;
1970 checkAccess(CE->getSubExpr(), AK_Read);
1971 }
1972
1973 void BuildLockset::examineArguments(const FunctionDecl *FD,
1974 CallExpr::const_arg_iterator ArgBegin,
1975 CallExpr::const_arg_iterator ArgEnd,
1976 bool SkipFirstParam) {
1977 // Currently we can't do anything if we don't know the function declaration.
1978 if (!FD)
1979 return;
1980
1981 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
1982 // only turns off checking within the body of a function, but we also
1983 // use it to turn off checking in arguments to the function. This
1984 // could result in some false negatives, but the alternative is to
1985 // create yet another attribute.
1986 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
1987 return;
1988
1989 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
1990 auto Param = Params.begin();
1991 if (SkipFirstParam)
1992 ++Param;
1993
1994 // There can be default arguments, so we stop when one iterator is at end().
1995 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
1996 ++Param, ++Arg) {
1997 QualType Qt = (*Param)->getType();
1998 if (Qt->isReferenceType())
1999 checkAccess(*Arg, AK_Read, POK_PassByRef);
2000 }
2001 }
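
// Illustrative example (hypothetical declarations): given 'void sink(int &x);'
// and 'int g GUARDED_BY(mu);', the call 'sink(g)' is checked as a read of 'g'
// under POK_PassByRef, so it warns when 'mu' is not held.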
2002
2003 void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2004 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2005 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2006 // ME can be null when calling a method pointer
2007 const CXXMethodDecl *MD = CE->getMethodDecl();
2008
2009 if (ME && MD) {
2010 if (ME->isArrow()) {
2011 // Should perhaps be AK_Written if !MD->isConst().
2012 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2013 } else {
2014 // Should perhaps be AK_Written if !MD->isConst().
2015 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2016 }
2017 }
2018
2019 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2020 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
2021 OverloadedOperatorKind OEop = OE->getOperator();
2022 switch (OEop) {
2023 case OO_Equal:
2024 case OO_PlusEqual:
2025 case OO_MinusEqual:
2026 case OO_StarEqual:
2027 case OO_SlashEqual:
2028 case OO_PercentEqual:
2029 case OO_CaretEqual:
2030 case OO_AmpEqual:
2031 case OO_PipeEqual:
2032 case OO_LessLessEqual:
2033 case OO_GreaterGreaterEqual:
2034 checkAccess(OE->getArg(1), AK_Read);
2035 [[fallthrough]];
2036 case OO_PlusPlus:
2037 case OO_MinusMinus:
2038 checkAccess(OE->getArg(0), AK_Written);
2039 break;
2040 case OO_Star:
2041 case OO_ArrowStar:
2042 case OO_Arrow:
2043 case OO_Subscript:
2044 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2045 // Grrr. operator* can be multiplication...
2046 checkPtAccess(OE->getArg(0), AK_Read);
2047 }
2048 [[fallthrough]];
2049 default: {
2050 // TODO: get rid of this, and rely on pass-by-ref instead.
2051 const Expr *Obj = OE->getArg(0);
2052 checkAccess(Obj, AK_Read);
2053 // Check the remaining arguments. For method operators, the first
2054 // argument is the implicit self argument, and doesn't appear in the
2055 // FunctionDecl, but for non-methods it does.
2056 const FunctionDecl *FD = OE->getDirectCallee();
2057 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2058 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2059 break;
2060 }
2061 }
2062 } else {
2063 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2064 }
2065
2066 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2067 if (!D || !D->hasAttrs())
2068 return;
2069 handleCall(Exp, D);
2070 }
2071
2072 void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2073 const CXXConstructorDecl *D = Exp->getConstructor();
2074 if (D && D->isCopyConstructor()) {
2075 const Expr* Source = Exp->getArg(0);
2076 checkAccess(Source, AK_Read);
2077 } else {
2078 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2079 }
2080 if (D && D->hasAttrs())
2081 handleCall(Exp, D);
2082 }
2083
2084 static const Expr *UnpackConstruction(const Expr *E) {
2085 if (auto *CE = dyn_cast<CastExpr>(E))
2086 if (CE->getCastKind() == CK_NoOp)
2087 E = CE->getSubExpr()->IgnoreParens();
2088 if (auto *CE = dyn_cast<CastExpr>(E))
2089 if (CE->getCastKind() == CK_ConstructorConversion ||
2090 CE->getCastKind() == CK_UserDefinedConversion)
2091 E = CE->getSubExpr();
2092 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2093 E = BTE->getSubExpr();
2094 return E;
2095 }
2096
2097 void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2098 // adjust the context
2099 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
2100
2101 for (auto *D : S->getDeclGroup()) {
2102 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2103 const Expr *E = VD->getInit();
2104 if (!E)
2105 continue;
2106 E = E->IgnoreParens();
2107
2108 // handle constructors that involve temporaries
2109 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2110 E = EWC->getSubExpr()->IgnoreParens();
2111 E = UnpackConstruction(E);
2112
2113 if (auto Object = ConstructedObjects.find(E);
2114 Object != ConstructedObjects.end()) {
2115 Object->second->setClangDecl(VD);
2116 ConstructedObjects.erase(Object);
2117 }
2118 }
2119 }
2120 }
2121
2122 void BuildLockset::VisitMaterializeTemporaryExpr(
2123 const MaterializeTemporaryExpr *Exp) {
2124 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2125 if (auto Object =
2126 ConstructedObjects.find(UnpackConstruction(Exp->getSubExpr()));
2127 Object != ConstructedObjects.end()) {
2128 Object->second->setClangDecl(ExtD);
2129 ConstructedObjects.erase(Object);
2130 }
2131 }
2132 }
2133
2134 /// Given two facts merging on a join point, possibly warn and decide whether to
2135 /// keep or replace.
2136 ///
2137 /// \param CanModify Whether we can replace \p A by \p B.
2138 /// \return false if we should keep \p A, true if we should take \p B.
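///
/// Example (illustrative): if one branch acquired 'mu' exclusively and another
/// acquired it shared, the kinds differ; unless both facts are managed or
/// asserted, handleExclusiveAndShared is reported and the exclusive fact is
/// kept to limit follow-on warnings.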
2139 bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2140 bool CanModify) {
2141 if (A.kind() != B.kind()) {
2142 // For managed capabilities, the destructor should unlock in the right mode
2143 // anyway. For asserted capabilities, no unlocking is needed.
2144 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2145 // The shared capability subsumes the exclusive capability, if possible.
2146 bool ShouldTakeB = B.kind() == LK_Shared;
2147 if (CanModify || !ShouldTakeB)
2148 return ShouldTakeB;
2149 }
2150 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2151 A.loc());
2152 // Take the exclusive capability to reduce further warnings.
2153 return CanModify && B.kind() == LK_Exclusive;
2154 } else {
2155 // The non-asserted capability is the one we want to track.
2156 return CanModify && A.asserted() && !B.asserted();
2157 }
2158 }
2159
2160 /// Compute the intersection of two locksets and issue warnings for any
2161 /// locks in the symmetric difference.
2162 ///
2163 /// This function is used at a merge point in the CFG when comparing the lockset
2164 /// of each branch being merged. For example, given the following sequence:
2165 /// A; if () then B; else C; D; we need to check that the locksets after B and C
2166 /// are the same. In the event of a difference, we use the intersection of these
2167 /// two locksets at the start of D.
2168 ///
2169 /// \param EntrySet A lockset for entry into a (possibly new) block.
2170 /// \param ExitSet The lockset on exiting a preceding block.
2171 /// \param JoinLoc The location of the join point for error reporting
2172 /// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2173 /// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
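///
/// Illustrative example (hypothetical 'mu'):
///   if (c) mu.Lock();
///   // join point: 'mu' is in the exit set of the true branch only, so it is
///   // reported as held on some but not all predecessors and dropped from the
///   // intersection used as the entry set of the next block.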
2174 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2175 const FactSet &ExitSet,
2176 SourceLocation JoinLoc,
2177 LockErrorKind EntryLEK,
2178 LockErrorKind ExitLEK) {
2179 FactSet EntrySetOrig = EntrySet;
2180
2181 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2182 for (const auto &Fact : ExitSet) {
2183 const FactEntry &ExitFact = FactMan[Fact];
2184
2185 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2186 if (EntryIt != EntrySet.end()) {
2187 if (join(FactMan[*EntryIt], ExitFact,
2188 EntryLEK != LEK_LockedSomeLoopIterations))
2189 *EntryIt = Fact;
2190 } else if (!ExitFact.managed()) {
2191 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2192 EntryLEK, Handler);
2193 }
2194 }
2195
2196 // Find locks in EntrySet that are not in ExitSet, and remove them.
2197 for (const auto &Fact : EntrySetOrig) {
2198 const FactEntry *EntryFact = &FactMan[Fact];
2199 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2200
2201 if (!ExitFact) {
2202 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations)
2203 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2204 ExitLEK, Handler);
2205 if (ExitLEK == LEK_LockedSomePredecessors)
2206 EntrySet.removeLock(FactMan, *EntryFact);
2207 }
2208 }
2209 }
2210
2211 // Return true if block B never continues to its successors.
2212 static bool neverReturns(const CFGBlock *B) {
2213 if (B->hasNoReturnElement())
2214 return true;
2215 if (B->empty())
2216 return false;
2217
2218 CFGElement Last = B->back();
2219 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2220 if (isa<CXXThrowExpr>(S->getStmt()))
2221 return true;
2222 }
2223 return false;
2224 }
2225
2226 /// Check a function's CFG for thread-safety violations.
2227 ///
2228 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2229 /// at the end of each block, and issue warnings for thread safety violations.
2230 /// Each block in the CFG is traversed exactly once.
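///
/// For example (illustrative), a function declared as
///   void f() REQUIRES(mu);
/// starts its entry block with 'mu' in the lockset (FactEntry::Declared), and
/// the analysis expects the same set of locks to be held again at the exit
/// block.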
2231 void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2232 // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
2233 // For now, we just use the walker to set things up.
2234 threadSafety::CFGWalker walker;
2235 if (!walker.init(AC))
2236 return;
2237
2238 // AC.dumpCFG(true);
2239 // threadSafety::printSCFG(walker);
2240
2241 CFG *CFGraph = walker.getGraph();
2242 const NamedDecl *D = walker.getDecl();
2243 const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
2244 CurrentMethod = dyn_cast<CXXMethodDecl>(D);
2245
2246 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2247 return;
2248
2249 // FIXME: Do something a bit more intelligent inside constructor and
2250 // destructor code. Constructors and destructors must assume unique access
2251 // to 'this', so checks on member variable access are disabled, but we should
2252 // still enable checks on other objects.
2253 if (isa<CXXConstructorDecl>(D))
2254 return; // Don't check inside constructors.
2255 if (isa<CXXDestructorDecl>(D))
2256 return; // Don't check inside destructors.
2257
2258 Handler.enterFunction(CurrentFunction);
2259
2260 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2261 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2262
2263 // We need to explore the CFG via a "topological" ordering.
2264 // That way, we will be guaranteed to have information about required
2265 // predecessor locksets when exploring a new block.
2266 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2267 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2268
2269 // Mark entry block as reachable
2270 BlockInfo[CFGraph->getEntry().getBlockID()].Reachable = true;
2271
2272 // Compute SSA names for local variables
2273 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2274
2275 // Fill in source locations for all CFGBlocks.
2276 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2277
2278 CapExprSet ExclusiveLocksAcquired;
2279 CapExprSet SharedLocksAcquired;
2280 CapExprSet LocksReleased;
2281
2282 // Add locks from exclusive_locks_required and shared_locks_required
2283 // to initial lockset. Also turn off checking for lock and unlock functions.
2284 // FIXME: is there a more intelligent way to check lock/unlock functions?
2285 if (!SortedGraph->empty() && D->hasAttrs()) {
2286 const CFGBlock *FirstBlock = *SortedGraph->begin();
2287 FactSet &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
2288
2289 CapExprSet ExclusiveLocksToAdd;
2290 CapExprSet SharedLocksToAdd;
2291
2292 SourceLocation Loc = D->getLocation();
2293 for (const auto *Attr : D->attrs()) {
2294 Loc = Attr->getLocation();
2295 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2296 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2297 nullptr, D);
2298 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2299 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2300 // We must ignore such methods.
2301 if (A->args_size() == 0)
2302 return;
2303 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2304 nullptr, D);
2305 getMutexIDs(LocksReleased, A, nullptr, D);
2306 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2307 if (A->args_size() == 0)
2308 return;
2309 getMutexIDs(A->isShared() ? SharedLocksAcquired
2310 : ExclusiveLocksAcquired,
2311 A, nullptr, D);
2312 } else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
2313 // Don't try to check trylock functions for now.
2314 return;
2315 } else if (isa<SharedTrylockFunctionAttr>(Attr)) {
2316 // Don't try to check trylock functions for now.
2317 return;
2318 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2319 // Don't try to check trylock functions for now.
2320 return;
2321 }
2322 }
2323
2324 // FIXME -- Loc can be wrong here.
2325 for (const auto &Mu : ExclusiveLocksToAdd) {
2326 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
2327 FactEntry::Declared);
2328 addLock(InitialLockset, std::move(Entry), true);
2329 }
2330 for (const auto &Mu : SharedLocksToAdd) {
2331 auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
2332 FactEntry::Declared);
2333 addLock(InitialLockset, std::move(Entry), true);
2334 }
2335 }
2336
2337 for (const auto *CurrBlock : *SortedGraph) {
2338 unsigned CurrBlockID = CurrBlock->getBlockID();
2339 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2340
2341 // Use the default initial lockset in case there are no predecessors.
2342 VisitedBlocks.insert(CurrBlock);
2343
2344 // Iterate through the predecessor blocks and warn if the lockset for all
2345 // predecessors is not the same. We take the entry lockset of the current
2346 // block to be the intersection of all previous locksets.
2347 // FIXME: By keeping the intersection, we may output more errors in future
2348 // for a lock which is not in the intersection, but was in the union. We
2349 // may also want to keep the union in the future. As an example, let's say
2350 // the intersection contains Mutex L, and the union contains L and M.
2351 // Later we unlock M. At this point, we would output an error because we
2352 // never locked M; although the real error is probably that we forgot to
2353 // lock M on all code paths. Conversely, let's say that later we lock M.
2354 // In this case, we should compare against the intersection instead of the
2355 // union because the real error is probably that we forgot to unlock M on
2356 // all code paths.
2357 bool LocksetInitialized = false;
2358 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2359 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2360 // if *PI -> CurrBlock is a back edge
2361 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2362 continue;
2363
2364 unsigned PrevBlockID = (*PI)->getBlockID();
2365 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2366
2367 // Ignore edges from blocks that can't return.
2368 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2369 continue;
2370
2371 // Okay, we can reach this block from the entry.
2372 CurrBlockInfo->Reachable = true;
2373
2374 FactSet PrevLockset;
2375 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2376
2377 if (!LocksetInitialized) {
2378 CurrBlockInfo->EntrySet = PrevLockset;
2379 LocksetInitialized = true;
2380 } else {
2381 // Surprisingly 'continue' doesn't always produce back edges, because
2382 // the CFG has empty "transition" blocks where they meet with the end
2383 // of the regular loop body. We still want to diagnose them as loop iterations.
2384 intersectAndWarn(
2385 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2386 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2387 ? LEK_LockedSomeLoopIterations
2388 : LEK_LockedSomePredecessors);
2389 }
2390 }
2391
2392 // Skip rest of block if it's not reachable.
2393 if (!CurrBlockInfo->Reachable)
2394 continue;
2395
2396 BuildLockset LocksetBuilder(this, *CurrBlockInfo);
2397
2398 // Visit all the statements in the basic block.
2399 for (const auto &BI : *CurrBlock) {
2400 switch (BI.getKind()) {
2401 case CFGElement::Statement: {
2402 CFGStmt CS = BI.castAs<CFGStmt>();
2403 LocksetBuilder.Visit(CS.getStmt());
2404 break;
2405 }
2406 // Ignore BaseDtor and MemberDtor for now.
2407 case CFGElement::AutomaticObjectDtor: {
2408 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2409 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2410 if (!DD->hasAttrs())
2411 break;
2412
2413 LocksetBuilder.handleCall(nullptr, DD,
2414 SxBuilder.createVariable(AD.getVarDecl()),
2415 AD.getTriggerStmt()->getEndLoc());
2416 break;
2417 }
2418 case CFGElement::TemporaryDtor: {
2419 auto TD = BI.castAs<CFGTemporaryDtor>();
2420
2421 // Clean up constructed object even if there are no attributes to
2422 // keep the number of objects in limbo as small as possible.
2423 if (auto Object = LocksetBuilder.ConstructedObjects.find(
2424 TD.getBindTemporaryExpr()->getSubExpr());
2425 Object != LocksetBuilder.ConstructedObjects.end()) {
2426 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2427 if (DD->hasAttrs())
2428 // TODO: the location here isn't quite correct.
2429 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2430 TD.getBindTemporaryExpr()->getEndLoc());
2431 LocksetBuilder.ConstructedObjects.erase(Object);
2432 }
2433 break;
2434 }
2435 default:
2436 break;
2437 }
2438 }
2439 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2440
2441 // For every back edge from CurrBlock (the end of the loop) to another block
2442 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
2443 // the one held at the beginning of FirstLoopBlock. We can look up the
2444 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2445 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2446 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2447 // if CurrBlock -> *SI is *not* a back edge
2448 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2449 continue;
2450
2451 CFGBlock *FirstLoopBlock = *SI;
2452 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2453 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2454 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2455 LEK_LockedSomeLoopIterations);
2456 }
2457 }
2458
2459 CFGBlockInfo *Initial = &BlockInfo[CFGraph->getEntry().getBlockID()];
2460 CFGBlockInfo *Final = &BlockInfo[CFGraph->getExit().getBlockID()];
2461
2462 // Skip the final check if the exit block is unreachable.
2463 if (!Final->Reachable)
2464 return;
2465
2466 // By default, we expect all locks held on entry to be held on exit.
2467 FactSet ExpectedExitSet = Initial->EntrySet;
2468
2469 // Adjust the expected exit set by adding or removing locks, as declared
2470 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2471 // issue the appropriate warning.
2472 // FIXME: the location here is not quite right.
2473 for (const auto &Lock : ExclusiveLocksAcquired)
2474 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
2475 Lock, LK_Exclusive, D->getLocation()));
2476 for (const auto &Lock : SharedLocksAcquired)
2477 ExpectedExitSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
2478 Lock, LK_Shared, D->getLocation()));
2479 for (const auto &Lock : LocksReleased)
2480 ExpectedExitSet.removeLock(FactMan, Lock);
2481
2482 // FIXME: Should we call this function for all blocks which exit the function?
2483 intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc,
2484 LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);
2485
2486 Handler.leaveFunction(CurrentFunction);
2487 }
2488
2489 /// Check a function's CFG for thread-safety violations.
2490 ///
2491 /// We traverse the blocks in the CFG, compute the set of mutexes that are held
2492 /// at the end of each block, and issue warnings for thread safety violations.
2493 /// Each block in the CFG is traversed exactly once.
2494 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2495 ThreadSafetyHandler &Handler,
2496 BeforeSet **BSet) {
2497 if (!*BSet)
2498 *BSet = new BeforeSet;
2499 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2500 Analyzer.runAnalysis(AC);
2501 }
2502
2503 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2504
2505 /// Helper function that returns a LockKind required for the given level
2506 /// of access.
2507 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2508 switch (AK) {
2509 case AK_Read :
2510 return LK_Shared;
2511 case AK_Written :
2512 return LK_Exclusive;
2513 }
2514 llvm_unreachable("Unknown AccessKind");
2515 }
2516