//===- MemoryBuiltins.cpp - Identify calls to memory builtins -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This family of functions identifies calls to builtin functions that allocate
// or free memory.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <numeric>
#include <type_traits>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memory-builtins"

/// Bitmask describing what kind of allocation a library function performs.
/// Table entries below carry one of the primary bits; the compound values are
/// used as query masks by the is*LikeFn predicates.
enum AllocType : uint8_t {
  OpNewLike          = 1<<0, // allocates; never returns null
  MallocLike         = 1<<1, // allocates; may return null
  AlignedAllocLike   = 1<<2, // allocates with alignment; may return null
  CallocLike         = 1<<3, // allocates + bzero
  ReallocLike        = 1<<4, // reallocates
  StrDupLike         = 1<<5, // allocates + copies a string (strdup/strndup)
  MallocOrOpNewLike  = MallocLike | OpNewLike,
  MallocOrCallocLike = MallocLike | OpNewLike | CallocLike | AlignedAllocLike,
  AllocLike          = MallocOrCallocLike | StrDupLike,
  AnyAlloc           = AllocLike | ReallocLike
};

/// The allocator family an allocation or deallocation routine belongs to;
/// used to group matching malloc-style and free-style calls together.
enum class MallocFamily {
  Malloc,
  CPPNew,             // new(unsigned int)
  CPPNewAligned,      // new(unsigned int, align_val_t)
  CPPNewArray,        // new[](unsigned int)
  CPPNewArrayAligned, // new[](unsigned long, align_val_t)
  MSVCNew,            // new(unsigned int)
  MSVCArrayNew,       // new[](unsigned int)
  VecMalloc,
  KmpcAllocShared,
};

/// Returns the canonical (mangled) allocator name for \p Family, used as a
/// stable string key when reporting which family a call belongs to.
StringRef mangledNameForMallocFamily(const MallocFamily &Family) {
  switch (Family) {
  case MallocFamily::Malloc:
    return "malloc";
  case MallocFamily::CPPNew:
    return "_Znwm";
  case MallocFamily::CPPNewAligned:
    return "_ZnwmSt11align_val_t";
  case MallocFamily::CPPNewArray:
    return "_Znam";
  case MallocFamily::CPPNewArrayAligned:
    return "_ZnamSt11align_val_t";
  case MallocFamily::MSVCNew:
    return "??2@YAPAXI@Z";
  case MallocFamily::MSVCArrayNew:
    return "??_U@YAPAXI@Z";
  case MallocFamily::VecMalloc:
    return "vec_malloc";
  case MallocFamily::KmpcAllocShared:
    return "__kmpc_alloc_shared";
  }
  llvm_unreachable("missing an alloc family");
}

/// Static description of one known allocation function.
struct AllocFnsTy {
  AllocType AllocTy;
  unsigned NumParams;
  // First and Second size parameters (or -1 if unused)
  int FstParam, SndParam;
  // Alignment parameter for aligned_alloc and aligned new
  int AlignParam;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc.
static const std::pair<LibFunc, AllocFnsTy> AllocationFnData[] = {
    {LibFunc_malloc,                            {MallocLike,       1,  0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_malloc,                        {MallocLike,       1,  0, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_valloc,                            {MallocLike,       1,  0, -1, -1, MallocFamily::Malloc}},
    {LibFunc_Znwj,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned int)
    {LibFunc_ZnwjRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned int, nothrow)
    {LibFunc_ZnwjSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned int, align_val_t)
    {LibFunc_ZnwjSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned int, align_val_t, nothrow)
    {LibFunc_Znwm,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned long)
    {LibFunc_ZnwmRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNew}},             // new(unsigned long, nothrow)
    {LibFunc_ZnwmSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned long, align_val_t)
    {LibFunc_ZnwmSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewAligned}},      // new(unsigned long, align_val_t, nothrow)
    {LibFunc_Znaj,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned int)
    {LibFunc_ZnajRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned int, nothrow)
    {LibFunc_ZnajSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t)
    {LibFunc_ZnajSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned int, align_val_t, nothrow)
    {LibFunc_Znam,                              {OpNewLike,        1,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long)
    {LibFunc_ZnamRKSt9nothrow_t,                {MallocLike,       2,  0, -1, -1, MallocFamily::CPPNewArray}},        // new[](unsigned long, nothrow)
    {LibFunc_ZnamSt11align_val_t,               {OpNewLike,        2,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t)
    {LibFunc_ZnamSt11align_val_tRKSt9nothrow_t, {MallocLike,       3,  0, -1,  1, MallocFamily::CPPNewArrayAligned}}, // new[](unsigned long, align_val_t, nothrow)
    {LibFunc_msvc_new_int,                      {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned int)
    {LibFunc_msvc_new_int_nothrow,              {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned int, nothrow)
    {LibFunc_msvc_new_longlong,                 {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned long long)
    {LibFunc_msvc_new_longlong_nothrow,         {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCNew}},            // new(unsigned long long, nothrow)
    {LibFunc_msvc_new_array_int,                {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned int)
    {LibFunc_msvc_new_array_int_nothrow,        {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned int, nothrow)
    {LibFunc_msvc_new_array_longlong,           {OpNewLike,        1,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned long long)
    {LibFunc_msvc_new_array_longlong_nothrow,   {MallocLike,       2,  0, -1, -1, MallocFamily::MSVCArrayNew}},       // new[](unsigned long long, nothrow)
    {LibFunc_aligned_alloc,                     {AlignedAllocLike, 2,  1, -1,  0, MallocFamily::Malloc}},
    {LibFunc_memalign,                          {AlignedAllocLike, 2,  1, -1,  0, MallocFamily::Malloc}},
    {LibFunc_calloc,                            {CallocLike,       2,  0,  1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_calloc,                        {CallocLike,       2,  0,  1, -1, MallocFamily::VecMalloc}},
    {LibFunc_realloc,                           {ReallocLike,      2,  1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_vec_realloc,                       {ReallocLike,      2,  1, -1, -1, MallocFamily::VecMalloc}},
    {LibFunc_reallocf,                          {ReallocLike,      2,  1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strdup,                            {StrDupLike,       1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strdup,                     {StrDupLike,       1, -1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_strndup,                           {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
    {LibFunc_dunder_strndup,                    {StrDupLike,       2,  1, -1, -1, MallocFamily::Malloc}},
    {LibFunc___kmpc_alloc_shared,               {MallocLike,       1,  0, -1, -1, MallocFamily::KmpcAllocShared}},
};
// clang-format on

/// Returns the called Function if \p V is a direct, non-intrinsic call or
/// invoke, recording in \p IsNoBuiltin whether the call site carries the
/// nobuiltin marker. Returns null for intrinsics, non-calls, and indirect
/// calls.
static const Function *getCalledFunction(const Value *V,
                                         bool &IsNoBuiltin) {
  // Don't care about intrinsics in this case.
  if (isa<IntrinsicInst>(V))
    return nullptr;

  const auto *CB = dyn_cast<CallBase>(V);
  if (!CB)
    return nullptr;

  IsNoBuiltin = CB->isNoBuiltin();

  if (const Function *Callee = CB->getCalledFunction())
    return Callee;
  return nullptr;
}

/// Returns the allocation data for the given value if it's a call to a known
/// allocation function.
static Optional<AllocFnsTy>
getAllocationDataForFunction(const Function *Callee, AllocType AllocTy,
                             const TargetLibraryInfo *TLI) {
  // Make sure that the function is available.
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;

  const auto *Iter = find_if(
      AllocationFnData, [TLIFn](const std::pair<LibFunc, AllocFnsTy> &P) {
        return P.first == TLIFn;
      });

  if (Iter == std::end(AllocationFnData))
    return None;

  // Reject entries whose allocation kind falls outside the requested mask.
  const AllocFnsTy *FnData = &Iter->second;
  if ((FnData->AllocTy & AllocTy) != FnData->AllocTy)
    return None;

  // Check function prototype.
  int FstParam = FnData->FstParam;
  int SndParam = FnData->SndParam;
  FunctionType *FTy = Callee->getFunctionType();

  // The callee must return i8* and each size parameter (when present) must be
  // a 32- or 64-bit integer; otherwise this is not the libc function we know.
  if (FTy->getReturnType() == Type::getInt8PtrTy(FTy->getContext()) &&
      FTy->getNumParams() == FnData->NumParams &&
      (FstParam < 0 ||
       (FTy->getParamType(FstParam)->isIntegerTy(32) ||
        FTy->getParamType(FstParam)->isIntegerTy(64))) &&
      (SndParam < 0 ||
       FTy->getParamType(SndParam)->isIntegerTy(32) ||
       FTy->getParamType(SndParam)->isIntegerTy(64)))
    return *FnData;
  return None;
}

/// As above, but starting from an arbitrary call-site value. Call sites
/// marked nobuiltin are never treated as library allocators.
static Optional<AllocFnsTy> getAllocationData(const Value *V, AllocType AllocTy,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(Callee, AllocTy, TLI);
  return None;
}

/// Overload that looks up the TargetLibraryInfo for the callee on demand
/// through \p GetTLI.
static Optional<AllocFnsTy>
getAllocationData(const Value *V, AllocType AllocTy,
                  function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  bool IsNoBuiltinCall;
  if (const Function *Callee = getCalledFunction(V, IsNoBuiltinCall))
    if (!IsNoBuiltinCall)
      return getAllocationDataForFunction(
          Callee, AllocTy, &GetTLI(const_cast<Function &>(*Callee)));
  return None;
}

/// Returns size metadata for the call \p V, preferring the known-allocator
/// table and falling back to the callee's allocsize attribute.
static Optional<AllocFnsTy> getAllocationSize(const Value *V,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee =
      getCalledFunction(V, IsNoBuiltinCall);
  if (!Callee)
    return None;

  // Prefer to use existing information over allocsize. This will give us an
  // accurate AllocTy.
  if (!IsNoBuiltinCall)
    if (Optional<AllocFnsTy> Data =
            getAllocationDataForFunction(Callee, AnyAlloc, TLI))
      return Data;

  Attribute Attr = Callee->getFnAttribute(Attribute::AllocSize);
  if (Attr == Attribute())
    return None;

  std::pair<unsigned, Optional<unsigned>> Args = Attr.getAllocSizeArgs();

  AllocFnsTy Result;
  // Because allocsize only tells us how many bytes are allocated, we're not
  // really allowed to assume anything, so we use MallocLike.
  Result.AllocTy = MallocLike;
  Result.NumParams = Callee->getNumOperands();
  Result.FstParam = Args.first;
  Result.SndParam = Args.second.value_or(-1);
  // Allocsize has no way to specify an alignment argument
  Result.AlignParam = -1;
  return Result;
}

/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AnyAlloc, TLI).has_value();
}
bool llvm::isAllocationFn(
    const Value *V, function_ref<const TargetLibraryInfo &(Function &)> GetTLI) {
  return getAllocationData(V, AnyAlloc, GetTLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
static bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrOpNewLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory with alignment (such as aligned_alloc).
static bool isAlignedAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AlignedAllocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
static bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, CallocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool llvm::isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, MallocOrCallocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, AllocLike, TLI).has_value();
}

/// Tests if a value is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI) {
  return getAllocationData(V, ReallocLike, TLI).has_value();
}

/// Tests if a functions is a call or invoke to a library function that
/// reallocates memory (e.g., realloc).
bool llvm::isReallocLikeFn(const Function *F, const TargetLibraryInfo *TLI) {
  return getAllocationDataForFunction(F, ReallocLike, TLI).has_value();
}

/// Returns true if the allocation call \p CB may be deleted when its result
/// is unused.
bool llvm::isAllocRemovable(const CallBase *CB, const TargetLibraryInfo *TLI) {
  assert(isAllocationFn(CB, TLI));

  // Note: Removability is highly dependent on the source language. For
  // example, recent C++ requires direct calls to the global allocation
  // [basic.stc.dynamic.allocation] to be observable unless part of a new
  // expression [expr.new paragraph 13].

  // Historically we've treated the C family allocation routines as removable
  return isAllocLikeFn(CB, TLI);
}

/// Returns the alignment argument of allocation call \p V — from the
/// known-allocator table when it records an alignment parameter, otherwise
/// from an allocalign parameter attribute — or null if there is none.
Value *llvm::getAllocAlignment(const CallBase *V,
                               const TargetLibraryInfo *TLI) {
  const Optional<AllocFnsTy> FnData = getAllocationData(V, AnyAlloc, TLI);
  if (FnData && FnData->AlignParam >= 0) {
    return V->getOperand(FnData->AlignParam);
  }
  return V->getArgOperandWithAttribute(Attribute::AllocAlign);
}

/// When we're compiling N-bit code, and the user uses parameters that are
/// greater than N bits (e.g. uint64_t on a 32-bit build), we can run into
/// trouble with APInt size issues. This function handles resizing + overflow
/// checks for us. Check and zext or trunc \p I depending on IntTyBits and
/// I's value.
static bool CheckedZextOrTrunc(APInt &I, unsigned IntTyBits) {
  // More bits than we can handle. Checking the bit width isn't necessary, but
  // it's faster than checking active bits, and should give `false` in the
  // vast majority of cases.
  if (I.getBitWidth() > IntTyBits && I.getActiveBits() > IntTyBits)
    return false;
  if (I.getBitWidth() != IntTyBits)
    I = I.zextOrTrunc(IntTyBits);
  return true;
}

/// Computes the constant number of bytes allocated by \p CB, if its size
/// arguments (after translation through \p Mapper) are compile-time
/// constants. Returns None when the size cannot be determined or overflows.
Optional<APInt>
llvm::getAllocSize(const CallBase *CB,
                   const TargetLibraryInfo *TLI,
                   std::function<const Value*(const Value*)> Mapper) {
  // Note: This handles both explicitly listed allocation functions and
  // allocsize. The code structure could stand to be cleaned up a bit.
  Optional<AllocFnsTy> FnData = getAllocationSize(CB, TLI);
  if (!FnData)
    return None;

  // Get the index type for this address space, results and intermediate
  // computations are performed at that width.
  auto &DL = CB->getModule()->getDataLayout();
  const unsigned IntTyBits = DL.getIndexTypeSizeInBits(CB->getType());

  // Handle strdup-like functions separately.
  if (FnData->AllocTy == StrDupLike) {
    // GetStringLength yields 0 when the length is not a compile-time
    // constant, so a zero Size here means "unknown".
    APInt Size(IntTyBits, GetStringLength(Mapper(CB->getArgOperand(0))));
    if (!Size)
      return None;

    // Strndup limits strlen.
    if (FnData->FstParam > 0) {
      const ConstantInt *Arg =
          dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
      if (!Arg)
        return None;

      APInt MaxSize = Arg->getValue().zext(IntTyBits);
      if (Size.ugt(MaxSize))
        Size = MaxSize + 1;
    }
    return Size;
  }

  const ConstantInt *Arg =
      dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->FstParam)));
  if (!Arg)
    return None;

  APInt Size = Arg->getValue();
  if (!CheckedZextOrTrunc(Size, IntTyBits))
    return None;

  // Size is determined by just 1 parameter.
  if (FnData->SndParam < 0)
    return Size;

  Arg = dyn_cast<ConstantInt>(Mapper(CB->getArgOperand(FnData->SndParam)));
  if (!Arg)
    return None;

  APInt NumElems = Arg->getValue();
  if (!CheckedZextOrTrunc(NumElems, IntTyBits))
    return None;

  // calloc-style: the total is size * count; give up on unsigned overflow
  // since the true size is not representable in the index type.
  bool Overflow;
  Size = Size.umul_ov(NumElems, Overflow);
  if (Overflow)
    return None;
  return Size;
}

/// Returns the constant value each byte of memory freshly allocated by \p V
/// is known to hold — undef for malloc-like, zero for calloc-like — or null
/// if the initial contents are unknown.
Constant *llvm::getInitialValueOfAllocation(const Value *V,
                                            const TargetLibraryInfo *TLI,
                                            Type *Ty) {
  auto *Alloc = dyn_cast<CallBase>(V);
  if (!Alloc)
    return nullptr;

  // malloc and aligned_alloc are uninitialized (undef)
  if (isMallocLikeFn(Alloc, TLI) || isAlignedAllocLikeFn(Alloc, TLI))
    return UndefValue::get(Ty);

  // calloc zero initializes
  if (isCallocLikeFn(Alloc, TLI))
    return Constant::getNullValue(Ty);

  return nullptr;
}

/// Static description of one known deallocation function.
struct FreeFnsTy {
  unsigned NumParams;
  // Name of default allocator function to group malloc/free calls by family
  MallocFamily Family;
};

// clang-format off
static const std::pair<LibFunc, FreeFnsTy> FreeFnData[] = {
    {LibFunc_free,                               {1, MallocFamily::Malloc}},
    {LibFunc_vec_free,                           {1, MallocFamily::VecMalloc}},
    {LibFunc_ZdlPv,                              {1, MallocFamily::CPPNew}},             // operator delete(void*)
    {LibFunc_ZdaPv,                              {1, MallocFamily::CPPNewArray}},        // operator delete[](void*)
    {LibFunc_msvc_delete_ptr32,                  {1, MallocFamily::MSVCNew}},            // operator delete(void*)
    {LibFunc_msvc_delete_ptr64,                  {1, MallocFamily::MSVCNew}},            // operator delete(void*)
    {LibFunc_msvc_delete_array_ptr32,            {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
    {LibFunc_msvc_delete_array_ptr64,            {1, MallocFamily::MSVCArrayNew}},       // operator delete[](void*)
    {LibFunc_ZdlPvj,                             {2, MallocFamily::CPPNew}},             // delete(void*, uint)
    {LibFunc_ZdlPvm,                             {2, MallocFamily::CPPNew}},             // delete(void*, ulong)
    {LibFunc_ZdlPvRKSt9nothrow_t,                {2, MallocFamily::CPPNew}},             // delete(void*, nothrow)
    {LibFunc_ZdlPvSt11align_val_t,               {2, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t)
    {LibFunc_ZdaPvj,                             {2, MallocFamily::CPPNewArray}},        // delete[](void*, uint)
    {LibFunc_ZdaPvm,                             {2, MallocFamily::CPPNewArray}},        // delete[](void*, ulong)
    {LibFunc_ZdaPvRKSt9nothrow_t,                {2, MallocFamily::CPPNewArray}},        // delete[](void*, nothrow)
    {LibFunc_ZdaPvSt11align_val_t,               {2, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t)
    {LibFunc_msvc_delete_ptr32_int,              {2, MallocFamily::MSVCNew}},            // delete(void*, uint)
    {LibFunc_msvc_delete_ptr64_longlong,         {2, MallocFamily::MSVCNew}},            // delete(void*, ulonglong)
    {LibFunc_msvc_delete_ptr32_nothrow,          {2, MallocFamily::MSVCNew}},            // delete(void*, nothrow)
    {LibFunc_msvc_delete_ptr64_nothrow,          {2, MallocFamily::MSVCNew}},            // delete(void*, nothrow)
    {LibFunc_msvc_delete_array_ptr32_int,        {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, uint)
    {LibFunc_msvc_delete_array_ptr64_longlong,   {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, ulonglong)
    {LibFunc_msvc_delete_array_ptr32_nothrow,    {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, nothrow)
    {LibFunc_msvc_delete_array_ptr64_nothrow,    {2, MallocFamily::MSVCArrayNew}},       // delete[](void*, nothrow)
    {LibFunc___kmpc_free_shared,                 {2, MallocFamily::KmpcAllocShared}},    // OpenMP Offloading RTL free
    {LibFunc_ZdlPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewAligned}},      // delete(void*, align_val_t, nothrow)
    {LibFunc_ZdaPvSt11align_val_tRKSt9nothrow_t, {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, align_val_t, nothrow)
    {LibFunc_ZdlPvjSt11align_val_t,              {3, MallocFamily::CPPNewAligned}},      // delete(void*, unsigned int, align_val_t)
    {LibFunc_ZdlPvmSt11align_val_t,              {3, MallocFamily::CPPNewAligned}},      // delete(void*, unsigned long, align_val_t)
    {LibFunc_ZdaPvjSt11align_val_t,              {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned int, align_val_t)
    {LibFunc_ZdaPvmSt11align_val_t,              {3, MallocFamily::CPPNewArrayAligned}}, // delete[](void*, unsigned long, align_val_t)
};
// clang-format on

/// Looks up \p TLIFn in the free-function table above. \p Callee is
/// currently unused; the lookup is by LibFunc id only.
Optional<FreeFnsTy> getFreeFunctionDataForFunction(const Function *Callee,
                                                   const LibFunc TLIFn) {
  const auto *Iter =
      find_if(FreeFnData, [TLIFn](const std::pair<LibFunc, FreeFnsTy> &P) {
        return P.first == TLIFn;
      });
  if (Iter == std::end(FreeFnData))
    return None;
  return Iter->second;
}

/// Returns the mangled family name of the allocator family that call \p I
/// (either an allocation or a deallocation) belongs to, if known.
Optional<StringRef> llvm::getAllocationFamily(const Value *I,
                                              const TargetLibraryInfo *TLI) {
  bool IsNoBuiltin;
  const Function *Callee = getCalledFunction(I, IsNoBuiltin);
  if (Callee == nullptr || IsNoBuiltin)
    return None;
  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return None;
  // Try the allocation table first, then the free table.
  const auto AllocData = getAllocationDataForFunction(Callee, AnyAlloc, TLI);
  if (AllocData)
    return mangledNameForMallocFamily(AllocData.getValue().Family);
  const auto FreeData = getFreeFunctionDataForFunction(Callee, TLIFn);
  if (FreeData)
    return mangledNameForMallocFamily(FreeData.getValue().Family);
  return None;
}

/// isLibFreeFunction - Returns true if the function is a builtin free()
bool llvm::isLibFreeFunction(const Function *F, const LibFunc TLIFn) {
  Optional<FreeFnsTy> FnData = getFreeFunctionDataForFunction(F, TLIFn);
  if (!FnData)
    return false;

  // Check free prototype.
  // FIXME: workaround for PR5130, this will be obsolete when a nobuiltin
  // attribute will exist.
  FunctionType *FTy = F->getFunctionType();
  if (!FTy->getReturnType()->isVoidTy())
    return false;
  if (FTy->getNumParams() != FnData->NumParams)
    return false;
  if (FTy->getParamType(0) != Type::getInt8PtrTy(F->getContext()))
    return false;

  return true;
}

/// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
  bool IsNoBuiltinCall;
  const Function *Callee = getCalledFunction(I, IsNoBuiltinCall);
  if (Callee == nullptr || IsNoBuiltinCall)
    return nullptr;

  LibFunc TLIFn;
  if (!TLI || !TLI->getLibFunc(*Callee, TLIFn) || !TLI->has(TLIFn))
    return nullptr;

  return isLibFreeFunction(Callee, TLIFn) ? dyn_cast<CallInst>(I) : nullptr;
}


//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//

/// Clamps (size - offset) to zero when the offset is negative or points past
/// the end of the object.
static APInt getSizeWithOverflow(const SizeOffsetType &Data) {
  if (Data.second.isNegative() || Data.first.ult(Data.second))
    return APInt(Data.first.getBitWidth(), 0);
  return Data.first - Data.second;
}

/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
                         const TargetLibraryInfo *TLI, ObjectSizeOpts Opts) {
  ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), Opts);
  SizeOffsetType Data = Visitor.compute(const_cast<Value *>(Ptr));
  if (!Visitor.bothKnown(Data))
    return false;

  Size = getSizeWithOverflow(Data).getZExtValue();
  return true;
}

/// Convenience overload of lowerObjectSizeCall that runs without alias
/// analysis.
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI,
                                 bool MustSucceed) {
  return lowerObjectSizeCall(ObjectSize, DL, TLI, /*AAResults=*/nullptr,
                             MustSucceed);
}

/// Lowers an llvm.objectsize intrinsic call to a constant (or, in dynamic
/// mode, to a computed size expression). Returns null when the size cannot be
/// determined and \p MustSucceed is false; otherwise falls back to the
/// conservative constant the intrinsic's flags request.
Value *llvm::lowerObjectSizeCall(IntrinsicInst *ObjectSize,
                                 const DataLayout &DL,
                                 const TargetLibraryInfo *TLI, AAResults *AA,
                                 bool MustSucceed) {
  assert(ObjectSize->getIntrinsicID() == Intrinsic::objectsize &&
         "ObjectSize must be a call to llvm.objectsize!");

  // The second argument is the "min" flag: zero selects max semantics.
  bool MaxVal = cast<ConstantInt>(ObjectSize->getArgOperand(1))->isZero();
  ObjectSizeOpts EvalOptions;
  EvalOptions.AA = AA;

  // Unless we have to fold this to something, try to be as accurate as
  // possible.
  if (MustSucceed)
    EvalOptions.EvalMode =
        MaxVal ? ObjectSizeOpts::Mode::Max : ObjectSizeOpts::Mode::Min;
  else
    EvalOptions.EvalMode = ObjectSizeOpts::Mode::Exact;

  EvalOptions.NullIsUnknownSize =
      cast<ConstantInt>(ObjectSize->getArgOperand(2))->isOne();

  auto *ResultType = cast<IntegerType>(ObjectSize->getType());
  bool StaticOnly = cast<ConstantInt>(ObjectSize->getArgOperand(3))->isZero();
  if (StaticOnly) {
    // FIXME: Does it make sense to just return a failure value if the size won't
    // fit in the output and `!MustSucceed`?
    uint64_t Size;
    if (getObjectSize(ObjectSize->getArgOperand(0), Size, DL, TLI, EvalOptions) &&
        isUIntN(ResultType->getBitWidth(), Size))
      return ConstantInt::get(ResultType, Size);
  } else {
    // Dynamic mode: emit IR that computes max(size - offset, 0) at run time.
    LLVMContext &Ctx = ObjectSize->getFunction()->getContext();
    ObjectSizeOffsetEvaluator Eval(DL, TLI, Ctx, EvalOptions);
    SizeOffsetEvalType SizeOffsetPair =
        Eval.compute(ObjectSize->getArgOperand(0));

    if (SizeOffsetPair != ObjectSizeOffsetEvaluator::unknown()) {
      IRBuilder<TargetFolder> Builder(Ctx, TargetFolder(DL));
      Builder.SetInsertPoint(ObjectSize);

      // If we've outside the end of the object, then we can always access
      // exactly 0 bytes.
      Value *ResultSize =
          Builder.CreateSub(SizeOffsetPair.first, SizeOffsetPair.second);
      Value *UseZero =
          Builder.CreateICmpULT(SizeOffsetPair.first, SizeOffsetPair.second);
      ResultSize = Builder.CreateZExtOrTrunc(ResultSize, ResultType);
      Value *Ret = Builder.CreateSelect(
          UseZero, ConstantInt::get(ResultType, 0), ResultSize);

      // The non-constant size expression cannot evaluate to -1.
      if (!isa<Constant>(SizeOffsetPair.first) ||
          !isa<Constant>(SizeOffsetPair.second))
        Builder.CreateAssumption(
            Builder.CreateICmpNE(Ret, ConstantInt::get(ResultType, -1)));

      return Ret;
    }
  }

  if (!MustSucceed)
    return nullptr;

  // Forced fold: all-ones for max semantics, zero for min semantics.
  return ConstantInt::get(ResultType, MaxVal ? -1ULL : 0);
}

STATISTIC(ObjectVisitorArgument,
          "Number of arguments with unsolved size and offset");
STATISTIC(ObjectVisitorLoad,
          "Number of load instructions with unsolved size and offset");

/// Rounds \p Size up to \p Alignment when the RoundToAlign option is set.
APInt ObjectSizeOffsetVisitor::align(APInt Size, MaybeAlign Alignment) {
  if (Options.RoundToAlign && Alignment)
    return APInt(IntTyBits, alignTo(Size.getZExtValue(), *Alignment));
  return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
                                                const TargetLibraryInfo *TLI,
                                                LLVMContext &Context,
                                                ObjectSizeOpts Options)
    : DL(DL), TLI(TLI), Options(Options) {
  // Pointer size must be rechecked for each object visited since it could have
  // a different address space.
}

/// Entry point: computes the (size, offset) pair for \p V at the index-type
/// width of V's address space, after folding away constant GEP offsets and
/// address space casts.
SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Later we use the index type size and zero but it will match the type of the
  // value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to match
  // the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    // A size/offset that cannot be represented at the caller's width
    // degrades to "unknown" (an empty APInt).
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

/// Dispatches on the kind of value \p V is after cast stripping.
SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
  if (Instruction *I = dyn_cast<Instruction>(V)) {
    // If we have already seen this instruction, bail out. Cycles can happen in
    // unreachable code after constant propagation.
    if (!SeenInsts.insert(I).second)
      return unknown();

    return visit(*I);
  }
  if (Argument *A = dyn_cast<Argument>(V))
    return visitArgument(*A);
  if (ConstantPointerNull *P = dyn_cast<ConstantPointerNull>(V))
    return visitConstantPointerNull(*P);
  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return visitGlobalAlias(*GA);
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return visitGlobalVariable(*GV);
  if (UndefValue *UV = dyn_cast<UndefValue>(V))
    return visitUndefValue(*UV);

  LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
                    << *V << '\n');
  return unknown();
}

/// Member wrapper over the file-local helper, using this visitor's current
/// index-type width.
bool ObjectSizeOffsetVisitor::CheckedZextOrTrunc(APInt &I) {
  return ::CheckedZextOrTrunc(I, IntTyBits);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
  if (!I.getAllocatedType()->isSized())
    return unknown();

  // A scalable size is only a lower bound, which is only usable in Min mode.
  TypeSize ElemSize = DL.getTypeAllocSize(I.getAllocatedType());
  if (ElemSize.isScalable() && Options.EvalMode != ObjectSizeOpts::Mode::Min)
    return unknown();
  APInt Size(IntTyBits, ElemSize.getKnownMinSize());
  if (!I.isArrayAllocation())
    return std::make_pair(align(Size, I.getAlign()), Zero);

  Value *ArraySize = I.getArraySize();
  if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
    APInt NumElems = C->getValue();
    if (!CheckedZextOrTrunc(NumElems))
      return unknown();

    bool Overflow;
    Size = Size.umul_ov(NumElems, Overflow);
    return Overflow ? unknown()
                    : std::make_pair(align(Size, I.getAlign()), Zero);
  }
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
  Type *MemoryTy = A.getPointeeInMemoryValueType();
  // No interprocedural analysis is done at the moment.
  if (!MemoryTy || !MemoryTy->isSized()) {
    ++ObjectVisitorArgument;
    return unknown();
  }

  APInt Size(IntTyBits, DL.getTypeAllocSize(MemoryTy));
  return std::make_pair(align(Size, A.getParamAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitCallBase(CallBase &CB) {
  // Identity mapper: size arguments are inspected as-is.
  auto Mapper = [](const Value *V) { return V; };
  if (Optional<APInt> Size = getAllocSize(&CB, TLI, Mapper))
    return std::make_pair(*Size, Zero);
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitConstantPointerNull(ConstantPointerNull &CPN) {
  // If null is unknown, there's nothing we can do. Additionally, non-zero
  // address spaces can make use of null, so we don't presume to know anything
  // about that.
  //
  // TODO: How should this work with address space casts? We currently just drop
  // them on the floor, but it's unclear what we should do when a NULL from
  // addrspace(1) gets casted to addrspace(0) (or vice-versa).
  if (Options.NullIsUnknownSize || CPN.getType()->getAddressSpace())
    return unknown();
  return std::make_pair(Zero, Zero);
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractElementInst(ExtractElementInst &) {
  return unknown();
}

SizeOffsetType
ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst &) {
  // Easy cases were already folded by previous passes.
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
  // An interposable alias may be replaced at link time; its target's size
  // cannot be relied on.
  if (GA.isInterposable())
    return unknown();
  return compute(GA.getAliasee());
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV) {
  if (!GV.hasDefinitiveInitializer())
    return unknown();

  APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getValueType()));
  return std::make_pair(align(Size, GV.getAlign()), Zero);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitIntToPtrInst(IntToPtrInst &) {
  // clueless
  return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::findLoadSizeOffset(
    LoadInst &Load, BasicBlock &BB, BasicBlock::iterator From,
    SmallDenseMap<BasicBlock *, SizeOffsetType, 8> &VisitedBlocks,
    unsigned &ScannedInstCount) {
  constexpr unsigned MaxInstsToScan = 128;

  // Memoize per-block results so each block is analyzed at most once.
  auto Where = VisitedBlocks.find(&BB);
  if (Where != VisitedBlocks.end())
    return Where->second;

  auto Unknown = [this, &BB, &VisitedBlocks]() {
    return VisitedBlocks[&BB] = unknown();
  };
  auto Known = [&BB, &VisitedBlocks](SizeOffsetType SO) {
    return VisitedBlocks[&BB] = SO;
  };

  do {
    Instruction &I = *From;

    if (I.isDebugOrPseudoInst())
      continue;

    if (++ScannedInstCount > MaxInstsToScan)
      return Unknown();

    if (!I.mayWriteToMemory())
      continue;

    if (auto *SI = dyn_cast<StoreInst>(&I)) {
      AliasResult AR =
          Options.AA->alias(SI->getPointerOperand(), Load.getPointerOperand());
switch ((AliasResult::Kind)AR) { 851 case AliasResult::NoAlias: 852 continue; 853 case AliasResult::MustAlias: 854 if (SI->getValueOperand()->getType()->isPointerTy()) 855 return Known(compute(SI->getValueOperand())); 856 else 857 return Unknown(); // No handling of non-pointer values by `compute`. 858 default: 859 return Unknown(); 860 } 861 } 862 863 if (auto *CB = dyn_cast<CallBase>(&I)) { 864 Function *Callee = CB->getCalledFunction(); 865 // Bail out on indirect call. 866 if (!Callee) 867 return Unknown(); 868 869 LibFunc TLIFn; 870 if (!TLI || !TLI->getLibFunc(*CB->getCalledFunction(), TLIFn) || 871 !TLI->has(TLIFn)) 872 return Unknown(); 873 874 // TODO: There's probably more interesting case to support here. 875 if (TLIFn != LibFunc_posix_memalign) 876 return Unknown(); 877 878 AliasResult AR = 879 Options.AA->alias(CB->getOperand(0), Load.getPointerOperand()); 880 switch ((AliasResult::Kind)AR) { 881 case AliasResult::NoAlias: 882 continue; 883 case AliasResult::MustAlias: 884 break; 885 default: 886 return Unknown(); 887 } 888 889 // Is the error status of posix_memalign correctly checked? If not it 890 // would be incorrect to assume it succeeds and load doesn't see the 891 // previous value. 
892 Optional<bool> Checked = isImpliedByDomCondition( 893 ICmpInst::ICMP_EQ, CB, ConstantInt::get(CB->getType(), 0), &Load, DL); 894 if (!Checked || !*Checked) 895 return Unknown(); 896 897 Value *Size = CB->getOperand(2); 898 auto *C = dyn_cast<ConstantInt>(Size); 899 if (!C) 900 return Unknown(); 901 902 return Known({C->getValue(), APInt(C->getValue().getBitWidth(), 0)}); 903 } 904 905 return Unknown(); 906 } while (From-- != BB.begin()); 907 908 SmallVector<SizeOffsetType> PredecessorSizeOffsets; 909 for (auto *PredBB : predecessors(&BB)) { 910 PredecessorSizeOffsets.push_back(findLoadSizeOffset( 911 Load, *PredBB, BasicBlock::iterator(PredBB->getTerminator()), 912 VisitedBlocks, ScannedInstCount)); 913 if (!bothKnown(PredecessorSizeOffsets.back())) 914 return Unknown(); 915 } 916 917 if (PredecessorSizeOffsets.empty()) 918 return Unknown(); 919 920 return Known(std::accumulate(PredecessorSizeOffsets.begin() + 1, 921 PredecessorSizeOffsets.end(), 922 PredecessorSizeOffsets.front(), 923 [this](SizeOffsetType LHS, SizeOffsetType RHS) { 924 return combineSizeOffset(LHS, RHS); 925 })); 926 } 927 928 SizeOffsetType ObjectSizeOffsetVisitor::visitLoadInst(LoadInst &LI) { 929 if (!Options.AA) { 930 ++ObjectVisitorLoad; 931 return unknown(); 932 } 933 934 SmallDenseMap<BasicBlock *, SizeOffsetType, 8> VisitedBlocks; 935 unsigned ScannedInstCount = 0; 936 SizeOffsetType SO = 937 findLoadSizeOffset(LI, *LI.getParent(), BasicBlock::iterator(LI), 938 VisitedBlocks, ScannedInstCount); 939 if (!bothKnown(SO)) 940 ++ObjectVisitorLoad; 941 return SO; 942 } 943 944 SizeOffsetType ObjectSizeOffsetVisitor::combineSizeOffset(SizeOffsetType LHS, 945 SizeOffsetType RHS) { 946 if (!bothKnown(LHS) || !bothKnown(RHS)) 947 return unknown(); 948 949 switch (Options.EvalMode) { 950 case ObjectSizeOpts::Mode::Min: 951 return (getSizeWithOverflow(LHS).slt(getSizeWithOverflow(RHS))) ? 
LHS : RHS; 952 case ObjectSizeOpts::Mode::Max: 953 return (getSizeWithOverflow(LHS).sgt(getSizeWithOverflow(RHS))) ? LHS : RHS; 954 case ObjectSizeOpts::Mode::Exact: 955 return (getSizeWithOverflow(LHS).eq(getSizeWithOverflow(RHS))) ? LHS 956 : unknown(); 957 } 958 llvm_unreachable("missing an eval mode"); 959 } 960 961 SizeOffsetType ObjectSizeOffsetVisitor::visitPHINode(PHINode &PN) { 962 auto IncomingValues = PN.incoming_values(); 963 return std::accumulate(IncomingValues.begin() + 1, IncomingValues.end(), 964 compute(*IncomingValues.begin()), 965 [this](SizeOffsetType LHS, Value *VRHS) { 966 return combineSizeOffset(LHS, compute(VRHS)); 967 }); 968 } 969 970 SizeOffsetType ObjectSizeOffsetVisitor::visitSelectInst(SelectInst &I) { 971 return combineSizeOffset(compute(I.getTrueValue()), 972 compute(I.getFalseValue())); 973 } 974 975 SizeOffsetType ObjectSizeOffsetVisitor::visitUndefValue(UndefValue&) { 976 return std::make_pair(Zero, Zero); 977 } 978 979 SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) { 980 LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor unknown instruction:" << I 981 << '\n'); 982 return unknown(); 983 } 984 985 ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator( 986 const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context, 987 ObjectSizeOpts EvalOpts) 988 : DL(DL), TLI(TLI), Context(Context), 989 Builder(Context, TargetFolder(DL), 990 IRBuilderCallbackInserter( 991 [&](Instruction *I) { InsertedInstructions.insert(I); })), 992 EvalOpts(EvalOpts) { 993 // IntTy and Zero must be set for each compute() since the address space may 994 // be different for later objects. 995 } 996 997 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) { 998 // XXX - Are vectors of pointers possible here? 
999 IntTy = cast<IntegerType>(DL.getIndexType(V->getType())); 1000 Zero = ConstantInt::get(IntTy, 0); 1001 1002 SizeOffsetEvalType Result = compute_(V); 1003 1004 if (!bothKnown(Result)) { 1005 // Erase everything that was computed in this iteration from the cache, so 1006 // that no dangling references are left behind. We could be a bit smarter if 1007 // we kept a dependency graph. It's probably not worth the complexity. 1008 for (const Value *SeenVal : SeenVals) { 1009 CacheMapTy::iterator CacheIt = CacheMap.find(SeenVal); 1010 // non-computable results can be safely cached 1011 if (CacheIt != CacheMap.end() && anyKnown(CacheIt->second)) 1012 CacheMap.erase(CacheIt); 1013 } 1014 1015 // Erase any instructions we inserted as part of the traversal. 1016 for (Instruction *I : InsertedInstructions) { 1017 I->replaceAllUsesWith(PoisonValue::get(I->getType())); 1018 I->eraseFromParent(); 1019 } 1020 } 1021 1022 SeenVals.clear(); 1023 InsertedInstructions.clear(); 1024 return Result; 1025 } 1026 1027 SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) { 1028 ObjectSizeOffsetVisitor Visitor(DL, TLI, Context, EvalOpts); 1029 SizeOffsetType Const = Visitor.compute(V); 1030 if (Visitor.bothKnown(Const)) 1031 return std::make_pair(ConstantInt::get(Context, Const.first), 1032 ConstantInt::get(Context, Const.second)); 1033 1034 V = V->stripPointerCasts(); 1035 1036 // Check cache. 1037 CacheMapTy::iterator CacheIt = CacheMap.find(V); 1038 if (CacheIt != CacheMap.end()) 1039 return CacheIt->second; 1040 1041 // Always generate code immediately before the instruction being 1042 // processed, so that the generated code dominates the same BBs. 1043 BuilderTy::InsertPointGuard Guard(Builder); 1044 if (Instruction *I = dyn_cast<Instruction>(V)) 1045 Builder.SetInsertPoint(I); 1046 1047 // Now compute the size and offset. 
1048 SizeOffsetEvalType Result; 1049 1050 // Record the pointers that were handled in this run, so that they can be 1051 // cleaned later if something fails. We also use this set to break cycles that 1052 // can occur in dead code. 1053 if (!SeenVals.insert(V).second) { 1054 Result = unknown(); 1055 } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) { 1056 Result = visitGEPOperator(*GEP); 1057 } else if (Instruction *I = dyn_cast<Instruction>(V)) { 1058 Result = visit(*I); 1059 } else if (isa<Argument>(V) || 1060 (isa<ConstantExpr>(V) && 1061 cast<ConstantExpr>(V)->getOpcode() == Instruction::IntToPtr) || 1062 isa<GlobalAlias>(V) || 1063 isa<GlobalVariable>(V)) { 1064 // Ignore values where we cannot do more than ObjectSizeVisitor. 1065 Result = unknown(); 1066 } else { 1067 LLVM_DEBUG( 1068 dbgs() << "ObjectSizeOffsetEvaluator::compute() unhandled value: " << *V 1069 << '\n'); 1070 Result = unknown(); 1071 } 1072 1073 // Don't reuse CacheIt since it may be invalid at this point. 1074 CacheMap[V] = Result; 1075 return Result; 1076 } 1077 1078 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) { 1079 if (!I.getAllocatedType()->isSized()) 1080 return unknown(); 1081 1082 // must be a VLA 1083 assert(I.isArrayAllocation()); 1084 1085 // If needed, adjust the alloca's operand size to match the pointer size. 1086 // Subsequent math operations expect the types to match. 
1087 Value *ArraySize = Builder.CreateZExtOrTrunc( 1088 I.getArraySize(), DL.getIntPtrType(I.getContext())); 1089 assert(ArraySize->getType() == Zero->getType() && 1090 "Expected zero constant to have pointer type"); 1091 1092 Value *Size = ConstantInt::get(ArraySize->getType(), 1093 DL.getTypeAllocSize(I.getAllocatedType())); 1094 Size = Builder.CreateMul(Size, ArraySize); 1095 return std::make_pair(Size, Zero); 1096 } 1097 1098 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallBase(CallBase &CB) { 1099 Optional<AllocFnsTy> FnData = getAllocationSize(&CB, TLI); 1100 if (!FnData) 1101 return unknown(); 1102 1103 // Handle strdup-like functions separately. 1104 if (FnData->AllocTy == StrDupLike) { 1105 // TODO: implement evaluation of strdup/strndup 1106 return unknown(); 1107 } 1108 1109 Value *FirstArg = CB.getArgOperand(FnData->FstParam); 1110 FirstArg = Builder.CreateZExtOrTrunc(FirstArg, IntTy); 1111 if (FnData->SndParam < 0) 1112 return std::make_pair(FirstArg, Zero); 1113 1114 Value *SecondArg = CB.getArgOperand(FnData->SndParam); 1115 SecondArg = Builder.CreateZExtOrTrunc(SecondArg, IntTy); 1116 Value *Size = Builder.CreateMul(FirstArg, SecondArg); 1117 return std::make_pair(Size, Zero); 1118 } 1119 1120 SizeOffsetEvalType 1121 ObjectSizeOffsetEvaluator::visitExtractElementInst(ExtractElementInst&) { 1122 return unknown(); 1123 } 1124 1125 SizeOffsetEvalType 1126 ObjectSizeOffsetEvaluator::visitExtractValueInst(ExtractValueInst&) { 1127 return unknown(); 1128 } 1129 1130 SizeOffsetEvalType 1131 ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) { 1132 SizeOffsetEvalType PtrData = compute_(GEP.getPointerOperand()); 1133 if (!bothKnown(PtrData)) 1134 return unknown(); 1135 1136 Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true); 1137 Offset = Builder.CreateAdd(PtrData.second, Offset); 1138 return std::make_pair(PtrData.first, Offset); 1139 } 1140 1141 SizeOffsetEvalType 
ObjectSizeOffsetEvaluator::visitIntToPtrInst(IntToPtrInst&) { 1142 // clueless 1143 return unknown(); 1144 } 1145 1146 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitLoadInst(LoadInst &LI) { 1147 return unknown(); 1148 } 1149 1150 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) { 1151 // Create 2 PHIs: one for size and another for offset. 1152 PHINode *SizePHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); 1153 PHINode *OffsetPHI = Builder.CreatePHI(IntTy, PHI.getNumIncomingValues()); 1154 1155 // Insert right away in the cache to handle recursive PHIs. 1156 CacheMap[&PHI] = std::make_pair(SizePHI, OffsetPHI); 1157 1158 // Compute offset/size for each PHI incoming pointer. 1159 for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) { 1160 Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt()); 1161 SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i)); 1162 1163 if (!bothKnown(EdgeData)) { 1164 OffsetPHI->replaceAllUsesWith(PoisonValue::get(IntTy)); 1165 OffsetPHI->eraseFromParent(); 1166 InsertedInstructions.erase(OffsetPHI); 1167 SizePHI->replaceAllUsesWith(PoisonValue::get(IntTy)); 1168 SizePHI->eraseFromParent(); 1169 InsertedInstructions.erase(SizePHI); 1170 return unknown(); 1171 } 1172 SizePHI->addIncoming(EdgeData.first, PHI.getIncomingBlock(i)); 1173 OffsetPHI->addIncoming(EdgeData.second, PHI.getIncomingBlock(i)); 1174 } 1175 1176 Value *Size = SizePHI, *Offset = OffsetPHI; 1177 if (Value *Tmp = SizePHI->hasConstantValue()) { 1178 Size = Tmp; 1179 SizePHI->replaceAllUsesWith(Size); 1180 SizePHI->eraseFromParent(); 1181 InsertedInstructions.erase(SizePHI); 1182 } 1183 if (Value *Tmp = OffsetPHI->hasConstantValue()) { 1184 Offset = Tmp; 1185 OffsetPHI->replaceAllUsesWith(Offset); 1186 OffsetPHI->eraseFromParent(); 1187 InsertedInstructions.erase(OffsetPHI); 1188 } 1189 return std::make_pair(Size, Offset); 1190 } 1191 1192 SizeOffsetEvalType 
ObjectSizeOffsetEvaluator::visitSelectInst(SelectInst &I) { 1193 SizeOffsetEvalType TrueSide = compute_(I.getTrueValue()); 1194 SizeOffsetEvalType FalseSide = compute_(I.getFalseValue()); 1195 1196 if (!bothKnown(TrueSide) || !bothKnown(FalseSide)) 1197 return unknown(); 1198 if (TrueSide == FalseSide) 1199 return TrueSide; 1200 1201 Value *Size = Builder.CreateSelect(I.getCondition(), TrueSide.first, 1202 FalseSide.first); 1203 Value *Offset = Builder.CreateSelect(I.getCondition(), TrueSide.second, 1204 FalseSide.second); 1205 return std::make_pair(Size, Offset); 1206 } 1207 1208 SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitInstruction(Instruction &I) { 1209 LLVM_DEBUG(dbgs() << "ObjectSizeOffsetEvaluator unknown instruction:" << I 1210 << '\n'); 1211 return unknown(); 1212 } 1213