//===- InstrProf.cpp - Instrumented profiling format support --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for clang's instrumentation based PGO and
// coverage.
//
//===----------------------------------------------------------------------===//

#include "llvm/ProfileData/InstrProf.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/ProfileData/InstrProfReader.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Compression.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <string>
#include <system_error>
#include <type_traits>
#include <utility>
#include <vector>

using namespace llvm;

static cl::opt<bool> StaticFuncFullModulePrefix(
    "static-func-full-module-prefix", cl::init(true), cl::Hidden,
    cl::desc("Use full module build paths in the profile counter names for "
             "static functions."));

// This option is tailored to users that have different top-level directory in
// profile-gen and profile-use compilation. Users need to specify the number
// of levels to strip. A value larger than the number of directories in the
// source file will strip all the directory names and only leave the basename.
//
// Note current ThinLTO module importing for the indirect-calls assumes
// the source directory name not being stripped. A non-zero option value here
// can potentially prevent some inter-module indirect-call-promotions.
static cl::opt<unsigned> StaticFuncStripDirNamePrefix(
    "static-func-strip-dirname-prefix", cl::init(0), cl::Hidden,
    cl::desc("Strip specified level of directory name from source path in "
             "the profile counter name for static functions."));

// Convert an instrprof_error code into a human-readable diagnostic string.
// If \p ErrMsg is non-empty, it is appended after a ": " separator.
static std::string getInstrProfErrString(instrprof_error Err,
                                         const std::string &ErrMsg = "") {
  std::string Msg;
  raw_string_ostream OS(Msg);

  switch (Err) {
  case instrprof_error::success:
    OS << "success";
    break;
  case instrprof_error::eof:
    OS << "end of File";
    break;
  case instrprof_error::unrecognized_format:
    OS << "unrecognized instrumentation profile encoding format";
    break;
  case instrprof_error::bad_magic:
    OS << "invalid instrumentation profile data (bad magic)";
    break;
  case instrprof_error::bad_header:
    OS << "invalid instrumentation profile data (file header is corrupt)";
    break;
  case instrprof_error::unsupported_version:
    OS << "unsupported instrumentation profile format version";
    break;
  case instrprof_error::unsupported_hash_type:
    OS << "unsupported instrumentation profile hash type";
    break;
  case instrprof_error::too_large:
    OS << "too much profile data";
    break;
  case instrprof_error::truncated:
    OS << "truncated profile data";
    break;
  case instrprof_error::malformed:
    OS << "malformed instrumentation profile data";
    break;
  case instrprof_error::missing_debug_info_for_correlation:
    OS << "debug info for correlation is required";
    break;
  case instrprof_error::unexpected_debug_info_for_correlation:
    OS << "debug info for correlation is not necessary";
    break;
  case instrprof_error::unable_to_correlate_profile:
    OS << "unable to correlate profile";
    break;
  case instrprof_error::invalid_prof:
    OS << "invalid profile created. Please file a bug "
          "at: " BUG_REPORT_URL
          " and include the profraw files that caused this error.";
    break;
  case instrprof_error::unknown_function:
    OS << "no profile data available for function";
    break;
  case instrprof_error::hash_mismatch:
    OS << "function control flow change detected (hash mismatch)";
    break;
  case instrprof_error::count_mismatch:
    OS << "function basic block count change detected (counter mismatch)";
    break;
  case instrprof_error::counter_overflow:
    OS << "counter overflow";
    break;
  case instrprof_error::value_site_count_mismatch:
    OS << "function value site count change detected (counter mismatch)";
    break;
  case instrprof_error::compress_failed:
    OS << "failed to compress data (zlib)";
    break;
  case instrprof_error::uncompress_failed:
    OS << "failed to uncompress data (zlib)";
    break;
  case instrprof_error::empty_raw_profile:
    OS << "empty raw profile file";
    break;
  case instrprof_error::zlib_unavailable:
    OS << "profile uses zlib compression but the profile reader was built "
          "without zlib support";
    break;
  }

  // If optional error message is not empty, append it to the message.
  if (!ErrMsg.empty())
    OS << ": " << ErrMsg;

  return OS.str();
}

namespace {

// FIXME: This class is only here to support the transition to llvm::Error. It
// will be removed once this transition is complete. Clients should prefer to
// deal with the Error value directly, rather than converting to error_code.
class InstrProfErrorCategoryType : public std::error_category {
  const char *name() const noexcept override { return "llvm.instrprof"; }

  std::string message(int IE) const override {
    return getInstrProfErrString(static_cast<instrprof_error>(IE));
  }
};

} // end anonymous namespace

static ManagedStatic<InstrProfErrorCategoryType> ErrorCategory;

const std::error_category &llvm::instrprof_category() {
  return *ErrorCategory;
}

namespace {

// Section name tables generated from InstrProfData.inc; each is indexed by
// InstrProfSectKind.
const char *InstrProfSectNameCommon[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  SectNameCommon,
#include "llvm/ProfileData/InstrProfData.inc"
};

const char *InstrProfSectNameCoff[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  SectNameCoff,
#include "llvm/ProfileData/InstrProfData.inc"
};

const char *InstrProfSectNamePrefix[] = {
#define INSTR_PROF_SECT_ENTRY(Kind, SectNameCommon, SectNameCoff, Prefix)      \
  Prefix,
#include "llvm/ProfileData/InstrProfData.inc"
};

} // namespace

namespace llvm {

cl::opt<bool> DoInstrProfNameCompression(
    "enable-name-compression",
    cl::desc("Enable name/filename string compression"), cl::init(true));

// Return the object-file section name for section kind \p IPSK given object
// format \p OF. For Mach-O, \p AddSegmentInfo selects whether the segment
// prefix (and the linker attributes on the data section) are included.
std::string getInstrProfSectionName(InstrProfSectKind IPSK,
                                    Triple::ObjectFormatType OF,
                                    bool AddSegmentInfo) {
  std::string SectName;

  if (OF == Triple::MachO && AddSegmentInfo)
    SectName = InstrProfSectNamePrefix[IPSK];

  if (OF == Triple::COFF)
    SectName += InstrProfSectNameCoff[IPSK];
  else
    SectName += InstrProfSectNameCommon[IPSK];

  if (OF == Triple::MachO && IPSK == IPSK_data && AddSegmentInfo)
    SectName += ",regular,live_support";

  return SectName;
}

// Record a soft (non-fatal) error: remember the first error seen and bump the
// per-kind counter. A "success" value is ignored; any other kind is a bug.
void SoftInstrProfErrors::addError(instrprof_error IE) {
  if (IE == instrprof_error::success)
    return;

  if (FirstError == instrprof_error::success)
    FirstError = IE;

  switch (IE) {
  case instrprof_error::hash_mismatch:
    ++NumHashMismatches;
    break;
  case instrprof_error::count_mismatch:
    ++NumCountMismatches;
    break;
  case instrprof_error::counter_overflow:
    ++NumCounterOverflows;
    break;
  case instrprof_error::value_site_count_mismatch:
    ++NumValueSiteCountMismatches;
    break;
  default:
    llvm_unreachable("Not a soft error");
  }
}

std::string InstrProfError::message() const {
  return getInstrProfErrString(Err, Msg);
}

char InstrProfError::ID = 0;

// Compute the PGO name for a function: local-linkage symbols are prefixed
// with the file name via getGlobalIdentifier; \p Version is currently unused.
std::string getPGOFuncName(StringRef RawFuncName,
                           GlobalValue::LinkageTypes Linkage,
                           StringRef FileName,
                           uint64_t Version LLVM_ATTRIBUTE_UNUSED) {
  return GlobalValue::getGlobalIdentifier(RawFuncName, Linkage, FileName);
}

// Strip NumPrefix level of directory name from PathNameStr. If the number of
// directory separators is less than NumPrefix, strip all the directories and
// leave base file name only.
static StringRef stripDirPrefix(StringRef PathNameStr, uint32_t NumPrefix) {
  uint32_t Count = NumPrefix;
  uint32_t Pos = 0, LastPos = 0;
  for (auto &CI : PathNameStr) {
    ++Pos;
    if (llvm::sys::path::is_separator(CI)) {
      LastPos = Pos;
      --Count;
    }
    if (Count == 0)
      break;
  }
  return PathNameStr.substr(LastPos);
}

// Return the PGOFuncName. This function has some special handling when called
// in LTO optimization. The following only applies when calling in LTO passes
// (when \c InLTO is true): LTO's internalization privatizes many global linkage
// symbols. This happens after value profile annotation, but those internal
// linkage functions should not have a source prefix.
// Additionally, for ThinLTO mode, exported internal functions are promoted
// and renamed. We need to ensure that the original internal PGO name is
// used when computing the GUID that is compared against the profiled GUIDs.
// To differentiate compiler generated internal symbols from original ones,
// PGOFuncName meta data are created and attached to the original internal
// symbols in the value profile annotation step
// (PGOUseFunc::annotateIndirectCallSites). If a symbol does not have the meta
// data, its original linkage must be non-internal.
std::string getPGOFuncName(const Function &F, bool InLTO, uint64_t Version) {
  if (!InLTO) {
    StringRef FileName(F.getParent()->getSourceFileName());
    // With the full-module-prefix option the file name is kept intact
    // (strip level 0); otherwise everything is stripped ((uint32_t)-1),
    // unless the explicit strip-level option asks for more.
    uint32_t StripLevel = StaticFuncFullModulePrefix ? 0 : (uint32_t)-1;
    if (StripLevel < StaticFuncStripDirNamePrefix)
      StripLevel = StaticFuncStripDirNamePrefix;
    if (StripLevel)
      FileName = stripDirPrefix(FileName, StripLevel);
    return getPGOFuncName(F.getName(), F.getLinkage(), FileName, Version);
  }

  // In LTO mode (when InLTO is true), first check if there is a meta data.
  if (MDNode *MD = getPGOFuncNameMetadata(F)) {
    StringRef S = cast<MDString>(MD->getOperand(0))->getString();
    return S.str();
  }

  // If there is no meta data, the function must be a global before the value
  // profile annotation pass. Its current linkage may be internal if it is
  // internalized in LTO mode.
  return getPGOFuncName(F.getName(), GlobalValue::ExternalLinkage, "");
}

// Strip a leading "<FileName>:" prefix (added for local-linkage symbols)
// from \p PGOFuncName, if present.
StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName, StringRef FileName) {
  if (FileName.empty())
    return PGOFuncName;
  // Drop the file name including ':'. See also getPGOFuncName.
  if (PGOFuncName.startswith(FileName))
    PGOFuncName = PGOFuncName.drop_front(FileName.size() + 1);
  return PGOFuncName;
}

// \p FuncName is the string used as profile lookup key for the function. A
// symbol is created to hold the name. Return the legalized symbol name.
std::string getPGOFuncNameVarName(StringRef FuncName,
                                  GlobalValue::LinkageTypes Linkage) {
  std::string VarName = std::string(getInstrProfNameVarPrefix());
  VarName += FuncName;

  if (!GlobalValue::isLocalLinkage(Linkage))
    return VarName;

  // Now fix up illegal chars in local VarName that may upset the assembler.
  const char *InvalidChars = "-:<>/\"'";
  size_t found = VarName.find_first_of(InvalidChars);
  while (found != std::string::npos) {
    VarName[found] = '_';
    found = VarName.find_first_of(InvalidChars, found + 1);
  }
  return VarName;
}

// Create the global variable holding the PGO name string for a function with
// the given (adjusted) linkage.
GlobalVariable *createPGOFuncNameVar(Module &M,
                                     GlobalValue::LinkageTypes Linkage,
                                     StringRef PGOFuncName) {
  // We generally want to match the function's linkage, but available_externally
  // and extern_weak both have the wrong semantics, and anything that doesn't
  // need to link across compilation units doesn't need to be visible at all.
  if (Linkage == GlobalValue::ExternalWeakLinkage)
    Linkage = GlobalValue::LinkOnceAnyLinkage;
  else if (Linkage == GlobalValue::AvailableExternallyLinkage)
    Linkage = GlobalValue::LinkOnceODRLinkage;
  else if (Linkage == GlobalValue::InternalLinkage ||
           Linkage == GlobalValue::ExternalLinkage)
    Linkage = GlobalValue::PrivateLinkage;

  auto *Value =
      ConstantDataArray::getString(M.getContext(), PGOFuncName, false);
  auto FuncNameVar =
      new GlobalVariable(M, Value->getType(), true, Linkage, Value,
                         getPGOFuncNameVarName(PGOFuncName, Linkage));

  // Hide the symbol so that we correctly get a copy for each executable.
  if (!GlobalValue::isLocalLinkage(FuncNameVar->getLinkage()))
    FuncNameVar->setVisibility(GlobalValue::HiddenVisibility);

  return FuncNameVar;
}

GlobalVariable *createPGOFuncNameVar(Function &F, StringRef PGOFuncName) {
  return createPGOFuncNameVar(*F.getParent(), F.getLinkage(), PGOFuncName);
}

// Populate the symbol table from all named functions in \p M, mapping each
// PGO name (and certain suffix-stripped variants) to its MD5 GUID.
Error InstrProfSymtab::create(Module &M, bool InLTO) {
  for (Function &F : M) {
    // Function may not have a name: like using asm("") to overwrite the name.
    // Ignore in this case.
    if (!F.hasName())
      continue;
    const std::string &PGOFuncName = getPGOFuncName(F, InLTO);
    if (Error E = addFuncName(PGOFuncName))
      return E;
    MD5FuncMap.emplace_back(Function::getGUID(PGOFuncName), &F);
    // In ThinLTO, local function may have been promoted to global and have
    // suffix ".llvm." added to the function name. We need to add the
    // stripped function name to the symbol table so that we can find a match
    // from profile.
    //
    // We may have other suffixes similar as ".llvm." which are needed to
    // be stripped before the matching, but ".__uniq." suffix which is used
    // to differentiate internal linkage functions in different modules
    // should be kept. Now this is the only suffix with the pattern ".xxx"
    // which is kept before matching.
    const std::string UniqSuffix = ".__uniq.";
    auto pos = PGOFuncName.find(UniqSuffix);
    // Search '.' after ".__uniq." if ".__uniq." exists, otherwise
    // search '.' from the beginning.
    if (pos != std::string::npos)
      pos += UniqSuffix.length();
    else
      pos = 0;
    pos = PGOFuncName.find('.', pos);
    if (pos != std::string::npos && pos != 0) {
      const std::string &OtherFuncName = PGOFuncName.substr(0, pos);
      if (Error E = addFuncName(OtherFuncName))
        return E;
      MD5FuncMap.emplace_back(Function::getGUID(OtherFuncName), &F);
    }
  }
  Sorted = false;
  finalizeSymtab();
  return Error::success();
}

// Binary-search the sorted address map for \p Address and return the
// associated function hash, or 0 if the address is unknown.
uint64_t InstrProfSymtab::getFunctionHashFromAddress(uint64_t Address) {
  finalizeSymtab();
  auto It = partition_point(AddrToMD5Map, [=](std::pair<uint64_t, uint64_t> A) {
    return A.first < Address;
  });
  // Raw function pointer collected by value profiler may be from
  // external functions that are not instrumented. They won't have
  // mapping data to be used by the deserializer. Force the value to
  // be 0 in this case.
  if (It != AddrToMD5Map.end() && It->first == Address)
    return (uint64_t)It->second;
  return 0;
}

// Serialize \p NameStrs into \p Result: a ULEB128 uncompressed size, a
// ULEB128 compressed size (0 when uncompressed), then the (optionally
// zlib-compressed) separator-joined name string.
Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
                                bool doCompression, std::string &Result) {
  assert(!NameStrs.empty() && "No name data to emit");

  uint8_t Header[16], *P = Header;
  std::string UncompressedNameStrings =
      join(NameStrs.begin(), NameStrs.end(), getInstrProfNameSeparator());

  assert(StringRef(UncompressedNameStrings)
                 .count(getInstrProfNameSeparator()) == (NameStrs.size() - 1) &&
         "PGO name is invalid (contains separator token)");

  unsigned EncLen = encodeULEB128(UncompressedNameStrings.length(), P);
  P += EncLen;

  auto WriteStringToResult = [&](size_t CompressedLen, StringRef InputStr) {
    EncLen = encodeULEB128(CompressedLen, P);
    P += EncLen;
    char *HeaderStr = reinterpret_cast<char *>(&Header[0]);
    unsigned HeaderLen = P - &Header[0];
    Result.append(HeaderStr, HeaderLen);
    Result += InputStr;
    return Error::success();
  };

  if (!doCompression) {
    return WriteStringToResult(0, UncompressedNameStrings);
  }

  SmallString<128> CompressedNameStrings;
  zlib::compress(StringRef(UncompressedNameStrings), CompressedNameStrings,
                 zlib::BestSizeCompression);

  return WriteStringToResult(CompressedNameStrings.size(),
                             CompressedNameStrings);
}

// Extract the name string stored in a PGO name variable's initializer,
// excluding a trailing NUL when the initializer is a C string.
StringRef getPGOFuncNameVarInitializer(GlobalVariable *NameVar) {
  auto *Arr = cast<ConstantDataArray>(NameVar->getInitializer());
  StringRef NameStr =
      Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
  return NameStr;
}

Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
                                std::string &Result, bool doCompression) {
  std::vector<std::string> NameStrs;
  for (auto *NameVar : NameVars) {
    NameStrs.push_back(std::string(getPGOFuncNameVarInitializer(NameVar)));
  }
  return collectPGOFuncNameStrings(
      NameStrs, zlib::isAvailable() && doCompression, Result);
}

// Parse the payload produced by collectPGOFuncNameStrings and register each
// name with \p Symtab. Each chunk is: ULEB128 uncompressed size, ULEB128
// compressed size (0 => stored uncompressed), payload, zero padding.
Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab) {
  const uint8_t *P = NameStrings.bytes_begin();
  const uint8_t *EndP = NameStrings.bytes_end();
  while (P < EndP) {
    uint32_t N;
    uint64_t UncompressedSize = decodeULEB128(P, &N);
    P += N;
    uint64_t CompressedSize = decodeULEB128(P, &N);
    P += N;
    bool isCompressed = (CompressedSize != 0);
    SmallString<128> UncompressedNameStrings;
    StringRef NameStrings;
    if (isCompressed) {
      if (!llvm::zlib::isAvailable())
        return make_error<InstrProfError>(instrprof_error::zlib_unavailable);

      StringRef CompressedNameStrings(reinterpret_cast<const char *>(P),
                                      CompressedSize);
      if (Error E =
              zlib::uncompress(CompressedNameStrings, UncompressedNameStrings,
                               UncompressedSize)) {
        consumeError(std::move(E));
        return make_error<InstrProfError>(instrprof_error::uncompress_failed);
      }
      P += CompressedSize;
      NameStrings = StringRef(UncompressedNameStrings.data(),
                              UncompressedNameStrings.size());
    } else {
      NameStrings =
          StringRef(reinterpret_cast<const char *>(P), UncompressedSize);
      P += UncompressedSize;
    }
    // Now parse the name strings.
    SmallVector<StringRef, 0> Names;
    NameStrings.split(Names, getInstrProfNameSeparator());
    for (StringRef &Name : Names)
      if (Error E = Symtab.addFuncName(Name))
        return E;

    // Skip padding zeroes between chunks.
    while (P < EndP && *P == 0)
      P++;
  }
  return Error::success();
}

// Accumulate this record's edge counts and per-kind value-profile counts
// into \p Sum.
void InstrProfRecord::accumulateCounts(CountSumOrPercent &Sum) const {
  uint64_t FuncSum = 0;
  Sum.NumEntries += Counts.size();
  for (uint64_t Count : Counts)
    FuncSum += Count;
  Sum.CountSum += FuncSum;

  for (uint32_t VK = IPVK_First; VK <= IPVK_Last; ++VK) {
    uint64_t KindSum = 0;
    uint32_t NumValueSites = getNumValueSites(VK);
    for (size_t I = 0; I < NumValueSites; ++I) {
      uint32_t NV = getNumValueDataForSite(VK, I);
      std::unique_ptr<InstrProfValueData[]> VD = getValueForSite(VK, I);
      for (uint32_t V = 0; V < NV; V++)
        KindSum += VD[V].Count;
    }
    Sum.ValueCounts[VK] += KindSum;
  }
}

// Compute the overlap score between this site's value data and \p Input's,
// matching entries by target value (both sides are sorted first).
void InstrProfValueSiteRecord::overlap(InstrProfValueSiteRecord &Input,
                                       uint32_t ValueKind,
                                       OverlapStats &Overlap,
                                       OverlapStats &FuncLevelOverlap) {
  this->sortByTargetValues();
  Input.sortByTargetValues();
  double Score = 0.0f, FuncLevelScore = 0.0f;
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  auto J = Input.ValueData.begin();
  auto JE = Input.ValueData.end();
  while (I != IE && J != JE) {
    if (I->Value == J->Value) {
      Score += OverlapStats::score(I->Count, J->Count,
                                   Overlap.Base.ValueCounts[ValueKind],
                                   Overlap.Test.ValueCounts[ValueKind]);
      FuncLevelScore += OverlapStats::score(
          I->Count, J->Count, FuncLevelOverlap.Base.ValueCounts[ValueKind],
          FuncLevelOverlap.Test.ValueCounts[ValueKind]);
      ++I;
    } else if (I->Value < J->Value) {
      ++I;
      continue;
    }
    ++J;
  }
  Overlap.Overlap.ValueCounts[ValueKind] += Score;
  FuncLevelOverlap.Overlap.ValueCounts[ValueKind] += FuncLevelScore;
}

// Compute the per-kind value-profile overlap between two records.
// Compute the overlap of value profile data of kind \p ValueKind between
// this record and \p Other, site by site. The two records must have the
// same number of value sites for this kind (asserted).
void InstrProfRecord::overlapValueProfData(uint32_t ValueKind,
                                           InstrProfRecord &Other,
                                           OverlapStats &Overlap,
                                           OverlapStats &FuncLevelOverlap) {
  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
  assert(ThisNumValueSites == Other.getNumValueSites(ValueKind));
  if (!ThisNumValueSites)
    return;

  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
      getOrCreateValueSitesForKind(ValueKind);
  MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
      Other.getValueSitesForKind(ValueKind);
  for (uint32_t I = 0; I < ThisNumValueSites; I++)
    ThisSiteRecords[I].overlap(OtherSiteRecords[I], ValueKind, Overlap,
                               FuncLevelOverlap);
}

// Compute the overlap between this record (base) and \p Other (test). On a
// counter or value-site count mismatch, record a mismatch instead of a score.
// Function-level results are only marked valid when the test record's max
// count reaches \p ValueCutoff.
void InstrProfRecord::overlap(InstrProfRecord &Other, OverlapStats &Overlap,
                              OverlapStats &FuncLevelOverlap,
                              uint64_t ValueCutoff) {
  // FuncLevel CountSum for other should already be computed and nonzero.
  assert(FuncLevelOverlap.Test.CountSum >= 1.0f);
  accumulateCounts(FuncLevelOverlap.Base);
  bool Mismatch = (Counts.size() != Other.Counts.size());

  // Check if the value profiles mismatch.
  if (!Mismatch) {
    for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind) {
      uint32_t ThisNumValueSites = getNumValueSites(Kind);
      uint32_t OtherNumValueSites = Other.getNumValueSites(Kind);
      if (ThisNumValueSites != OtherNumValueSites) {
        Mismatch = true;
        break;
      }
    }
  }
  if (Mismatch) {
    Overlap.addOneMismatch(FuncLevelOverlap.Test);
    return;
  }

  // Compute overlap for value counts.
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    overlapValueProfData(Kind, Other, Overlap, FuncLevelOverlap);

  double Score = 0.0;
  uint64_t MaxCount = 0;
  // Compute overlap for edge counts.
  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    Score += OverlapStats::score(Counts[I], Other.Counts[I],
                                 Overlap.Base.CountSum, Overlap.Test.CountSum);
    MaxCount = std::max(Other.Counts[I], MaxCount);
  }
  Overlap.Overlap.CountSum += Score;
  Overlap.Overlap.NumEntries += 1;

  if (MaxCount >= ValueCutoff) {
    double FuncScore = 0.0;
    for (size_t I = 0, E = Other.Counts.size(); I < E; ++I)
      FuncScore += OverlapStats::score(Counts[I], Other.Counts[I],
                                       FuncLevelOverlap.Base.CountSum,
                                       FuncLevelOverlap.Test.CountSum);
    FuncLevelOverlap.Overlap.CountSum = FuncScore;
    FuncLevelOverlap.Overlap.NumEntries = Other.Counts.size();
    FuncLevelOverlap.Valid = true;
  }
}

// Merge \p Input's value data into this site. Both sides are sorted by
// target value; matching values have their counts combined (scaled by
// \p Weight, saturating), and unmatched input entries are inserted.
// \p Warn is invoked on counter overflow.
void InstrProfValueSiteRecord::merge(InstrProfValueSiteRecord &Input,
                                     uint64_t Weight,
                                     function_ref<void(instrprof_error)> Warn) {
  this->sortByTargetValues();
  Input.sortByTargetValues();
  auto I = ValueData.begin();
  auto IE = ValueData.end();
  for (const InstrProfValueData &J : Input.ValueData) {
    while (I != IE && I->Value < J.Value)
      ++I;
    if (I != IE && I->Value == J.Value) {
      bool Overflowed;
      I->Count = SaturatingMultiplyAdd(J.Count, Weight, I->Count, &Overflowed);
      if (Overflowed)
        Warn(instrprof_error::counter_overflow);
      ++I;
      continue;
    }
    ValueData.insert(I, J);
  }
}

// Scale every count at this site by N/D (saturating multiply, then divide).
// \p Warn is invoked on counter overflow.
void InstrProfValueSiteRecord::scale(uint64_t N, uint64_t D,
                                     function_ref<void(instrprof_error)> Warn) {
  for (InstrProfValueData &I : ValueData) {
    bool Overflowed;
    I.Count = SaturatingMultiply(I.Count, N, &Overflowed) / D;
    if (Overflowed)
      Warn(instrprof_error::counter_overflow);
  }
}

// Merge Value Profile data from Src record to this record for ValueKind.
// Scale merged value counts by \p Weight.
// Merge value profile data of kind \p ValueKind from \p Src into this record,
// site by site, scaling merged counts by \p Weight. A value-site count
// mismatch is reported through \p Warn and aborts the merge for this kind.
void InstrProfRecord::mergeValueProfData(
    uint32_t ValueKind, InstrProfRecord &Src, uint64_t Weight,
    function_ref<void(instrprof_error)> Warn) {
  uint32_t ThisNumValueSites = getNumValueSites(ValueKind);
  uint32_t OtherNumValueSites = Src.getNumValueSites(ValueKind);
  if (ThisNumValueSites != OtherNumValueSites) {
    Warn(instrprof_error::value_site_count_mismatch);
    return;
  }
  if (!ThisNumValueSites)
    return;
  std::vector<InstrProfValueSiteRecord> &ThisSiteRecords =
      getOrCreateValueSitesForKind(ValueKind);
  MutableArrayRef<InstrProfValueSiteRecord> OtherSiteRecords =
      Src.getValueSitesForKind(ValueKind);
  for (uint32_t I = 0; I < ThisNumValueSites; I++)
    ThisSiteRecords[I].merge(OtherSiteRecords[I], Weight, Warn);
}

// Merge \p Other into this record: edge counts are combined with saturating
// multiply-add (scaled by \p Weight), then value profile data is merged per
// kind. Mismatches and overflows are reported via \p Warn.
void InstrProfRecord::merge(InstrProfRecord &Other, uint64_t Weight,
                            function_ref<void(instrprof_error)> Warn) {
  // If the number of counters doesn't match we either have bad data
  // or a hash collision.
  if (Counts.size() != Other.Counts.size()) {
    Warn(instrprof_error::count_mismatch);
    return;
  }

  for (size_t I = 0, E = Other.Counts.size(); I < E; ++I) {
    bool Overflowed;
    Counts[I] =
        SaturatingMultiplyAdd(Other.Counts[I], Weight, Counts[I], &Overflowed);
    if (Overflowed)
      Warn(instrprof_error::counter_overflow);
  }

  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    mergeValueProfData(Kind, Other, Weight, Warn);
}

// Scale all value-site counts of kind \p ValueKind by N/D.
void InstrProfRecord::scaleValueProfData(
    uint32_t ValueKind, uint64_t N, uint64_t D,
    function_ref<void(instrprof_error)> Warn) {
  for (auto &R : getValueSitesForKind(ValueKind))
    R.scale(N, D, Warn);
}

// Scale this record's edge counts and all value profile data by N/D
// (saturating). D must be nonzero; overflows are reported via \p Warn.
void InstrProfRecord::scale(uint64_t N, uint64_t D,
                            function_ref<void(instrprof_error)> Warn) {
  assert(D != 0 && "D cannot be 0");
  for (auto &Count : this->Counts) {
    bool Overflowed;
    Count = SaturatingMultiply(Count, N, &Overflowed) / D;
    if (Overflowed)
      Warn(instrprof_error::counter_overflow);
  }
  for (uint32_t Kind = IPVK_First; Kind <= IPVK_Last; ++Kind)
    scaleValueProfData(Kind, N, D, Warn);
}

// Map indirect call target name hash to name string.
// Remap a raw profiled value: for indirect call targets, translate the
// collected raw address into the target function's hash via the symbol
// table; other kinds (and a null symbol table) pass through unchanged.
uint64_t InstrProfRecord::remapValue(uint64_t Value, uint32_t ValueKind,
                                     InstrProfSymtab *SymTab) {
  if (!SymTab)
    return Value;

  if (ValueKind == IPVK_IndirectCallTarget)
    return SymTab->getFunctionHashFromAddress(Value);

  return Value;
}

// Append a new value site of kind \p ValueKind holding the \p N entries in
// \p VData, remapping each value through \p ValueMap first (may be null).
void InstrProfRecord::addValueData(uint32_t ValueKind, uint32_t Site,
                                   InstrProfValueData *VData, uint32_t N,
                                   InstrProfSymtab *ValueMap) {
  for (uint32_t I = 0; I < N; I++) {
    VData[I].Value = remapValue(VData[I].Value, ValueKind, ValueMap);
  }
  std::vector<InstrProfValueSiteRecord> &ValueSites =
      getOrCreateValueSitesForKind(ValueKind);
  if (N == 0)
    ValueSites.emplace_back();
  else
    ValueSites.emplace_back(VData, VData + N);
}

#define INSTR_PROF_COMMON_API_IMPL
#include "llvm/ProfileData/InstrProfData.inc"

/*!
 * ValueProfRecordClosure Interface implementation for InstrProfRecord
 * class. These C wrappers are used as adaptors so that C++ code can be
 * invoked as callbacks.
 */
uint32_t getNumValueKindsInstrProf(const void *Record) {
  return reinterpret_cast<const InstrProfRecord *>(Record)->getNumValueKinds();
}

uint32_t getNumValueSitesInstrProf(const void *Record, uint32_t VKind) {
  return reinterpret_cast<const InstrProfRecord *>(Record)
      ->getNumValueSites(VKind);
}

uint32_t getNumValueDataInstrProf(const void *Record, uint32_t VKind) {
  return reinterpret_cast<const InstrProfRecord *>(Record)
      ->getNumValueData(VKind);
}

uint32_t getNumValueDataForSiteInstrProf(const void *R, uint32_t VK,
                                         uint32_t S) {
  return reinterpret_cast<const InstrProfRecord *>(R)
      ->getNumValueDataForSite(VK, S);
}

void getValueForSiteInstrProf(const void *R, InstrProfValueData *Dst,
                              uint32_t K, uint32_t S) {
  reinterpret_cast<const InstrProfRecord *>(R)->getValueForSite(Dst, K, S);
}

// Allocate a zero-initialized ValueProfData buffer of the requested total
// size (the structure has a variable-length tail, hence raw operator new).
ValueProfData *allocValueProfDataInstrProf(size_t TotalSizeInBytes) {
  ValueProfData *VD =
      (ValueProfData *)(new (::operator new(TotalSizeInBytes)) ValueProfData());
  memset(VD, 0, TotalSizeInBytes);
  return VD;
}

// Closure template: Record is filled in per call site before use.
static ValueProfRecordClosure InstrProfRecordClosure = {
    nullptr,
    getNumValueKindsInstrProf,
    getNumValueSitesInstrProf,
    getNumValueDataInstrProf,
    getNumValueDataForSiteInstrProf,
    nullptr,
    getValueForSiteInstrProf,
    allocValueProfDataInstrProf};

// Wrapper implementation using the closure mechanism.
uint32_t ValueProfData::getSize(const InstrProfRecord &Record) {
  auto Closure = InstrProfRecordClosure;
  Closure.Record = &Record;
  return getValueProfDataSize(&Closure);
}

// Wrapper implementation using the closure mechanism.
// Serialize \p Record's value profile data into a freshly allocated
// ValueProfData buffer. NOTE(review): this overload mutates the shared
// static closure's Record field rather than a local copy (unlike getSize).
std::unique_ptr<ValueProfData>
ValueProfData::serializeFrom(const InstrProfRecord &Record) {
  InstrProfRecordClosure.Record = &Record;

  std::unique_ptr<ValueProfData> VPD(
      serializeValueProfDataFrom(&InstrProfRecordClosure, nullptr));
  return VPD;
}

// Expand this serialized record into \p Record, remapping raw values
// through \p SymTab (may be null).
void ValueProfRecord::deserializeTo(InstrProfRecord &Record,
                                    InstrProfSymtab *SymTab) {
  Record.reserveSites(Kind, NumValueSites);

  InstrProfValueData *ValueData = getValueProfRecordValueData(this);
  for (uint64_t VSite = 0; VSite < NumValueSites; ++VSite) {
    uint8_t ValueDataCount = this->SiteCountArray[VSite];
    Record.addValueData(Kind, VSite, ValueData, ValueDataCount, SymTab);
    ValueData += ValueDataCount;
  }
}

// For writing/serializing, Old is the host endianness, and New is
// byte order intended on disk. For Reading/deserialization, Old
// is the on-disk source endianness, and New is the host endianness.
void ValueProfRecord::swapBytes(support::endianness Old,
                                support::endianness New) {
  using namespace support;

  if (Old == New)
    return;

  // When reading (host != Old), the header fields must be swapped to host
  // order *before* they are used to locate the value data below.
  if (getHostEndianness() != Old) {
    sys::swapByteOrder<uint32_t>(NumValueSites);
    sys::swapByteOrder<uint32_t>(Kind);
  }
  uint32_t ND = getValueProfRecordNumValueData(this);
  InstrProfValueData *VD = getValueProfRecordValueData(this);

  // No need to swap byte array: SiteCountArray.
  for (uint32_t I = 0; I < ND; I++) {
    sys::swapByteOrder<uint64_t>(VD[I].Value);
    sys::swapByteOrder<uint64_t>(VD[I].Count);
  }
  // When writing (host == Old), the header fields are swapped last, after
  // they were used in host order above.
  if (getHostEndianness() == Old) {
    sys::swapByteOrder<uint32_t>(NumValueSites);
    sys::swapByteOrder<uint32_t>(Kind);
  }
}

// Expand all contained value profile records into \p Record.
void ValueProfData::deserializeTo(InstrProfRecord &Record,
                                  InstrProfSymtab *SymTab) {
  if (NumValueKinds == 0)
    return;

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    VR->deserializeTo(Record, SymTab);
    VR = getValueProfRecordNext(VR);
  }
}

// Read a T from \p D in \p Orig byte order, advancing D.
template <class T>
static T swapToHostOrder(const unsigned char *&D, support::endianness Orig) {
  using namespace support;

  if (Orig == little)
    return endian::readNext<T, little, unaligned>(D);
  else
    return endian::readNext<T, big, unaligned>(D);
}

// Allocate an uninitialized-tail ValueProfData buffer of \p TotalSize bytes.
static std::unique_ptr<ValueProfData> allocValueProfData(uint32_t TotalSize) {
  return std::unique_ptr<ValueProfData>(new (::operator new(TotalSize))
                                            ValueProfData());
}

// Validate the header and record layout of this (already host-endian)
// buffer; returns a malformed error describing the first violation found.
Error ValueProfData::checkIntegrity() {
  if (NumValueKinds > IPVK_Last + 1)
    return make_error<InstrProfError>(
        instrprof_error::malformed, "number of value profile kinds is invalid");
  // Total size needs to be multiple of quadword size.
  if (TotalSize % sizeof(uint64_t))
    return make_error<InstrProfError>(
        instrprof_error::malformed, "total size is not multiples of quardword");

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < this->NumValueKinds; K++) {
    if (VR->Kind > IPVK_Last)
      return make_error<InstrProfError>(instrprof_error::malformed,
                                        "value kind is invalid");
    VR = getValueProfRecordNext(VR);
    if ((char *)VR - (char *)this > (ptrdiff_t)TotalSize)
      return make_error<InstrProfError>(
          instrprof_error::malformed,
          "value profile address is greater than total size");
  }
  return Error::success();
}

// Read one serialized ValueProfData from \p D (bounded by \p BufferEnd) in
// \p Endianness byte order, copy it into an owned buffer, convert to host
// byte order and validate it.
Expected<std::unique_ptr<ValueProfData>>
ValueProfData::getValueProfData(const unsigned char *D,
                                const unsigned char *const BufferEnd,
                                support::endianness Endianness) {
  using namespace support;

  if (D + sizeof(ValueProfData) > BufferEnd)
    return make_error<InstrProfError>(instrprof_error::truncated);

  const unsigned char *Header = D;
  uint32_t TotalSize = swapToHostOrder<uint32_t>(Header, Endianness);
  if (D + TotalSize > BufferEnd)
    return make_error<InstrProfError>(instrprof_error::too_large);

  std::unique_ptr<ValueProfData> VPD = allocValueProfData(TotalSize);
  memcpy(VPD.get(), D, TotalSize);
  // Byte swap.
  VPD->swapBytesToHost(Endianness);

  Error E = VPD->checkIntegrity();
  if (E)
    return std::move(E);

  return std::move(VPD);
}

// Convert this buffer from \p Endianness to host byte order: header first
// (so record traversal works), then each record.
void ValueProfData::swapBytesToHost(support::endianness Endianness) {
  using namespace support;

  if (Endianness == getHostEndianness())
    return;

  sys::swapByteOrder<uint32_t>(TotalSize);
  sys::swapByteOrder<uint32_t>(NumValueKinds);

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    VR->swapBytes(Endianness, getHostEndianness());
    VR = getValueProfRecordNext(VR);
  }
}

// Convert this buffer from host to \p Endianness byte order: each record's
// next pointer is computed *before* swapping it, and the header is swapped
// last (mirror image of swapBytesToHost).
void ValueProfData::swapBytesFromHost(support::endianness Endianness) {
  using namespace support;

  if (Endianness == getHostEndianness())
    return;

  ValueProfRecord *VR = getFirstValueProfRecord(this);
  for (uint32_t K = 0; K < NumValueKinds; K++) {
    ValueProfRecord *NVR = getValueProfRecordNext(VR);
    VR->swapBytes(getHostEndianness(), Endianness);
    VR = NVR;
  }
  sys::swapByteOrder<uint32_t>(TotalSize);
  sys::swapByteOrder<uint32_t>(NumValueKinds);
}

// Attach value-profile metadata for site \p SiteIdx of kind \p ValueKind in
// \p InstrProfR to instruction \p Inst, keeping at most \p MaxMDCount
// entries. No-op when the site has no value data.
void annotateValueSite(Module &M, Instruction &Inst,
                       const InstrProfRecord &InstrProfR,
                       InstrProfValueKind ValueKind, uint32_t SiteIdx,
                       uint32_t MaxMDCount) {
  uint32_t NV = InstrProfR.getNumValueDataForSite(ValueKind, SiteIdx);
  if (!NV)
    return;

  uint64_t Sum = 0;
  std::unique_ptr<InstrProfValueData[]> VD =
      InstrProfR.getValueForSite(ValueKind, SiteIdx, &Sum);

  ArrayRef<InstrProfValueData> VDs(VD.get(), NV);
  annotateValueSite(M, Inst, VDs, Sum, ValueKind, MaxMDCount);
}

// Build and attach the value-profile metadata node from an explicit list of
// value data entries. (Body continues beyond this excerpt.)
void annotateValueSite(Module &M, Instruction &Inst,
                       ArrayRef<InstrProfValueData> VDs,
                       uint64_t Sum, InstrProfValueKind ValueKind,
                       uint32_t MaxMDCount) {
  LLVMContext &Ctx = M.getContext();
  MDBuilder MDHelper(Ctx);
  SmallVector<Metadata *, 3> Vals;
  // Tag
Vals.push_back(MDHelper.createString("VP")); 1019 // Value Kind 1020 Vals.push_back(MDHelper.createConstant( 1021 ConstantInt::get(Type::getInt32Ty(Ctx), ValueKind))); 1022 // Total Count 1023 Vals.push_back( 1024 MDHelper.createConstant(ConstantInt::get(Type::getInt64Ty(Ctx), Sum))); 1025 1026 // Value Profile Data 1027 uint32_t MDCount = MaxMDCount; 1028 for (auto &VD : VDs) { 1029 Vals.push_back(MDHelper.createConstant( 1030 ConstantInt::get(Type::getInt64Ty(Ctx), VD.Value))); 1031 Vals.push_back(MDHelper.createConstant( 1032 ConstantInt::get(Type::getInt64Ty(Ctx), VD.Count))); 1033 if (--MDCount == 0) 1034 break; 1035 } 1036 Inst.setMetadata(LLVMContext::MD_prof, MDNode::get(Ctx, Vals)); 1037 } 1038 1039 bool getValueProfDataFromInst(const Instruction &Inst, 1040 InstrProfValueKind ValueKind, 1041 uint32_t MaxNumValueData, 1042 InstrProfValueData ValueData[], 1043 uint32_t &ActualNumValueData, uint64_t &TotalC, 1044 bool GetNoICPValue) { 1045 MDNode *MD = Inst.getMetadata(LLVMContext::MD_prof); 1046 if (!MD) 1047 return false; 1048 1049 unsigned NOps = MD->getNumOperands(); 1050 1051 if (NOps < 5) 1052 return false; 1053 1054 // Operand 0 is a string tag "VP": 1055 MDString *Tag = cast<MDString>(MD->getOperand(0)); 1056 if (!Tag) 1057 return false; 1058 1059 if (!Tag->getString().equals("VP")) 1060 return false; 1061 1062 // Now check kind: 1063 ConstantInt *KindInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(1)); 1064 if (!KindInt) 1065 return false; 1066 if (KindInt->getZExtValue() != ValueKind) 1067 return false; 1068 1069 // Get total count 1070 ConstantInt *TotalCInt = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2)); 1071 if (!TotalCInt) 1072 return false; 1073 TotalC = TotalCInt->getZExtValue(); 1074 1075 ActualNumValueData = 0; 1076 1077 for (unsigned I = 3; I < NOps; I += 2) { 1078 if (ActualNumValueData >= MaxNumValueData) 1079 break; 1080 ConstantInt *Value = mdconst::dyn_extract<ConstantInt>(MD->getOperand(I)); 1081 ConstantInt *Count = 
1082 mdconst::dyn_extract<ConstantInt>(MD->getOperand(I + 1)); 1083 if (!Value || !Count) 1084 return false; 1085 uint64_t CntValue = Count->getZExtValue(); 1086 if (!GetNoICPValue && (CntValue == NOMORE_ICP_MAGICNUM)) 1087 continue; 1088 ValueData[ActualNumValueData].Value = Value->getZExtValue(); 1089 ValueData[ActualNumValueData].Count = CntValue; 1090 ActualNumValueData++; 1091 } 1092 return true; 1093 } 1094 1095 MDNode *getPGOFuncNameMetadata(const Function &F) { 1096 return F.getMetadata(getPGOFuncNameMetadataName()); 1097 } 1098 1099 void createPGOFuncNameMetadata(Function &F, StringRef PGOFuncName) { 1100 // Only for internal linkage functions. 1101 if (PGOFuncName == F.getName()) 1102 return; 1103 // Don't create duplicated meta-data. 1104 if (getPGOFuncNameMetadata(F)) 1105 return; 1106 LLVMContext &C = F.getContext(); 1107 MDNode *N = MDNode::get(C, MDString::get(C, PGOFuncName)); 1108 F.setMetadata(getPGOFuncNameMetadataName(), N); 1109 } 1110 1111 bool needsComdatForCounter(const Function &F, const Module &M) { 1112 if (F.hasComdat()) 1113 return true; 1114 1115 if (!Triple(M.getTargetTriple()).supportsCOMDAT()) 1116 return false; 1117 1118 // See createPGOFuncNameVar for more details. To avoid link errors, profile 1119 // counters for function with available_externally linkage needs to be changed 1120 // to linkonce linkage. On ELF based systems, this leads to weak symbols to be 1121 // created. Without using comdat, duplicate entries won't be removed by the 1122 // linker leading to increased data segement size and raw profile size. Even 1123 // worse, since the referenced counter from profile per-function data object 1124 // will be resolved to the common strong definition, the profile counts for 1125 // available_externally functions will end up being duplicated in raw profile 1126 // data. This can result in distorted profile as the counts of those dups 1127 // will be accumulated by the profile merger. 
1128 GlobalValue::LinkageTypes Linkage = F.getLinkage(); 1129 if (Linkage != GlobalValue::ExternalWeakLinkage && 1130 Linkage != GlobalValue::AvailableExternallyLinkage) 1131 return false; 1132 1133 return true; 1134 } 1135 1136 // Check if INSTR_PROF_RAW_VERSION_VAR is defined. 1137 bool isIRPGOFlagSet(const Module *M) { 1138 auto IRInstrVar = 1139 M->getNamedGlobal(INSTR_PROF_QUOTE(INSTR_PROF_RAW_VERSION_VAR)); 1140 if (!IRInstrVar || IRInstrVar->hasLocalLinkage()) 1141 return false; 1142 1143 // For CSPGO+LTO, this variable might be marked as non-prevailing and we only 1144 // have the decl. 1145 if (IRInstrVar->isDeclaration()) 1146 return true; 1147 1148 // Check if the flag is set. 1149 if (!IRInstrVar->hasInitializer()) 1150 return false; 1151 1152 auto *InitVal = dyn_cast_or_null<ConstantInt>(IRInstrVar->getInitializer()); 1153 if (!InitVal) 1154 return false; 1155 return (InitVal->getZExtValue() & VARIANT_MASK_IR_PROF) != 0; 1156 } 1157 1158 // Check if we can safely rename this Comdat function. 1159 bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken) { 1160 if (F.getName().empty()) 1161 return false; 1162 if (!needsComdatForCounter(F, *(F.getParent()))) 1163 return false; 1164 // Unsafe to rename the address-taken function (which can be used in 1165 // function comparison). 1166 if (CheckAddressTaken && F.hasAddressTaken()) 1167 return false; 1168 // Only safe to do if this function may be discarded if it is not used 1169 // in the compilation unit. 1170 if (!GlobalValue::isDiscardableIfUnused(F.getLinkage())) 1171 return false; 1172 1173 // For AvailableExternallyLinkage functions. 1174 if (!F.hasComdat()) { 1175 assert(F.getLinkage() == GlobalValue::AvailableExternallyLinkage); 1176 return true; 1177 } 1178 return true; 1179 } 1180 1181 // Create the variable for the profile file name. 
1182 void createProfileFileNameVar(Module &M, StringRef InstrProfileOutput) { 1183 if (InstrProfileOutput.empty()) 1184 return; 1185 Constant *ProfileNameConst = 1186 ConstantDataArray::getString(M.getContext(), InstrProfileOutput, true); 1187 GlobalVariable *ProfileNameVar = new GlobalVariable( 1188 M, ProfileNameConst->getType(), true, GlobalValue::WeakAnyLinkage, 1189 ProfileNameConst, INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR)); 1190 Triple TT(M.getTargetTriple()); 1191 if (TT.supportsCOMDAT()) { 1192 ProfileNameVar->setLinkage(GlobalValue::ExternalLinkage); 1193 ProfileNameVar->setComdat(M.getOrInsertComdat( 1194 StringRef(INSTR_PROF_QUOTE(INSTR_PROF_PROFILE_NAME_VAR)))); 1195 } 1196 } 1197 1198 Error OverlapStats::accumulateCounts(const std::string &BaseFilename, 1199 const std::string &TestFilename, 1200 bool IsCS) { 1201 auto getProfileSum = [IsCS](const std::string &Filename, 1202 CountSumOrPercent &Sum) -> Error { 1203 auto ReaderOrErr = InstrProfReader::create(Filename); 1204 if (Error E = ReaderOrErr.takeError()) { 1205 return E; 1206 } 1207 auto Reader = std::move(ReaderOrErr.get()); 1208 Reader->accumulateCounts(Sum, IsCS); 1209 return Error::success(); 1210 }; 1211 auto Ret = getProfileSum(BaseFilename, Base); 1212 if (Ret) 1213 return Ret; 1214 Ret = getProfileSum(TestFilename, Test); 1215 if (Ret) 1216 return Ret; 1217 this->BaseFilename = &BaseFilename; 1218 this->TestFilename = &TestFilename; 1219 Valid = true; 1220 return Error::success(); 1221 } 1222 1223 void OverlapStats::addOneMismatch(const CountSumOrPercent &MismatchFunc) { 1224 Mismatch.NumEntries += 1; 1225 Mismatch.CountSum += MismatchFunc.CountSum / Test.CountSum; 1226 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) { 1227 if (Test.ValueCounts[I] >= 1.0f) 1228 Mismatch.ValueCounts[I] += 1229 MismatchFunc.ValueCounts[I] / Test.ValueCounts[I]; 1230 } 1231 } 1232 1233 void OverlapStats::addOneUnique(const CountSumOrPercent &UniqueFunc) { 1234 Unique.NumEntries += 1; 1235 
Unique.CountSum += UniqueFunc.CountSum / Test.CountSum; 1236 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) { 1237 if (Test.ValueCounts[I] >= 1.0f) 1238 Unique.ValueCounts[I] += UniqueFunc.ValueCounts[I] / Test.ValueCounts[I]; 1239 } 1240 } 1241 1242 void OverlapStats::dump(raw_fd_ostream &OS) const { 1243 if (!Valid) 1244 return; 1245 1246 const char *EntryName = 1247 (Level == ProgramLevel ? "functions" : "edge counters"); 1248 if (Level == ProgramLevel) { 1249 OS << "Profile overlap infomation for base_profile: " << *BaseFilename 1250 << " and test_profile: " << *TestFilename << "\nProgram level:\n"; 1251 } else { 1252 OS << "Function level:\n" 1253 << " Function: " << FuncName << " (Hash=" << FuncHash << ")\n"; 1254 } 1255 1256 OS << " # of " << EntryName << " overlap: " << Overlap.NumEntries << "\n"; 1257 if (Mismatch.NumEntries) 1258 OS << " # of " << EntryName << " mismatch: " << Mismatch.NumEntries 1259 << "\n"; 1260 if (Unique.NumEntries) 1261 OS << " # of " << EntryName 1262 << " only in test_profile: " << Unique.NumEntries << "\n"; 1263 1264 OS << " Edge profile overlap: " << format("%.3f%%", Overlap.CountSum * 100) 1265 << "\n"; 1266 if (Mismatch.NumEntries) 1267 OS << " Mismatched count percentage (Edge): " 1268 << format("%.3f%%", Mismatch.CountSum * 100) << "\n"; 1269 if (Unique.NumEntries) 1270 OS << " Percentage of Edge profile only in test_profile: " 1271 << format("%.3f%%", Unique.CountSum * 100) << "\n"; 1272 OS << " Edge profile base count sum: " << format("%.0f", Base.CountSum) 1273 << "\n" 1274 << " Edge profile test count sum: " << format("%.0f", Test.CountSum) 1275 << "\n"; 1276 1277 for (unsigned I = 0; I < IPVK_Last - IPVK_First + 1; I++) { 1278 if (Base.ValueCounts[I] < 1.0f && Test.ValueCounts[I] < 1.0f) 1279 continue; 1280 char ProfileKindName[20]; 1281 switch (I) { 1282 case IPVK_IndirectCallTarget: 1283 strncpy(ProfileKindName, "IndirectCall", 19); 1284 break; 1285 case IPVK_MemOPSize: 1286 strncpy(ProfileKindName, 
"MemOP", 19); 1287 break; 1288 default: 1289 snprintf(ProfileKindName, 19, "VP[%d]", I); 1290 break; 1291 } 1292 OS << " " << ProfileKindName 1293 << " profile overlap: " << format("%.3f%%", Overlap.ValueCounts[I] * 100) 1294 << "\n"; 1295 if (Mismatch.NumEntries) 1296 OS << " Mismatched count percentage (" << ProfileKindName 1297 << "): " << format("%.3f%%", Mismatch.ValueCounts[I] * 100) << "\n"; 1298 if (Unique.NumEntries) 1299 OS << " Percentage of " << ProfileKindName 1300 << " profile only in test_profile: " 1301 << format("%.3f%%", Unique.ValueCounts[I] * 100) << "\n"; 1302 OS << " " << ProfileKindName 1303 << " profile base count sum: " << format("%.0f", Base.ValueCounts[I]) 1304 << "\n" 1305 << " " << ProfileKindName 1306 << " profile test count sum: " << format("%.0f", Test.ValueCounts[I]) 1307 << "\n"; 1308 } 1309 } 1310 1311 namespace IndexedInstrProf { 1312 // A C++14 compatible version of the offsetof macro. 1313 template <typename T1, typename T2> 1314 inline size_t constexpr offsetOf(T1 T2::*Member) { 1315 constexpr T2 Object{}; 1316 return size_t(&(Object.*Member)) - size_t(&Object); 1317 } 1318 1319 static inline uint64_t read(const unsigned char *Buffer, size_t Offset) { 1320 return *reinterpret_cast<const uint64_t *>(Buffer + Offset); 1321 } 1322 1323 uint64_t Header::formatVersion() const { 1324 using namespace support; 1325 return endian::byte_swap<uint64_t, little>(Version); 1326 } 1327 1328 Expected<Header> Header::readFromBuffer(const unsigned char *Buffer) { 1329 using namespace support; 1330 static_assert(std::is_standard_layout<Header>::value, 1331 "The header should be standard layout type since we use offset " 1332 "of fields to read."); 1333 Header H; 1334 1335 H.Magic = read(Buffer, offsetOf(&Header::Magic)); 1336 // Check the magic number. 
1337 uint64_t Magic = endian::byte_swap<uint64_t, little>(H.Magic); 1338 if (Magic != IndexedInstrProf::Magic) 1339 return make_error<InstrProfError>(instrprof_error::bad_magic); 1340 1341 // Read the version. 1342 H.Version = read(Buffer, offsetOf(&Header::Version)); 1343 if (GET_VERSION(H.formatVersion()) > 1344 IndexedInstrProf::ProfVersion::CurrentVersion) 1345 return make_error<InstrProfError>(instrprof_error::unsupported_version); 1346 1347 switch (GET_VERSION(H.formatVersion())) { 1348 // When a new field is added in the header add a case statement here to 1349 // populate it. 1350 static_assert( 1351 IndexedInstrProf::ProfVersion::CurrentVersion == Version8, 1352 "Please update the reading code below if a new field has been added, " 1353 "if not add a case statement to fall through to the latest version."); 1354 case 8ull: 1355 H.MemProfOffset = read(Buffer, offsetOf(&Header::MemProfOffset)); 1356 LLVM_FALLTHROUGH; 1357 default: // Version7 (when the backwards compatible header was introduced). 1358 H.HashType = read(Buffer, offsetOf(&Header::HashType)); 1359 H.HashOffset = read(Buffer, offsetOf(&Header::HashOffset)); 1360 } 1361 1362 return H; 1363 } 1364 1365 size_t Header::size() const { 1366 switch (GET_VERSION(formatVersion())) { 1367 // When a new field is added to the header add a case statement here to 1368 // compute the size as offset of the new field + size of the new field. This 1369 // relies on the field being added to the end of the list. 1370 static_assert(IndexedInstrProf::ProfVersion::CurrentVersion == Version8, 1371 "Please update the size computation below if a new field has " 1372 "been added to the header, if not add a case statement to " 1373 "fall through to the latest version."); 1374 case 8ull: 1375 return offsetOf(&Header::MemProfOffset) + sizeof(Header::MemProfOffset); 1376 default: // Version7 (when the backwards compatible header was introduced). 
1377 return offsetOf(&Header::HashOffset) + sizeof(Header::HashOffset); 1378 } 1379 } 1380 1381 } // namespace IndexedInstrProf 1382 1383 } // end namespace llvm 1384