//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}
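
// Express MachineReg as one or more entries in DwarfRegs. A register that has
// no DWARF number of its own is described either as a piece of a larger
// super-register or as a greedy composition of sub-registers; if no DWARF
// encoding can be found at all, return false.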
bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!llvm::Register::isPhysicalRegister(MachineReg)) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}
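
// DW_OP_stack_value (DWARF v4 and later) marks the value on top of the
// expression stack as the value of the object rather than its location;
// for earlier DWARF versions the operator is simply omitted.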
void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below emits the value starting at the least significant byte,
    // so we need to perform a byte swap to get the byte order correct in the
    // case of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}
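
// Emit a register-based location, folding leading expression operations into
// the base register operand where possible. As an illustration (the DWARF
// register number 6 is hypothetical), a register location combined with the
// expression [DW_OP_plus_uconst 8] is lowered to the single operation
// "DW_OP_breg6 +8" rather than "DW_OP_breg6 +0, DW_OP_plus_uconst 8".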
bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    for (auto &Reg : DwarfRegs) {
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  assert(DwarfRegs.size() == 1);
  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}
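
// Entry values (DW_OP_entry_value in DWARF v5, DW_OP_GNU_entry_value as the
// pre-standard GNU extension) wrap a block of operations that is preceded by
// its size in bytes; beginEntryValueExpression and finalizeEntryValue below
// stage that block in a temporary buffer so the size can be emitted first.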
void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU; otherwise create
  // a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
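/// addExpression uses this to decide whether a DW_OP_deref can be made
/// implicit by switching to a memory location description instead of
/// emitting the operator explicitly.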
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor,
                                    unsigned FragmentOffsetInBits) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

void DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  Optional<DIExpression::ExprOperand> PrevConvertOp = None;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that were already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location-list, simply emit the index into the raw
        // byte stream as ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = None;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();
}

/// Add masking operations to stencil out a subregister.
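/// For instance, for a 16-bit sub-register at bit offset 8 of its
/// super-register this emits "DW_OP_lit8, DW_OP_shr, DW_OP_constu 0xffff,
/// DW_OP_and" to shift and mask the value on the expression stack.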
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // X & ((1 << FromBits) - 1)
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned((1ULL << FromBits) - 1);
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4 /*TI_LOCAL_INDIRECT*/ ? 0 /*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}