//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

void DwarfExpression::emitConstu(uint64_t Value) {
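  // Prefer the single-byte DW_OP_lit<n> encodings for values 0-31; UINT64_MAX
  // gets the two-byte DW_OP_lit0, DW_OP_not sequence, and everything else
  // falls back to DW_OP_constu with a ULEB128-encoded operand.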
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
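  // Registers 0-31 have dedicated single-byte DW_OP_reg<n> opcodes; anything
  // larger needs DW_OP_regx followed by a ULEB128 register number.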
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

void DwarfExpression::addBReg(int DwarfReg, int Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
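  // DW_OP_piece can only describe byte-aligned, byte-sized pieces; a bit-level
  // offset or a size that is not a multiple of eight requires DW_OP_bit_piece,
  // which takes its size and offset operands in bits.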
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!MachineReg.isPhysical()) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    return false;
  }

  int Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCSuperRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(*SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCSubRegIterator SR(MachineReg, &TRI); SR.isValid(); ++SR) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, *SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(*SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
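  // DW_OP_stack_value was introduced in DWARF 4, so it is omitted for earlier
  // versions.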
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
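  // For example, a 128-bit constant becomes two 64-bit constants, each turned
  // into a stack value and followed by a piece operation covering its 64 bits.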
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below emits the value starting at the least significant byte,
    // so we need to perform a byte swap to get the byte order correct in case
    // of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment stop
          // once the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  // TODO: We should not give up here but the following code needs to be
  // changed to deal with multiple (sub)registers first.
  if (DwarfRegs.size() > 1) {
    LLVM_DEBUG(dbgs() << "TODO: giving up on debug information due to "
                         "multi-register usage.\n");
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
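  // For example, DW_OP_breg<N> +8 is both shorter and cheaper to evaluate than
  // DW_OP_breg<N> +0 followed by DW_OP_plus_uconst 8.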
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus]  --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg, -Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  IsEmittingEntryValue = true;
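  // The entry value's block operand is length-prefixed, so the nested register
  // location is staged in a temporary buffer; its size only becomes known once
  // the whole sub-expression has been emitted (see finalizeEntryValue).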
  enableTemporaryBuffer();
}

void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU; otherwise create
  // a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  std::optional<DIExpression::ExprOperand> PrevConvertOp;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location list, simply emit the index into the raw
        // byte stream as ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
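        // Without DW_OP_convert support, a pair of conversions (first to a
        // narrower type, then back to a wider one) is lowered into an explicit
        // sign or zero extension of the narrow type's low bits.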
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = std::nullopt;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}

/// Add masking operations to stencil out a subregister.
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

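  // For example, if the first fragment of a variable starts at bit offset 32,
  // an initial empty 32-bit piece is emitted to account for the undescribed
  // low part of the variable.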
  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
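  // Duplicate X, isolate its sign bit, multiply by ~0 to smear the sign bit
  // into an all-ones (or all-zeros) value, shift that mask above the low
  // FromBits bits, and OR it back into the original value.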
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB128 can encode 7 bits per byte.
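  // The constant-mask form below needs roughly FromBits / 7 bytes for the
  // ULEB128-encoded mask, while the shift-based form emits five operations
  // regardless of FromBits, hence the comparison against 5.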
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & ((1 << FromBits) - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
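  // Index 4 (TI_LOCAL_INDIRECT) is lowered to a plain TI_LOCAL operand and
  // treated as a memory location, the local presumably holding the variable's
  // address; all other indices yield an implicit value.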
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4 /*TI_LOCAL_INDIRECT*/ ? 0 /*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}