//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/Debugger.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Host/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StackFrameRecognizer.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;
// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call Clear() since it takes a lock and clears the stack frame list in
  // case another thread is currently using this stack frame list.
  Clear();
}

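// If the current inlined depth has been invalidated (GetCurrentInlinedDepth
// returns UINT32_MAX), recompute it from the thread's current stop location.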
void StackFrameList::CalculateCurrentInlinedDepth() {
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

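// Return the cached inlined depth, first invalidating it if the thread's PC
// no longer matches the PC it was computed for. UINT32_MAX means no inlined
// depth is currently in effect.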
uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log = GetLog(LLDBLog::Step);
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

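// Recompute the current inlined depth from scratch: if frame zero is at the
// start of an inlined block, choose a depth based on why the thread stopped,
// so that stepping presents the nested inlined frames in a sensible order.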
void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0, DoNotAllowInterruption);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise,
  // we stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonFork:
  case eStopReasonVFork:
  case eStopReasonVForkDone:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately.  Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, I set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints I go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfConstituents();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref =
            bp_site_sp->GetConstituentAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    [[fallthrough]];
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

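// Move the current inlined depth one step closer to the deepest inlined
// frame at this PC. Returns true if there was a depth left to decrement.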
bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

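// Pin the inlined depth to new_depth at the thread's current PC. Passing
// UINT32_MAX clears the inlined depth.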
void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

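// Fast path used when inlined frames are not being shown: ask the unwinder
// for concrete frames only and just resize m_frames, leaving StackFrame
// creation to GetFrameAtIndex.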
void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind &unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented as a pair of a callee (Function *) and an address within the
/// callee.
struct CallDescriptor {
  Function *func;
  CallEdge::AddrType address_type = CallEdge::AddrType::Call;
  addr_t address = LLDB_INVALID_ADDRESS;
};
using CallSequence = std::vector<CallDescriptor>;

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(CallDescriptor{&callee});
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        std::tie(active_path.back().address_type, active_path.back().address) =
            edge->GetCallerAddress(callee, target);

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log = GetLog(LLDBLog::Step);

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.func;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
    addr_t pc = calleeInfo.address;
    // If the callee address refers to the call instruction, we do not want to
    // subtract 1 from this value.
    const bool behaves_like_zeroth_frame =
        calleeInfo.address_type == CallEdge::AddrType::Call;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

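// Fill m_frames up to (and including) end_idx, expanding inlined frames and
// synthesizing artificial tail-call frames when inlined frames are shown.
// Returns true only if the unwind was interrupted before all the requested
// frames could be fetched.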
bool StackFrameList::GetFramesUpTo(uint32_t end_idx,
                                   InterruptionControl allow_interrupt) {
  // Do not fetch frames for an invalid thread.
  bool was_interrupted = false;
  if (!m_thread.IsValid())
    return false;

  // We've already gotten more frames than asked for, or we've already finished
  // unwinding, return.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return false;

  Unwind &unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return false;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch.  However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it...  And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
  Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero, only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      // Check for interruption when building the frames.
      // Do the check when idx > 0 so that we'll always create a 0th frame.
      if (allow_interrupt
          && INTERRUPT_REQUESTED(dbg, "Interrupted having fetched {0} frames",
                                 m_frames.size())) {
        was_interrupted = true;
        break;
      }

      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    TargetSP target_sp = m_thread.CalculateTarget();
    if (unwind_block) {
      Address curr_frame_address(
          unwind_frame_sp->GetFrameCodeAddressForSymbolication());

      SymbolContext next_frame_sc;
      Address next_frame_address;

      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;

    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
  // Don't report interrupted if we happen to have gotten all the frames:
  if (!GetAllFramesFetched())
    return was_interrupted;
  return false;
}

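// Return the number of frames visible to the user, unwinding the entire
// stack first if can_create is true.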
uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create) {
    // Don't allow interrupt or we might not return the correct count
    GetFramesUpTo(UINT32_MAX, DoNotAllowInterruption);
  }
  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

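// Return the frame at the given user-visible index, fetching frames on
// demand. The index is offset by the current inlined depth before it is
// looked up in m_frames.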
StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;

  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many.  If there weren't then you asked for too many frames.
  // GetFramesUpTo returns true if interrupted:
  if (GetFramesUpTo(idx)) {
    Log *log = GetLog(LLDBLog::Thread);
    LLDB_LOG(log, "GetFrameAtIndex was interrupted");
    return {};
  }

  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      addr_t pc, cfa;
      bool behaves_like_zeroth_frame = (idx == 0);
      if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame)) {
        const bool cfa_is_valid = true;
        frame_sp = std::make_shared<StackFrame>(
            m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
            StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

        Function *function =
            frame_sp->GetSymbolContext(eSymbolContextFunction).function;
        if (function) {
          // When we aren't showing inline functions we always use the
          // topmost function block as the scope.
          frame_sp->SetSymbolContextScope(&function->GetBlock(false));
        } else {
          // Set the symbol scope from the symbol, regardless of whether it is
          // nullptr or valid.
          frame_sp->SetSymbolContextScope(
              frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
        }
        SetFrameAtIndex(idx, frame_sp);
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0.  If something went wrong with
    // the CurrentInlinedDepth such that there weren't as many frames as we
    // thought taking that into account, then reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // Why do we have a thread with zero frames? That should never
      // happen...
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // unwind index is always less than or equal to the frame index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

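// Ordering predicate used for the binary search in GetFrameWithStackID below.
// Frames are stored youngest-first, and StackIDs are ordered so that a
// younger frame compares less than an older one.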
static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

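// Find the frame whose StackID matches stack_id: first try a binary search
// over the frames fetched so far, then fall back to walking the stack one
// frame at a time.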
StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

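// Store frame_sp at index idx, growing the frame list if necessary.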
bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

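// Run the frame recognizers on frame zero and, if one of them reports a
// different frame as the most relevant one to show the user, select it.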
void StackFrameList::SelectMostRelevantFrame() {
  // Don't call into the frame recognizers on the private state thread as
  // they can cause code to run in the target, and that can cause deadlocks
  // when fetching stop events for the expression.
  if (m_thread.GetProcess()->CurrentThreadIsPrivateStateThread())
    return;

  Log *log = GetLog(LLDBLog::Thread);

  // Only the top frame should be recognized.
  StackFrameSP frame_sp = GetFrameAtIndex(0);
  if (!frame_sp) {
    LLDB_LOG(log, "Failed to construct Frame #0");
    return;
  }

  RecognizedStackFrameSP recognized_frame_sp = frame_sp->GetRecognizedFrame();

  if (!recognized_frame_sp) {
    LLDB_LOG(log, "Frame #0 not recognized");
    return;
  }

  if (StackFrameSP most_relevant_frame_sp =
          recognized_frame_sp->GetMostRelevantFrame()) {
    LLDB_LOG(log, "Found most relevant frame at index {0}",
             most_relevant_frame_sp->GetFrameIndex());
    SetSelectedFrame(most_relevant_frame_sp.get());
  } else {
    LLDB_LOG(log, "No relevant frame!");
  }
}

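// Return the selected frame index, optionally running the frame recognizers
// first (via SelectMostRelevantFrame) if no frame has been selected yet.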
uint32_t StackFrameList::GetSelectedFrameIndex(
    SelectMostRelevant select_most_relevant) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  if (!m_selected_frame_idx && select_most_relevant)
    SelectMostRelevantFrame();
  if (!m_selected_frame_idx) {
    // If we aren't selecting the most relevant frame, and the selected frame
    // isn't set, then don't force a selection here, just return 0.
    if (!select_most_relevant)
      return 0;
    m_selected_frame_idx = 0;
  }
  return *m_selected_frame_idx;
}

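// Make the given frame the selected frame and update the default source file
// and line. Returns the stored selected-frame index, adjusted for the
// current inlined depth.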
uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx = *m_selected_frame_idx - inlined_depth;
      break;
    }
  }

  SetDefaultFileAndLineToSelectedFrame();
  return *m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(
        GetFrameAtIndex(GetSelectedFrameIndex(DoNoSelectMostRelevantFrame)));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run, reset the number of stack frames to zero so we
// can determine how many frames we have lazily.
// Note, we don't actually re-use StackFrameLists, we always make a new
// StackFrameList every time we stop, and then copy frame information frame
// by frame from the old to the new StackFrameList.  So the comment above
// does not describe how StackFrameLists are currently used.
// Clear is currently only used to clear the list in the destructor.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
  m_selected_frame_idx.reset();
}

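// Return the shared pointer in this list that owns stack_frame_ptr, or an
// empty shared pointer if the frame is not in the list.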
lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

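// Print up to num_frames frames, starting at first_frame, to strm, marking
// the selected frame if a marker was provided. Returns the number of frames
// actually displayed.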
size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp =
      m_thread.GetSelectedFrame(DoNoSelectMostRelevantFrame);
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }
    // Check for interruption here.  If we're fetching arguments, this loop
    // can go slowly:
    Debugger &dbg = m_thread.GetProcess()->GetTarget().GetDebugger();
    if (INTERRUPT_REQUESTED(dbg,
          "Interrupted dumping stack for thread {0:hex} with {1} shown.",
          m_thread.GetID(), num_frames_displayed))
      break;

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (first_frame - frame_idx),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}