//===-- StackFrameList.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "lldb/Target/StackFrameList.h"
#include "lldb/Breakpoint/Breakpoint.h"
#include "lldb/Breakpoint/BreakpointLocation.h"
#include "lldb/Core/SourceManager.h"
#include "lldb/Core/StreamFile.h"
#include "lldb/Symbol/Block.h"
#include "lldb/Symbol/Function.h"
#include "lldb/Symbol/Symbol.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/StopInfo.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Target/Unwind.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "llvm/ADT/SmallPtrSet.h"

#include <memory>

//#define DEBUG_STACK_FRAMES 1

using namespace lldb;
using namespace lldb_private;

// StackFrameList constructor
StackFrameList::StackFrameList(Thread &thread,
                               const lldb::StackFrameListSP &prev_frames_sp,
                               bool show_inline_frames)
    : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
      m_selected_frame_idx(0), m_concrete_frames_fetched(0),
      m_current_inlined_depth(UINT32_MAX),
      m_current_inlined_pc(LLDB_INVALID_ADDRESS),
      m_show_inlined_frames(show_inline_frames) {
  if (prev_frames_sp) {
    m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
    m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
  }
}

StackFrameList::~StackFrameList() {
  // Call Clear() since it takes the lock and clears the stack frame list, in
  // case another thread is currently using this stack frame list.
  Clear();
}

void StackFrameList::CalculateCurrentInlinedDepth() {
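  // Recompute the current inlined depth if the cached value has been
  // invalidated, e.g. because the thread's PC has moved since the depth was
  // last calculated.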
  uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
  if (cur_inlined_depth == UINT32_MAX) {
    ResetCurrentInlinedDepth();
  }
}

uint32_t StackFrameList::GetCurrentInlinedDepth() {
  if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
    lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
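    // If the PC has moved since the inlined depth was cached, the cached
    // value no longer describes where the thread is stopped; invalidate it.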
    if (cur_pc != m_current_inlined_pc) {
      m_current_inlined_pc = LLDB_INVALID_ADDRESS;
      m_current_inlined_depth = UINT32_MAX;
      Log *log = GetLog(LLDBLog::Step);
      if (log && log->GetVerbose())
        LLDB_LOGF(
            log,
            "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
    }
    return m_current_inlined_depth;
  } else {
    return UINT32_MAX;
  }
}

void StackFrameList::ResetCurrentInlinedDepth() {
  if (!m_show_inlined_frames)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  GetFramesUpTo(0);
  if (m_frames.empty())
    return;
  if (!m_frames[0]->IsInlined()) {
    m_current_inlined_depth = UINT32_MAX;
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(
          log,
          "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
    return;
  }

  // We only need to do something special about inlined blocks when we are
  // at the beginning of an inlined function:
  // FIXME: We probably also have to do something special if the PC is at
  // the END of an inlined function, which coincides with the end of either
  // its containing function or another inlined function.

  Block *block_ptr = m_frames[0]->GetFrameBlock();
  if (!block_ptr)
    return;

  Address pc_as_address;
  lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
  pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
  AddressRange containing_range;
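  // Only adjust the inlined depth when the PC sits exactly at the start of
  // the block's address range, i.e. at the first instruction of the inlined
  // call.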
  if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
      pc_as_address != containing_range.GetBaseAddress())
    return;

  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise, we
  // stopped here naturally as the result of a step, so set ourselves in the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
  StopInfoSP stop_info_sp = m_thread.GetStopInfo();
  if (!stop_info_sp)
    return;
  switch (stop_info_sp->GetStopReason()) {
  case eStopReasonWatchpoint:
  case eStopReasonException:
  case eStopReasonExec:
  case eStopReasonFork:
  case eStopReasonVFork:
  case eStopReasonVForkDone:
  case eStopReasonSignal:
    // In all these cases we want to stop in the deepest frame.
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = 0;
    break;
  case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately. Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints, go to the bottom of the
    // stack, since that was the old behavior.
    uint32_t bp_site_id = stop_info_sp->GetValue();
    BreakpointSiteSP bp_site_sp(
        m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
    bool all_internal = true;
    if (bp_site_sp) {
      uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
      for (uint32_t i = 0; i < num_owners; i++) {
        Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
        if (!bp_ref.IsInternal()) {
          all_internal = false;
        }
      }
    }
    if (!all_internal) {
      m_current_inlined_pc = curr_pc;
      m_current_inlined_depth = 0;
      break;
    }
  }
    [[fallthrough]];
  default: {
    // Otherwise, we should set ourselves at the container of the inlining, so
    // that the user can descend into them. So first we check whether we have
    // more than one inlined block sharing this PC:
    int num_inlined_functions = 0;

    for (Block *container_ptr = block_ptr->GetInlinedParent();
         container_ptr != nullptr;
         container_ptr = container_ptr->GetInlinedParent()) {
      if (!container_ptr->GetRangeContainingAddress(pc_as_address,
                                                    containing_range))
        break;
      if (pc_as_address != containing_range.GetBaseAddress())
        break;

      num_inlined_functions++;
    }
    m_current_inlined_pc = curr_pc;
    m_current_inlined_depth = num_inlined_functions + 1;
    Log *log = GetLog(LLDBLog::Step);
    if (log && log->GetVerbose())
      LLDB_LOGF(log,
                "ResetCurrentInlinedDepth: setting inlined "
                "depth: %d 0x%" PRIx64 ".\n",
                m_current_inlined_depth, curr_pc);

    break;
  }
  }
}

bool StackFrameList::DecrementCurrentInlinedDepth() {
  if (m_show_inlined_frames) {
    uint32_t current_inlined_depth = GetCurrentInlinedDepth();
    if (current_inlined_depth != UINT32_MAX) {
      if (current_inlined_depth > 0) {
        m_current_inlined_depth--;
        return true;
      }
    }
  }
  return false;
}

void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
  m_current_inlined_depth = new_depth;
  if (new_depth == UINT32_MAX)
    m_current_inlined_pc = LLDB_INVALID_ADDRESS;
  else
    m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
}

void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
                                               Unwind &unwinder) {
  assert(m_thread.IsValid() && "Expected valid thread");
  assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");

  if (end_idx < m_concrete_frames_fetched)
    return;

  uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
  if (num_frames <= end_idx + 1) {
    // Done unwinding.
    m_concrete_frames_fetched = UINT32_MAX;
  }

  // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
  // which can lazily query the unwinder to create frames.
  m_frames.resize(num_frames);
}

/// A sequence of calls that comprise some portion of a backtrace. Each frame
/// is represented by its callee (Function *), an address within that callee,
/// and whether the address refers to the call instruction or the return
/// address.
struct CallDescriptor {
struct CallDescriptor {
  Function *func;
  CallEdge::AddrType address_type = CallEdge::AddrType::Call;
  addr_t address = LLDB_INVALID_ADDRESS;
};
using CallSequence = std::vector<CallDescriptor>;

/// Find the unique path through the call graph from \p begin (with return PC
/// \p return_pc) to \p end. On success this path is stored into \p path, and
/// on failure \p path is unchanged.
static void FindInterveningFrames(Function &begin, Function &end,
                                  ExecutionContext &exe_ctx, Target &target,
                                  addr_t return_pc, CallSequence &path,
                                  ModuleList &images, Log *log) {
  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
           begin.GetDisplayName(), end.GetDisplayName(), return_pc);

  // Find a non-tail calling edge with the correct return PC.
  if (log)
    for (const auto &edge : begin.GetCallEdges())
      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
               edge->GetReturnPCAddress(begin, target));
  CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
  if (!first_edge) {
    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
             begin.GetDisplayName(), return_pc);
    return;
  }

  // The first callee may not be resolved, or there may be nothing to fill in.
  Function *first_callee = first_edge->GetCallee(images, exe_ctx);
  if (!first_callee) {
    LLDB_LOG(log, "Could not resolve callee");
    return;
  }
  if (first_callee == &end) {
    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
             end.GetDisplayName(), return_pc);
    return;
  }

  // Run DFS on the tail-calling edges out of the first callee to find \p end.
  // Fully explore the set of functions reachable from the first edge via tail
  // calls in order to detect ambiguous executions.
  struct DFS {
    CallSequence active_path = {};
    CallSequence solution_path = {};
    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
    bool ambiguous = false;
    Function *end;
    ModuleList &images;
    Target &target;
    ExecutionContext &context;

    DFS(Function *end, ModuleList &images, Target &target,
        ExecutionContext &context)
        : end(end), images(images), target(target), context(context) {}

    void search(CallEdge &first_edge, Function &first_callee,
                CallSequence &path) {
      dfs(first_edge, first_callee);
      if (!ambiguous)
        path = std::move(solution_path);
    }

    void dfs(CallEdge &current_edge, Function &callee) {
      // Found a path to the target function.
      if (&callee == end) {
        if (solution_path.empty())
          solution_path = active_path;
        else
          ambiguous = true;
        return;
      }

      // Terminate the search if tail recursion is found, or more generally if
      // there's more than one way to reach a target. This errs on the side of
      // caution: it conservatively stops searching when some solutions are
      // still possible to save time in the average case.
      if (!visited_nodes.insert(&callee).second) {
        ambiguous = true;
        return;
      }

      // Search the calls made from this callee.
      active_path.push_back(CallDescriptor{&callee});
      for (const auto &edge : callee.GetTailCallingEdges()) {
        Function *next_callee = edge->GetCallee(images, context);
        if (!next_callee)
          continue;

        std::tie(active_path.back().address_type, active_path.back().address) =
            edge->GetCallerAddress(callee, target);

        dfs(*edge, *next_callee);
        if (ambiguous)
          return;
      }
      active_path.pop_back();
    }
  };

  DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
}

/// Given that \p next_frame will be appended to the frame list, synthesize
/// tail call frames between the current end of the list and \p next_frame.
/// If any frames are added, adjust the frame index of \p next_frame.
///
///   --------------
///   |    ...     | <- Completed frames.
///   --------------
///   | prev_frame |
///   --------------
///   |    ...     | <- Artificial frames inserted here.
///   --------------
///   | next_frame |
///   --------------
///   |    ...     | <- Not-yet-visited frames.
///   --------------
void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
  // Cannot synthesize tail call frames when the stack is empty (there is no
  // "previous" frame).
  if (m_frames.empty())
    return;

  TargetSP target_sp = next_frame.CalculateTarget();
  if (!target_sp)
    return;

  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
  if (!next_reg_ctx_sp)
    return;

  Log *log = GetLog(LLDBLog::Step);

  StackFrame &prev_frame = *m_frames.back().get();

  // Find the functions prev_frame and next_frame are stopped in. The function
  // objects are needed to search the lazy call graph for intervening frames.
  Function *prev_func =
      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!prev_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
    return;
  }
  Function *next_func =
      next_frame.GetSymbolContext(eSymbolContextFunction).function;
  if (!next_func) {
    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
    return;
  }

  // Try to find the unique sequence of (tail) calls which led from next_frame
  // to prev_frame.
  CallSequence path;
  addr_t return_pc = next_reg_ctx_sp->GetPC();
  Target &target = *target_sp.get();
  ModuleList &images = next_frame.CalculateTarget()->GetImages();
  ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
  exe_ctx.SetFramePtr(&next_frame);
  FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
                        path, images, log);

  // Push synthetic tail call frames.
  for (auto calleeInfo : llvm::reverse(path)) {
    Function *callee = calleeInfo.func;
    uint32_t frame_idx = m_frames.size();
    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
    addr_t cfa = LLDB_INVALID_ADDRESS;
    bool cfa_is_valid = false;
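    // Artificial tail-call frames never had their own activation on the
    // stack, so there is no call frame address to report for them.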
    addr_t pc = calleeInfo.address;
    // If the callee address refers to the call instruction, we do not want to
    // subtract 1 from this value.
    const bool behaves_like_zeroth_frame =
        calleeInfo.address_type == CallEdge::AddrType::Call;
    SymbolContext sc;
    callee->CalculateSymbolContext(&sc);
    auto synth_frame = std::make_shared<StackFrame>(
        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
        cfa_is_valid, pc, StackFrame::Kind::Artificial,
        behaves_like_zeroth_frame, &sc);
    m_frames.push_back(synth_frame);
    LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
  }

  // If any frames were created, adjust next_frame's index.
  if (!path.empty())
    next_frame.SetFrameIndex(m_frames.size());
}

void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
  // Do not fetch frames for an invalid thread.
  if (!m_thread.IsValid())
    return;

  // If we've already gotten more frames than were asked for, or have already
  // finished unwinding, there is nothing to do.
  if (m_frames.size() > end_idx || GetAllFramesFetched())
    return;

  Unwind &unwinder = m_thread.GetUnwinder();

  if (!m_show_inlined_frames) {
    GetOnlyConcreteFramesUpTo(end_idx, unwinder);
    return;
  }

#if defined(DEBUG_STACK_FRAMES)
  StreamFile s(stdout, false);
#endif
  // If we are hiding some frames from the outside world, we need to add
  // those onto the total count of frames to fetch.  However, we don't need
  // to do that if end_idx is 0 since in that case we always get the first
  // concrete frame and all the inlined frames below it...  And of course, if
  // end_idx is UINT32_MAX that means get all, so just do that...

  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX) {
      if (end_idx > 0)
        end_idx += inlined_depth;
    }
  }

  StackFrameSP unwind_frame_sp;
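  // Unwind one concrete frame at a time, expanding each one into the set of
  // inlined frames that share its PC, until we have at least end_idx + 1
  // frames or the unwinder runs out of stack.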
  do {
    uint32_t idx = m_concrete_frames_fetched++;
    lldb::addr_t pc = LLDB_INVALID_ADDRESS;
    lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
    bool behaves_like_zeroth_frame = (idx == 0);
    if (idx == 0) {
      // We might have already created frame zero; only create it if we need
      // to.
      if (m_frames.empty()) {
        RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());

        if (reg_ctx_sp) {
          const bool success = unwinder.GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame 0.
          // But if the unwinder can't make one, let's make one by hand with
          // the SP as the CFA and see if that gets any further.
          if (!success) {
            cfa = reg_ctx_sp->GetSP();
            pc = reg_ctx_sp->GetPC();
          }

          unwind_frame_sp = std::make_shared<StackFrame>(
              m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
              cfa, pc, behaves_like_zeroth_frame, nullptr);
          m_frames.push_back(unwind_frame_sp);
        }
      } else {
        unwind_frame_sp = m_frames.front();
        cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
      }
    } else {
      const bool success =
          unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
      if (!success) {
        // We've gotten to the end of the stack.
        SetAllFramesFetched();
        break;
      }
      const bool cfa_is_valid = true;
      unwind_frame_sp = std::make_shared<StackFrame>(
          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
          pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

      // Create synthetic tail call frames between the previous frame and the
      // newly-found frame. The new frame's index may change after this call,
      // although its concrete index will stay the same.
      SynthesizeTailCallFrames(*unwind_frame_sp.get());

      m_frames.push_back(unwind_frame_sp);
    }

    assert(unwind_frame_sp);
    SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
        eSymbolContextBlock | eSymbolContextFunction);
    Block *unwind_block = unwind_sc.block;
    TargetSP target_sp = m_thread.CalculateTarget();
    if (unwind_block) {
      Address curr_frame_address(
          unwind_frame_sp->GetFrameCodeAddressForSymbolication());

      SymbolContext next_frame_sc;
      Address next_frame_address;
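      // Walk out of the inlined scopes at this address, pushing one frame per
      // inlined call site. Inlined frames share the concrete frame's register
      // context and CFA; only the code address and symbol context differ.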
      while (unwind_sc.GetParentOfInlinedScope(
          curr_frame_address, next_frame_sc, next_frame_address)) {
        next_frame_sc.line_entry.ApplyFileMappings(target_sp);
        behaves_like_zeroth_frame = false;
        StackFrameSP frame_sp(new StackFrame(
            m_thread.shared_from_this(), m_frames.size(), idx,
            unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
            behaves_like_zeroth_frame, &next_frame_sc));

        m_frames.push_back(frame_sp);
        unwind_sc = next_frame_sc;
        curr_frame_address = next_frame_address;
      }
    }
  } while (m_frames.size() - 1 < end_idx);

  // Don't try to merge till you've calculated all the frames in this stack.
  if (GetAllFramesFetched() && m_prev_frames_sp) {
    StackFrameList *prev_frames = m_prev_frames_sp.get();
    StackFrameList *curr_frames = this;

#if defined(DEBUG_STACK_FRAMES)
    s.PutCString("\nprev_frames:\n");
    prev_frames->Dump(&s);
    s.PutCString("\ncurr_frames:\n");
    curr_frames->Dump(&s);
    s.EOL();
#endif
    size_t curr_frame_num, prev_frame_num;
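    // Walk both lists from the bottom of the stack (the oldest frames) toward
    // frame zero, reusing the StackFrame objects from the previous list
    // wherever the StackIDs still match, so that frame pointers handed out
    // before this stop remain valid.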
    for (curr_frame_num = curr_frames->m_frames.size(),
        prev_frame_num = prev_frames->m_frames.size();
         curr_frame_num > 0 && prev_frame_num > 0;
         --curr_frame_num, --prev_frame_num) {
      const size_t curr_frame_idx = curr_frame_num - 1;
      const size_t prev_frame_idx = prev_frame_num - 1;
      StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
      StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", curr_frame_idx);
      if (curr_frame_sp)
        curr_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", prev_frame_idx);
      if (prev_frame_sp)
        prev_frame_sp->Dump(&s, true, false);
      else
        s.PutCString("NULL");
#endif

      StackFrame *curr_frame = curr_frame_sp.get();
      StackFrame *prev_frame = prev_frame_sp.get();

      if (curr_frame == nullptr || prev_frame == nullptr)
        break;

      // Check the stack ID to make sure they are equal.
      if (curr_frame->GetStackID() != prev_frame->GetStackID())
        break;

      prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed up previous frame into the current frames so the
      // pointer doesn't change.
      m_frames[curr_frame_idx] = prev_frame_sp;

#if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n    Copying previous frame to current frame");
#endif
    }
    // We are done with the old stack frame list, we can release it now.
    m_prev_frames_sp.reset();
  }

#if defined(DEBUG_STACK_FRAMES)
  s.PutCString("\n\nNew frames:\n");
  Dump(&s);
  s.EOL();
#endif
}

uint32_t StackFrameList::GetNumFrames(bool can_create) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  if (can_create)
    GetFramesUpTo(UINT32_MAX);
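  // Frames hidden by the current inlined depth are not included in the
  // visible frame count.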
  return GetVisibleStackFrameIndex(m_frames.size());
}

void StackFrameList::Dump(Stream *s) {
  if (s == nullptr)
    return;

  std::lock_guard<std::recursive_mutex> guard(m_mutex);

  const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
  for (pos = begin; pos != end; ++pos) {
    StackFrame *frame = (*pos).get();
    s->Printf("%p: ", static_cast<void *>(frame));
    if (frame) {
      frame->GetStackID().Dump(s);
      frame->DumpUsingSettingsFormat(s);
    } else
      s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
    s->EOL();
  }
  s->EOL();
}

StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
  StackFrameSP frame_sp;
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  uint32_t original_idx = idx;
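  // Translate the user-visible frame index into an index into m_frames by
  // adding back any inlined frames that the current inlined depth is hiding.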
  uint32_t inlined_depth = GetCurrentInlinedDepth();
  if (inlined_depth != UINT32_MAX)
    idx += inlined_depth;

  if (idx < m_frames.size())
    frame_sp = m_frames[idx];

  if (frame_sp)
    return frame_sp;

  // GetFramesUpTo will fill m_frames with as many frames as were asked for,
  // if that many exist. If it could not, the requested index is past the end
  // of the stack.
  GetFramesUpTo(idx);
  if (idx < m_frames.size()) {
    if (m_show_inlined_frames) {
      // When inline frames are enabled we actually create all the frames in
      // GetFramesUpTo.
      frame_sp = m_frames[idx];
    } else {
      addr_t pc, cfa;
      bool behaves_like_zeroth_frame = (idx == 0);
      if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
              idx, cfa, pc, behaves_like_zeroth_frame)) {
        const bool cfa_is_valid = true;
        frame_sp = std::make_shared<StackFrame>(
            m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
            StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);

        Function *function =
            frame_sp->GetSymbolContext(eSymbolContextFunction).function;
        if (function) {
          // When we aren't showing inline functions, we always use the
          // topmost function block as the scope.
          frame_sp->SetSymbolContextScope(&function->GetBlock(false));
        } else {
          // Set the symbol scope from the symbol, regardless of whether it is
          // valid or nullptr.
          frame_sp->SetSymbolContextScope(
              frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
        }
        SetFrameAtIndex(idx, frame_sp);
      }
    }
  } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0. If something went wrong with
    // the current inlined depth and, once it is taken into account, there
    // aren't as many frames as we expected, reset the current inlined depth
    // and return the real zeroth frame.
    if (m_frames.empty()) {
      // A valid thread should never have zero frames.
      assert(!m_thread.IsValid() && "A valid thread has no frames.");
    } else {
      ResetCurrentInlinedDepth();
      frame_sp = m_frames[original_idx];
    }
  }

  return frame_sp;
}

StackFrameSP
StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index is always greater than or equal to the unwind index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
  uint32_t frame_idx = unwind_idx;
  StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
  while (frame_sp) {
    if (frame_sp->GetFrameIndex() == unwind_idx)
      break;
    frame_sp = GetFrameAtIndex(++frame_idx);
  }
  return frame_sp;
}

static bool CompareStackID(const StackFrameSP &stack_sp,
                           const StackID &stack_id) {
  return stack_sp->GetStackID() < stack_id;
}

StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
  StackFrameSP frame_sp;

  if (stack_id.IsValid()) {
    std::lock_guard<std::recursive_mutex> guard(m_mutex);
    uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache
    collection::const_iterator begin = m_frames.begin();
    collection::const_iterator end = m_frames.end();
    if (begin != end) {
      collection::const_iterator pos =
          std::lower_bound(begin, end, stack_id, CompareStackID);
      if (pos != end) {
        if ((*pos)->GetStackID() == stack_id)
          return *pos;
      }
    }
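    // Not in the cached frames yet, so fetch deeper frames one at a time
    // until a matching StackID turns up or we run out of stack.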
    do {
      frame_sp = GetFrameAtIndex(frame_idx);
      if (frame_sp && frame_sp->GetStackID() == stack_id)
        break;
      frame_idx++;
    } while (frame_sp);
  }
  return frame_sp;
}

bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
  if (idx >= m_frames.size())
    m_frames.resize(idx + 1);
  // Make sure allocation succeeded by checking bounds again
  if (idx < m_frames.size()) {
    m_frames[idx] = frame_sp;
    return true;
  }
  return false; // resize failed, out of memory?
}

uint32_t StackFrameList::GetSelectedFrameIndex() const {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  return m_selected_frame_idx;
}

uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  m_selected_frame_idx = 0;
  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == frame) {
      m_selected_frame_idx = std::distance(begin, pos);
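      // Convert to the user-visible index by removing any inlined frames
      // that the current inlined depth is hiding.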
      uint32_t inlined_depth = GetCurrentInlinedDepth();
      if (inlined_depth != UINT32_MAX)
        m_selected_frame_idx -= inlined_depth;
      break;
    }
  }
  SetDefaultFileAndLineToSelectedFrame();
  return m_selected_frame_idx;
}

bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  StackFrameSP frame_sp(GetFrameAtIndex(idx));
  if (frame_sp) {
    SetSelectedFrame(frame_sp.get());
    return true;
  } else
    return false;
}

void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
  if (m_thread.GetID() ==
      m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
    StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
    if (frame_sp) {
      SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
      if (sc.line_entry.file)
        m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
            sc.line_entry.file, sc.line_entry.line);
    }
  }
}

// The thread has been run; reset the number of stack frames to zero so we can
// determine how many frames we have lazily.
void StackFrameList::Clear() {
  std::lock_guard<std::recursive_mutex> guard(m_mutex);
  m_frames.clear();
  m_concrete_frames_fetched = 0;
}

lldb::StackFrameSP
StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
  const_iterator pos;
  const_iterator begin = m_frames.begin();
  const_iterator end = m_frames.end();
  lldb::StackFrameSP ret_sp;

  for (pos = begin; pos != end; ++pos) {
    if (pos->get() == stack_frame_ptr) {
      ret_sp = (*pos);
      break;
    }
  }
  return ret_sp;
}

size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
                                 uint32_t num_frames, bool show_frame_info,
                                 uint32_t num_frames_with_source,
                                 bool show_unique,
                                 const char *selected_frame_marker) {
  size_t num_frames_displayed = 0;

  if (num_frames == 0)
    return 0;

  StackFrameSP frame_sp;
  uint32_t frame_idx = 0;
  uint32_t last_frame;

  // Don't let the last frame wrap around...
  if (num_frames == UINT32_MAX)
    last_frame = UINT32_MAX;
  else
    last_frame = first_frame + num_frames;

  StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
  const char *unselected_marker = nullptr;
  std::string buffer;
  if (selected_frame_marker) {
    size_t len = strlen(selected_frame_marker);
    buffer.insert(buffer.begin(), len, ' ');
    unselected_marker = buffer.c_str();
  }
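  // The unselected marker is a run of spaces the same width as the selected
  // frame marker, so that marked and unmarked frames line up.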
  const char *marker = nullptr;

  for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
    frame_sp = GetFrameAtIndex(frame_idx);
    if (!frame_sp)
      break;

    if (selected_frame_marker != nullptr) {
      if (frame_sp == selected_frame_sp)
        marker = selected_frame_marker;
      else
        marker = unselected_marker;
    }

    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (frame_idx - first_frame),
                             show_unique, marker))
      break;
    ++num_frames_displayed;
  }

  strm.IndentLess();
  return num_frames_displayed;
}