1 //===-- StackFrameList.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "lldb/Target/StackFrameList.h"
10 #include "lldb/Breakpoint/Breakpoint.h"
11 #include "lldb/Breakpoint/BreakpointLocation.h"
12 #include "lldb/Core/SourceManager.h"
13 #include "lldb/Core/StreamFile.h"
14 #include "lldb/Symbol/Block.h"
15 #include "lldb/Symbol/Function.h"
16 #include "lldb/Symbol/Symbol.h"
17 #include "lldb/Target/Process.h"
18 #include "lldb/Target/RegisterContext.h"
19 #include "lldb/Target/StackFrame.h"
20 #include "lldb/Target/StopInfo.h"
21 #include "lldb/Target/Target.h"
22 #include "lldb/Target/Thread.h"
23 #include "lldb/Target/Unwind.h"
24 #include "lldb/Utility/Log.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 
27 #include <memory>
28 
29 //#define DEBUG_STACK_FRAMES 1
30 
31 using namespace lldb;
32 using namespace lldb_private;
33 
34 // StackFrameList constructor
35 StackFrameList::StackFrameList(Thread &thread,
36                                const lldb::StackFrameListSP &prev_frames_sp,
37                                bool show_inline_frames)
38     : m_thread(thread), m_prev_frames_sp(prev_frames_sp), m_mutex(), m_frames(),
39       m_selected_frame_idx(0), m_concrete_frames_fetched(0),
40       m_current_inlined_depth(UINT32_MAX),
41       m_current_inlined_pc(LLDB_INVALID_ADDRESS),
42       m_show_inlined_frames(show_inline_frames) {
43   if (prev_frames_sp) {
44     m_current_inlined_depth = prev_frames_sp->m_current_inlined_depth;
45     m_current_inlined_pc = prev_frames_sp->m_current_inlined_pc;
46   }
47 }
48 
49 StackFrameList::~StackFrameList() {
  // Call Clear() since it takes the lock and clears the stack frame list, in
  // case another thread is currently using this stack frame list.
52   Clear();
53 }
54 
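// If the current inlined depth is no longer valid, recompute it from the
// frame at index zero.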
55 void StackFrameList::CalculateCurrentInlinedDepth() {
56   uint32_t cur_inlined_depth = GetCurrentInlinedDepth();
57   if (cur_inlined_depth == UINT32_MAX) {
58     ResetCurrentInlinedDepth();
59   }
60 }
61 
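// Return the cached inlined depth if the thread is still stopped at the PC it
// was computed for; otherwise invalidate the cache and return UINT32_MAX.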
62 uint32_t StackFrameList::GetCurrentInlinedDepth() {
63   if (m_show_inlined_frames && m_current_inlined_pc != LLDB_INVALID_ADDRESS) {
64     lldb::addr_t cur_pc = m_thread.GetRegisterContext()->GetPC();
65     if (cur_pc != m_current_inlined_pc) {
66       m_current_inlined_pc = LLDB_INVALID_ADDRESS;
67       m_current_inlined_depth = UINT32_MAX;
68       Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
69       if (log && log->GetVerbose())
70         LLDB_LOGF(
71             log,
72             "GetCurrentInlinedDepth: invalidating current inlined depth.\n");
73     }
74     return m_current_inlined_depth;
75   } else {
76     return UINT32_MAX;
77   }
78 }
79 
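// Recompute the inlined depth of frame zero from the thread's stop reason and
// the inlined block structure at the current PC.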
80 void StackFrameList::ResetCurrentInlinedDepth() {
81   if (!m_show_inlined_frames)
82     return;
83 
84   std::lock_guard<std::recursive_mutex> guard(m_mutex);
85 
86   GetFramesUpTo(0);
87   if (m_frames.empty())
88     return;
89   if (!m_frames[0]->IsInlined()) {
90     m_current_inlined_depth = UINT32_MAX;
91     m_current_inlined_pc = LLDB_INVALID_ADDRESS;
92     Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
93     if (log && log->GetVerbose())
94       LLDB_LOGF(
95           log,
96           "ResetCurrentInlinedDepth: Invalidating current inlined depth.\n");
97     return;
98   }
99 
100   // We only need to do something special about inlined blocks when we are
101   // at the beginning of an inlined function:
102   // FIXME: We probably also have to do something special if the PC is at
103   // the END of an inlined function, which coincides with the end of either
104   // its containing function or another inlined function.
105 
106   Block *block_ptr = m_frames[0]->GetFrameBlock();
107   if (!block_ptr)
108     return;
109 
110   Address pc_as_address;
111   lldb::addr_t curr_pc = m_thread.GetRegisterContext()->GetPC();
112   pc_as_address.SetLoadAddress(curr_pc, &(m_thread.GetProcess()->GetTarget()));
113   AddressRange containing_range;
114   if (!block_ptr->GetRangeContainingAddress(pc_as_address, containing_range) ||
115       pc_as_address != containing_range.GetBaseAddress())
116     return;
117 
  // If we got here because of a breakpoint hit, then set the inlined depth
  // depending on where the breakpoint was set. If we got here because of a
  // crash, then set the inlined depth to the deepest block. Otherwise, we
  // stopped here naturally as the result of a step, so set ourselves to the
  // containing frame of the whole set of nested inlines, so the user can then
  // "virtually" step into the frames one by one, or next over the whole mess.
  // Note: We don't have to handle being somewhere in the middle of the stack
  // here, since ResetCurrentInlinedDepth doesn't get called if there is a
  // valid inlined depth set.
127   StopInfoSP stop_info_sp = m_thread.GetStopInfo();
128   if (!stop_info_sp)
129     return;
130   switch (stop_info_sp->GetStopReason()) {
131   case eStopReasonWatchpoint:
132   case eStopReasonException:
133   case eStopReasonExec:
134   case eStopReasonFork:
135   case eStopReasonVFork:
136   case eStopReasonVForkDone:
137   case eStopReasonSignal:
138     // In all these cases we want to stop in the deepest frame.
139     m_current_inlined_pc = curr_pc;
140     m_current_inlined_depth = 0;
141     break;
142   case eStopReasonBreakpoint: {
    // FIXME: Figure out what this breakpoint is doing, and set the inline
    // depth appropriately.  Be careful to take into account breakpoints that
    // implement step over prologue, since that should do the default
    // calculation. For now, if the breakpoints corresponding to this hit are
    // all internal, set the stop location to the top of the inlined stack,
    // since that will make things like stepping over prologues work right.
    // But if there are any non-internal breakpoints, go to the bottom of the
    // stack, since that was the old behavior.
151     uint32_t bp_site_id = stop_info_sp->GetValue();
152     BreakpointSiteSP bp_site_sp(
153         m_thread.GetProcess()->GetBreakpointSiteList().FindByID(bp_site_id));
154     bool all_internal = true;
155     if (bp_site_sp) {
156       uint32_t num_owners = bp_site_sp->GetNumberOfOwners();
157       for (uint32_t i = 0; i < num_owners; i++) {
158         Breakpoint &bp_ref = bp_site_sp->GetOwnerAtIndex(i)->GetBreakpoint();
159         if (!bp_ref.IsInternal()) {
160           all_internal = false;
161         }
162       }
163     }
164     if (!all_internal) {
165       m_current_inlined_pc = curr_pc;
166       m_current_inlined_depth = 0;
167       break;
168     }
169   }
170     LLVM_FALLTHROUGH;
171   default: {
    // Otherwise, we should set ourselves to the containing frame of the whole
    // set of inlined blocks, so that the user can descend into them. So first
    // we check whether we have more than one inlined block sharing this PC:
175     int num_inlined_functions = 0;
176 
177     for (Block *container_ptr = block_ptr->GetInlinedParent();
178          container_ptr != nullptr;
179          container_ptr = container_ptr->GetInlinedParent()) {
180       if (!container_ptr->GetRangeContainingAddress(pc_as_address,
181                                                     containing_range))
182         break;
183       if (pc_as_address != containing_range.GetBaseAddress())
184         break;
185 
186       num_inlined_functions++;
187     }
188     m_current_inlined_pc = curr_pc;
189     m_current_inlined_depth = num_inlined_functions + 1;
190     Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
191     if (log && log->GetVerbose())
192       LLDB_LOGF(log,
193                 "ResetCurrentInlinedDepth: setting inlined "
194                 "depth: %d 0x%" PRIx64 ".\n",
195                 m_current_inlined_depth, curr_pc);
196 
197     break;
198   }
199   }
200 }
201 
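// Decrement the current inlined depth if it is valid and non-zero. Returns
// true if the depth was decremented.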
202 bool StackFrameList::DecrementCurrentInlinedDepth() {
203   if (m_show_inlined_frames) {
204     uint32_t current_inlined_depth = GetCurrentInlinedDepth();
205     if (current_inlined_depth != UINT32_MAX) {
206       if (current_inlined_depth > 0) {
207         m_current_inlined_depth--;
208         return true;
209       }
210     }
211   }
212   return false;
213 }
214 
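// Explicitly set the current inlined depth. A depth of UINT32_MAX invalidates
// the cached value; any other depth is cached along with the current PC.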
215 void StackFrameList::SetCurrentInlinedDepth(uint32_t new_depth) {
216   m_current_inlined_depth = new_depth;
217   if (new_depth == UINT32_MAX)
218     m_current_inlined_pc = LLDB_INVALID_ADDRESS;
219   else
220     m_current_inlined_pc = m_thread.GetRegisterContext()->GetPC();
221 }
222 
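// Fetch concrete (non-inlined) frames from the unwinder up to end_idx. The
// StackFrame objects themselves are created lazily in GetFrameAtIndex.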
223 void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
224                                                Unwind &unwinder) {
225   assert(m_thread.IsValid() && "Expected valid thread");
226   assert(m_frames.size() <= end_idx && "Expected there to be frames to fill");
227 
228   if (end_idx < m_concrete_frames_fetched)
229     return;
230 
231   uint32_t num_frames = unwinder.GetFramesUpTo(end_idx);
232   if (num_frames <= end_idx + 1) {
233     // Done unwinding.
234     m_concrete_frames_fetched = UINT32_MAX;
235   }
236 
237   // Don't create the frames eagerly. Defer this work to GetFrameAtIndex,
238   // which can lazily query the unwinder to create frames.
239   m_frames.resize(num_frames);
240 }
241 
242 /// A sequence of calls that comprise some portion of a backtrace. Each frame
243 /// is represented as a pair of a callee (Function *) and an address within the
244 /// callee.
245 struct CallDescriptor {
246   Function *func;
247   CallEdge::AddrType address_type = CallEdge::AddrType::Call;
248   addr_t address = LLDB_INVALID_ADDRESS;
249 };
250 using CallSequence = std::vector<CallDescriptor>;
251 
252 /// Find the unique path through the call graph from \p begin (with return PC
253 /// \p return_pc) to \p end. On success this path is stored into \p path, and
254 /// on failure \p path is unchanged.
255 static void FindInterveningFrames(Function &begin, Function &end,
256                                   ExecutionContext &exe_ctx, Target &target,
257                                   addr_t return_pc, CallSequence &path,
258                                   ModuleList &images, Log *log) {
259   LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
260            begin.GetDisplayName(), end.GetDisplayName(), return_pc);
261 
262   // Find a non-tail calling edge with the correct return PC.
263   if (log)
264     for (const auto &edge : begin.GetCallEdges())
265       LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
266                edge->GetReturnPCAddress(begin, target));
267   CallEdge *first_edge = begin.GetCallEdgeForReturnAddress(return_pc, target);
268   if (!first_edge) {
269     LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
270              begin.GetDisplayName(), return_pc);
271     return;
272   }
273 
274   // The first callee may not be resolved, or there may be nothing to fill in.
275   Function *first_callee = first_edge->GetCallee(images, exe_ctx);
276   if (!first_callee) {
277     LLDB_LOG(log, "Could not resolve callee");
278     return;
279   }
280   if (first_callee == &end) {
281     LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
282              end.GetDisplayName(), return_pc);
283     return;
284   }
285 
286   // Run DFS on the tail-calling edges out of the first callee to find \p end.
287   // Fully explore the set of functions reachable from the first edge via tail
288   // calls in order to detect ambiguous executions.
289   struct DFS {
290     CallSequence active_path = {};
291     CallSequence solution_path = {};
292     llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
293     bool ambiguous = false;
294     Function *end;
295     ModuleList &images;
296     Target &target;
297     ExecutionContext &context;
298 
299     DFS(Function *end, ModuleList &images, Target &target,
300         ExecutionContext &context)
301         : end(end), images(images), target(target), context(context) {}
302 
303     void search(CallEdge &first_edge, Function &first_callee,
304                 CallSequence &path) {
305       dfs(first_edge, first_callee);
306       if (!ambiguous)
307         path = std::move(solution_path);
308     }
309 
310     void dfs(CallEdge &current_edge, Function &callee) {
311       // Found a path to the target function.
312       if (&callee == end) {
313         if (solution_path.empty())
314           solution_path = active_path;
315         else
316           ambiguous = true;
317         return;
318       }
319 
320       // Terminate the search if tail recursion is found, or more generally if
321       // there's more than one way to reach a target. This errs on the side of
322       // caution: it conservatively stops searching when some solutions are
323       // still possible to save time in the average case.
324       if (!visited_nodes.insert(&callee).second) {
325         ambiguous = true;
326         return;
327       }
328 
329       // Search the calls made from this callee.
330       active_path.push_back(CallDescriptor{&callee});
331       for (const auto &edge : callee.GetTailCallingEdges()) {
332         Function *next_callee = edge->GetCallee(images, context);
333         if (!next_callee)
334           continue;
335 
336         std::tie(active_path.back().address_type, active_path.back().address) =
337             edge->GetCallerAddress(callee, target);
338 
339         dfs(*edge, *next_callee);
340         if (ambiguous)
341           return;
342       }
343       active_path.pop_back();
344     }
345   };
346 
347   DFS(&end, images, target, exe_ctx).search(*first_edge, *first_callee, path);
348 }
349 
350 /// Given that \p next_frame will be appended to the frame list, synthesize
351 /// tail call frames between the current end of the list and \p next_frame.
352 /// If any frames are added, adjust the frame index of \p next_frame.
353 ///
354 ///   --------------
355 ///   |    ...     | <- Completed frames.
356 ///   --------------
357 ///   | prev_frame |
358 ///   --------------
359 ///   |    ...     | <- Artificial frames inserted here.
360 ///   --------------
361 ///   | next_frame |
362 ///   --------------
363 ///   |    ...     | <- Not-yet-visited frames.
364 ///   --------------
365 void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
366   // Cannot synthesize tail call frames when the stack is empty (there is no
367   // "previous" frame).
368   if (m_frames.empty())
369     return;
370 
371   TargetSP target_sp = next_frame.CalculateTarget();
372   if (!target_sp)
373     return;
374 
375   lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
376   if (!next_reg_ctx_sp)
377     return;
378 
379   Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
380 
381   StackFrame &prev_frame = *m_frames.back().get();
382 
383   // Find the functions prev_frame and next_frame are stopped in. The function
384   // objects are needed to search the lazy call graph for intervening frames.
385   Function *prev_func =
386       prev_frame.GetSymbolContext(eSymbolContextFunction).function;
387   if (!prev_func) {
388     LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
389     return;
390   }
391   Function *next_func =
392       next_frame.GetSymbolContext(eSymbolContextFunction).function;
393   if (!next_func) {
394     LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
395     return;
396   }
397 
398   // Try to find the unique sequence of (tail) calls which led from next_frame
399   // to prev_frame.
400   CallSequence path;
401   addr_t return_pc = next_reg_ctx_sp->GetPC();
402   Target &target = *target_sp.get();
403   ModuleList &images = next_frame.CalculateTarget()->GetImages();
404   ExecutionContext exe_ctx(target_sp, /*get_process=*/true);
405   exe_ctx.SetFramePtr(&next_frame);
406   FindInterveningFrames(*next_func, *prev_func, exe_ctx, target, return_pc,
407                         path, images, log);
408 
409   // Push synthetic tail call frames.
410   for (auto calleeInfo : llvm::reverse(path)) {
411     Function *callee = calleeInfo.func;
412     uint32_t frame_idx = m_frames.size();
413     uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
414     addr_t cfa = LLDB_INVALID_ADDRESS;
415     bool cfa_is_valid = false;
416     addr_t pc = calleeInfo.address;
417     // If the callee address refers to the call instruction, we do not want to
418     // subtract 1 from this value.
419     const bool behaves_like_zeroth_frame =
420         calleeInfo.address_type == CallEdge::AddrType::Call;
421     SymbolContext sc;
422     callee->CalculateSymbolContext(&sc);
423     auto synth_frame = std::make_shared<StackFrame>(
424         m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
425         cfa_is_valid, pc, StackFrame::Kind::Artificial,
426         behaves_like_zeroth_frame, &sc);
427     m_frames.push_back(synth_frame);
428     LLDB_LOG(log, "Pushed frame {0} at {1:x}", callee->GetDisplayName(), pc);
429   }
430 
431   // If any frames were created, adjust next_frame's index.
432   if (!path.empty())
433     next_frame.SetFrameIndex(m_frames.size());
434 }
435 
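// Populate m_frames up to (and including) end_idx. When inlined frames are
// shown, this also expands inlined blocks into their own frames and, once the
// whole stack has been unwound, merges in frames from the previous frame list.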
436 void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
437   // Do not fetch frames for an invalid thread.
438   if (!m_thread.IsValid())
439     return;
440 
  // If we've already gotten more frames than asked for, or we've already
  // finished unwinding, return.
443   if (m_frames.size() > end_idx || GetAllFramesFetched())
444     return;
445 
446   Unwind &unwinder = m_thread.GetUnwinder();
447 
448   if (!m_show_inlined_frames) {
449     GetOnlyConcreteFramesUpTo(end_idx, unwinder);
450     return;
451   }
452 
453 #if defined(DEBUG_STACK_FRAMES)
454   StreamFile s(stdout, false);
455 #endif
  // If we are hiding some frames from the outside world, we need to add those
  // onto the total count of frames to fetch. However, we don't need to do
  // that if end_idx is 0, since in that case we always get the first concrete
  // frame and all the inlined frames below it. And if end_idx is UINT32_MAX,
  // that means get all the frames, so just do that.
461 
  uint32_t inlined_depth = 0;
  if (end_idx > 0 && end_idx != UINT32_MAX) {
    inlined_depth = GetCurrentInlinedDepth();
    if (inlined_depth != UINT32_MAX)
      end_idx += inlined_depth;
  }
470 
471   StackFrameSP unwind_frame_sp;
472   do {
473     uint32_t idx = m_concrete_frames_fetched++;
474     lldb::addr_t pc = LLDB_INVALID_ADDRESS;
475     lldb::addr_t cfa = LLDB_INVALID_ADDRESS;
476     bool behaves_like_zeroth_frame = (idx == 0);
477     if (idx == 0) {
      // We might have already created frame zero; only create it if we need
      // to.
480       if (m_frames.empty()) {
481         RegisterContextSP reg_ctx_sp(m_thread.GetRegisterContext());
482 
483         if (reg_ctx_sp) {
484           const bool success = unwinder.GetFrameInfoAtIndex(
485               idx, cfa, pc, behaves_like_zeroth_frame);
          // There shouldn't be any way not to get the frame info for frame
          // 0. But if the unwinder can't make one, let's make one by hand
          // with the SP as the CFA and see if that gets any further.
489           if (!success) {
490             cfa = reg_ctx_sp->GetSP();
491             pc = reg_ctx_sp->GetPC();
492           }
493 
494           unwind_frame_sp = std::make_shared<StackFrame>(
495               m_thread.shared_from_this(), m_frames.size(), idx, reg_ctx_sp,
496               cfa, pc, behaves_like_zeroth_frame, nullptr);
497           m_frames.push_back(unwind_frame_sp);
498         }
499       } else {
500         unwind_frame_sp = m_frames.front();
501         cfa = unwind_frame_sp->m_id.GetCallFrameAddress();
502       }
503     } else {
504       const bool success =
505           unwinder.GetFrameInfoAtIndex(idx, cfa, pc, behaves_like_zeroth_frame);
506       if (!success) {
507         // We've gotten to the end of the stack.
508         SetAllFramesFetched();
509         break;
510       }
511       const bool cfa_is_valid = true;
512       unwind_frame_sp = std::make_shared<StackFrame>(
513           m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
514           pc, StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);
515 
516       // Create synthetic tail call frames between the previous frame and the
517       // newly-found frame. The new frame's index may change after this call,
518       // although its concrete index will stay the same.
519       SynthesizeTailCallFrames(*unwind_frame_sp.get());
520 
521       m_frames.push_back(unwind_frame_sp);
522     }
523 
524     assert(unwind_frame_sp);
525     SymbolContext unwind_sc = unwind_frame_sp->GetSymbolContext(
526         eSymbolContextBlock | eSymbolContextFunction);
527     Block *unwind_block = unwind_sc.block;
528     TargetSP target_sp = m_thread.CalculateTarget();
529     if (unwind_block) {
530       Address curr_frame_address(
531           unwind_frame_sp->GetFrameCodeAddressForSymbolication());
532 
533       SymbolContext next_frame_sc;
534       Address next_frame_address;
535 
536       while (unwind_sc.GetParentOfInlinedScope(
537           curr_frame_address, next_frame_sc, next_frame_address)) {
538         next_frame_sc.line_entry.ApplyFileMappings(target_sp);
539         behaves_like_zeroth_frame = false;
540         StackFrameSP frame_sp(new StackFrame(
541             m_thread.shared_from_this(), m_frames.size(), idx,
542             unwind_frame_sp->GetRegisterContextSP(), cfa, next_frame_address,
543             behaves_like_zeroth_frame, &next_frame_sc));
544 
545         m_frames.push_back(frame_sp);
546         unwind_sc = next_frame_sc;
547         curr_frame_address = next_frame_address;
548       }
549     }
550   } while (m_frames.size() - 1 < end_idx);
551 
552   // Don't try to merge till you've calculated all the frames in this stack.
553   if (GetAllFramesFetched() && m_prev_frames_sp) {
554     StackFrameList *prev_frames = m_prev_frames_sp.get();
555     StackFrameList *curr_frames = this;
556 
557 #if defined(DEBUG_STACK_FRAMES)
558     s.PutCString("\nprev_frames:\n");
559     prev_frames->Dump(&s);
560     s.PutCString("\ncurr_frames:\n");
561     curr_frames->Dump(&s);
562     s.EOL();
563 #endif
564     size_t curr_frame_num, prev_frame_num;
565 
566     for (curr_frame_num = curr_frames->m_frames.size(),
567         prev_frame_num = prev_frames->m_frames.size();
568          curr_frame_num > 0 && prev_frame_num > 0;
569          --curr_frame_num, --prev_frame_num) {
570       const size_t curr_frame_idx = curr_frame_num - 1;
571       const size_t prev_frame_idx = prev_frame_num - 1;
572       StackFrameSP curr_frame_sp(curr_frames->m_frames[curr_frame_idx]);
573       StackFrameSP prev_frame_sp(prev_frames->m_frames[prev_frame_idx]);
574 
575 #if defined(DEBUG_STACK_FRAMES)
      s.Printf("\n\nCurr frame #%u ", (uint32_t)curr_frame_idx);
577       if (curr_frame_sp)
578         curr_frame_sp->Dump(&s, true, false);
579       else
580         s.PutCString("NULL");
      s.Printf("\nPrev frame #%u ", (uint32_t)prev_frame_idx);
582       if (prev_frame_sp)
583         prev_frame_sp->Dump(&s, true, false);
584       else
585         s.PutCString("NULL");
586 #endif
587 
588       StackFrame *curr_frame = curr_frame_sp.get();
589       StackFrame *prev_frame = prev_frame_sp.get();
590 
591       if (curr_frame == nullptr || prev_frame == nullptr)
592         break;
593 
594       // Check the stack ID to make sure they are equal.
595       if (curr_frame->GetStackID() != prev_frame->GetStackID())
596         break;
597 
598       prev_frame->UpdatePreviousFrameFromCurrentFrame(*curr_frame);
      // Now copy the fixed-up previous frame into the current frame list so
      // the pointer doesn't change.
601       m_frames[curr_frame_idx] = prev_frame_sp;
602 
603 #if defined(DEBUG_STACK_FRAMES)
604       s.Printf("\n    Copying previous frame to current frame");
605 #endif
606     }
607     // We are done with the old stack frame list, we can release it now.
608     m_prev_frames_sp.reset();
609   }
610 
611 #if defined(DEBUG_STACK_FRAMES)
612   s.PutCString("\n\nNew frames:\n");
613   Dump(&s);
614   s.EOL();
615 #endif
616 }
617 
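// Return the visible frame count (via GetVisibleStackFrameIndex), fetching
// all remaining frames first if can_create is true.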
618 uint32_t StackFrameList::GetNumFrames(bool can_create) {
619   std::lock_guard<std::recursive_mutex> guard(m_mutex);
620 
621   if (can_create)
622     GetFramesUpTo(UINT32_MAX);
623 
624   return GetVisibleStackFrameIndex(m_frames.size());
625 }
626 
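// Dump the stack ID and settings-formatted description of every cached frame
// to the stream.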
627 void StackFrameList::Dump(Stream *s) {
628   if (s == nullptr)
629     return;
630 
631   std::lock_guard<std::recursive_mutex> guard(m_mutex);
632 
633   const_iterator pos, begin = m_frames.begin(), end = m_frames.end();
634   for (pos = begin; pos != end; ++pos) {
635     StackFrame *frame = (*pos).get();
636     s->Printf("%p: ", static_cast<void *>(frame));
637     if (frame) {
638       frame->GetStackID().Dump(s);
639       frame->DumpUsingSettingsFormat(s);
640     } else
641       s->Printf("frame #%u", (uint32_t)std::distance(begin, pos));
642     s->EOL();
643   }
644   s->EOL();
645 }
646 
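// Return the frame at the given user-visible index, accounting for the
// current inlined depth and fetching frames on demand.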
647 StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
648   StackFrameSP frame_sp;
649   std::lock_guard<std::recursive_mutex> guard(m_mutex);
650   uint32_t original_idx = idx;
651 
652   uint32_t inlined_depth = GetCurrentInlinedDepth();
653   if (inlined_depth != UINT32_MAX)
654     idx += inlined_depth;
655 
656   if (idx < m_frames.size())
657     frame_sp = m_frames[idx];
658 
659   if (frame_sp)
660     return frame_sp;
661 
  // GetFramesUpTo will fill m_frames with as many frames as you asked for, if
  // there are that many. If there weren't, then you asked for more frames
  // than the stack has.
664   GetFramesUpTo(idx);
665   if (idx < m_frames.size()) {
666     if (m_show_inlined_frames) {
667       // When inline frames are enabled we actually create all the frames in
668       // GetFramesUpTo.
669       frame_sp = m_frames[idx];
670     } else {
671       addr_t pc, cfa;
672       bool behaves_like_zeroth_frame = (idx == 0);
673       if (m_thread.GetUnwinder().GetFrameInfoAtIndex(
674               idx, cfa, pc, behaves_like_zeroth_frame)) {
675         const bool cfa_is_valid = true;
676         frame_sp = std::make_shared<StackFrame>(
677             m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc,
678             StackFrame::Kind::Regular, behaves_like_zeroth_frame, nullptr);
679 
680         Function *function =
681             frame_sp->GetSymbolContext(eSymbolContextFunction).function;
682         if (function) {
          // When we aren't showing inline functions we always use the
          // topmost function block as the scope.
685           frame_sp->SetSymbolContextScope(&function->GetBlock(false));
686         } else {
          // Set the symbol scope from the symbol, regardless of whether it is
          // nullptr or valid.
689           frame_sp->SetSymbolContextScope(
690               frame_sp->GetSymbolContext(eSymbolContextSymbol).symbol);
691         }
692         SetFrameAtIndex(idx, frame_sp);
693       }
694     }
695   } else if (original_idx == 0) {
    // There should ALWAYS be a frame at index 0.  If something went wrong
    // with the CurrentInlinedDepth such that, taking it into account, there
    // weren't as many frames as we thought, then reset the current inlined
    // depth and return the real zeroth frame.
700     if (m_frames.empty()) {
      // A valid thread should never have zero frames; only an invalid thread
      // can end up here.
703       assert(!m_thread.IsValid() && "A valid thread has no frames.");
704     } else {
705       ResetCurrentInlinedDepth();
706       frame_sp = m_frames[original_idx];
707     }
708   }
709 
710   return frame_sp;
711 }
712 
713 StackFrameSP
714 StackFrameList::GetFrameWithConcreteFrameIndex(uint32_t unwind_idx) {
  // First try assuming the unwind index is the same as the frame index. The
  // frame index is always greater than or equal to the unwind index, so it is
  // a good place to start. If we have inlined frames we might have 5 concrete
  // frames (frame unwind indexes go from 0-4), but we might have 15 frames
  // after we make all the inlined frames. Most of the time the unwind frame
  // index (or the concrete frame index) is the same as the frame index.
721   uint32_t frame_idx = unwind_idx;
722   StackFrameSP frame_sp(GetFrameAtIndex(frame_idx));
723   while (frame_sp) {
724     if (frame_sp->GetFrameIndex() == unwind_idx)
725       break;
726     frame_sp = GetFrameAtIndex(++frame_idx);
727   }
728   return frame_sp;
729 }
730 
731 static bool CompareStackID(const StackFrameSP &stack_sp,
732                            const StackID &stack_id) {
733   return stack_sp->GetStackID() < stack_id;
734 }
735 
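// Find the frame whose stack ID matches stack_id: first by binary search over
// the cached frames, then by fetching additional frames one at a time.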
736 StackFrameSP StackFrameList::GetFrameWithStackID(const StackID &stack_id) {
737   StackFrameSP frame_sp;
738 
739   if (stack_id.IsValid()) {
740     std::lock_guard<std::recursive_mutex> guard(m_mutex);
741     uint32_t frame_idx = 0;
    // Do a binary search in case the stack frame is already in our cache.
743     collection::const_iterator begin = m_frames.begin();
744     collection::const_iterator end = m_frames.end();
745     if (begin != end) {
746       collection::const_iterator pos =
747           std::lower_bound(begin, end, stack_id, CompareStackID);
748       if (pos != end) {
749         if ((*pos)->GetStackID() == stack_id)
750           return *pos;
751       }
752     }
753     do {
754       frame_sp = GetFrameAtIndex(frame_idx);
755       if (frame_sp && frame_sp->GetStackID() == stack_id)
756         break;
757       frame_idx++;
758     } while (frame_sp);
759   }
760   return frame_sp;
761 }
762 
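// Store frame_sp at index idx, growing m_frames as needed. Returns true if
// the frame was stored.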
763 bool StackFrameList::SetFrameAtIndex(uint32_t idx, StackFrameSP &frame_sp) {
764   if (idx >= m_frames.size())
765     m_frames.resize(idx + 1);
766   // Make sure allocation succeeded by checking bounds again
767   if (idx < m_frames.size()) {
768     m_frames[idx] = frame_sp;
769     return true;
770   }
771   return false; // resize failed, out of memory?
772 }
773 
774 uint32_t StackFrameList::GetSelectedFrameIndex() const {
775   std::lock_guard<std::recursive_mutex> guard(m_mutex);
776   return m_selected_frame_idx;
777 }
778 
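// Select the given frame, update the default source file and line, and return
// the resulting selected frame index.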
779 uint32_t StackFrameList::SetSelectedFrame(lldb_private::StackFrame *frame) {
780   std::lock_guard<std::recursive_mutex> guard(m_mutex);
781   const_iterator pos;
782   const_iterator begin = m_frames.begin();
783   const_iterator end = m_frames.end();
784   m_selected_frame_idx = 0;
785   for (pos = begin; pos != end; ++pos) {
786     if (pos->get() == frame) {
787       m_selected_frame_idx = std::distance(begin, pos);
788       uint32_t inlined_depth = GetCurrentInlinedDepth();
789       if (inlined_depth != UINT32_MAX)
790         m_selected_frame_idx -= inlined_depth;
791       break;
792     }
793   }
794   SetDefaultFileAndLineToSelectedFrame();
795   return m_selected_frame_idx;
796 }
797 
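// Select the frame at the given index. Returns false if no frame exists at
// that index.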
798 bool StackFrameList::SetSelectedFrameByIndex(uint32_t idx) {
799   std::lock_guard<std::recursive_mutex> guard(m_mutex);
800   StackFrameSP frame_sp(GetFrameAtIndex(idx));
801   if (frame_sp) {
802     SetSelectedFrame(frame_sp.get());
803     return true;
804   } else
805     return false;
806 }
807 
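// If this thread is the process's selected thread, point the source manager's
// default file and line at the selected frame's line entry.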
808 void StackFrameList::SetDefaultFileAndLineToSelectedFrame() {
809   if (m_thread.GetID() ==
810       m_thread.GetProcess()->GetThreadList().GetSelectedThread()->GetID()) {
811     StackFrameSP frame_sp(GetFrameAtIndex(GetSelectedFrameIndex()));
812     if (frame_sp) {
813       SymbolContext sc = frame_sp->GetSymbolContext(eSymbolContextLineEntry);
814       if (sc.line_entry.file)
815         m_thread.CalculateTarget()->GetSourceManager().SetDefaultFileAndLine(
816             sc.line_entry.file, sc.line_entry.line);
817     }
818   }
819 }
820 
// The thread has been run, so reset the number of stack frames to zero; this
// lets us determine how many frames we have lazily.
823 void StackFrameList::Clear() {
824   std::lock_guard<std::recursive_mutex> guard(m_mutex);
825   m_frames.clear();
826   m_concrete_frames_fetched = 0;
827 }
828 
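// Return the shared pointer in this list that owns stack_frame_ptr, or an
// empty shared pointer if the frame is not in this list.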
829 lldb::StackFrameSP
830 StackFrameList::GetStackFrameSPForStackFramePtr(StackFrame *stack_frame_ptr) {
831   const_iterator pos;
832   const_iterator begin = m_frames.begin();
833   const_iterator end = m_frames.end();
834   lldb::StackFrameSP ret_sp;
835 
836   for (pos = begin; pos != end; ++pos) {
837     if (pos->get() == stack_frame_ptr) {
838       ret_sp = (*pos);
839       break;
840     }
841   }
842   return ret_sp;
843 }
844 
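// Print up to num_frames frames starting at first_frame to strm, optionally
// marking the selected frame. Returns the number of frames displayed.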
845 size_t StackFrameList::GetStatus(Stream &strm, uint32_t first_frame,
846                                  uint32_t num_frames, bool show_frame_info,
847                                  uint32_t num_frames_with_source,
848                                  bool show_unique,
849                                  const char *selected_frame_marker) {
850   size_t num_frames_displayed = 0;
851 
852   if (num_frames == 0)
853     return 0;
854 
855   StackFrameSP frame_sp;
856   uint32_t frame_idx = 0;
857   uint32_t last_frame;
858 
859   // Don't let the last frame wrap around...
860   if (num_frames == UINT32_MAX)
861     last_frame = UINT32_MAX;
862   else
863     last_frame = first_frame + num_frames;
864 
865   StackFrameSP selected_frame_sp = m_thread.GetSelectedFrame();
866   const char *unselected_marker = nullptr;
867   std::string buffer;
868   if (selected_frame_marker) {
869     size_t len = strlen(selected_frame_marker);
870     buffer.insert(buffer.begin(), len, ' ');
871     unselected_marker = buffer.c_str();
872   }
873   const char *marker = nullptr;
874 
875   for (frame_idx = first_frame; frame_idx < last_frame; ++frame_idx) {
876     frame_sp = GetFrameAtIndex(frame_idx);
877     if (!frame_sp)
878       break;
879 
880     if (selected_frame_marker != nullptr) {
881       if (frame_sp == selected_frame_sp)
882         marker = selected_frame_marker;
883       else
884         marker = unselected_marker;
885     }
886 
    if (!frame_sp->GetStatus(strm, show_frame_info,
                             num_frames_with_source > (frame_idx - first_frame),
                             show_unique, marker))
890       break;
891     ++num_frames_displayed;
892   }
893 
894   strm.IndentLess();
895   return num_frames_displayed;
896 }
897