1 //===-- ThreadPlanStack.cpp -------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "lldb/Target/ThreadPlanStack.h"
10 #include "lldb/Target/Process.h"
11 #include "lldb/Target/Target.h"
12 #include "lldb/Target/Thread.h"
13 #include "lldb/Target/ThreadPlan.h"
14 #include "lldb/Utility/Log.h"
15
16 using namespace lldb;
17 using namespace lldb_private;
18
// Print a single plan as "Element <idx>: <description>" on its own line,
// one indent level deeper than the caller's current indentation.
static void PrintPlanElement(Stream &s, const ThreadPlanSP &plan,
                             lldb::DescriptionLevel desc_level,
                             int32_t elem_idx) {
  s.IndentMore();
  s.Indent();
  s.Printf("Element %d: ", elem_idx);
  plan->GetDescription(&s, desc_level);
  s.EOL();
  s.IndentLess();
}
29
// Construct a plan stack for \a thread.  When \a make_null is true the stack
// is seeded with a ThreadPlanNull so it is never empty and queries against a
// not-yet-populated stack answer benignly.
ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
  if (make_null) {
    // The ThreadPlanNull doesn't do anything to the Thread, so this is actually
    // still a const operation.
    m_plans.push_back(
        ThreadPlanSP(new ThreadPlanNull(const_cast<Thread &>(thread))));
  }
}
38
// Dump the active, completed, and discarded plan stacks to \a s at the given
// description level.  Private (internal) plans are only included when
// \a include_internal is true.
void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                      lldb::DescriptionLevel desc_level,
                                      bool include_internal) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  s.IndentMore();
  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
                include_internal);
  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
                include_internal);
  s.IndentLess();
}
51
PrintOneStack(Stream & s,llvm::StringRef stack_name,const PlanStack & stack,lldb::DescriptionLevel desc_level,bool include_internal) const52 void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
53 const PlanStack &stack,
54 lldb::DescriptionLevel desc_level,
55 bool include_internal) const {
56 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
57 // If the stack is empty, just exit:
58 if (stack.empty())
59 return;
60
61 // Make sure there are public completed plans:
62 bool any_public = false;
63 if (!include_internal) {
64 for (auto plan : stack) {
65 if (!plan->GetPrivate()) {
66 any_public = true;
67 break;
68 }
69 }
70 }
71
72 if (include_internal || any_public) {
73 int print_idx = 0;
74 s.Indent();
75 s << stack_name << ":\n";
76 for (auto plan : stack) {
77 if (!include_internal && plan->GetPrivate())
78 continue;
79 PrintPlanElement(s, plan, desc_level, print_idx++);
80 }
81 }
82 }
83
CheckpointCompletedPlans()84 size_t ThreadPlanStack::CheckpointCompletedPlans() {
85 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
86 m_completed_plan_checkpoint++;
87 m_completed_plan_store.insert(
88 std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
89 return m_completed_plan_checkpoint;
90 }
91
// Replace the current completed-plan stack with the snapshot stored under
// \a checkpoint, then remove the snapshot from the store.  Restoring a
// checkpoint that was never taken (or already consumed) is a programming
// error, enforced by the assert.
void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  auto result = m_completed_plan_store.find(checkpoint);
  assert(result != m_completed_plan_store.end() &&
         "Asked for a checkpoint that didn't exist");
  // swap avoids copying the plan vector; the stored entry is erased next.
  m_completed_plans.swap((*result).second);
  m_completed_plan_store.erase(result);
}
100
// Drop the snapshot stored under \a checkpoint without restoring it.
// Erasing a checkpoint that doesn't exist is a harmless no-op.
void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plan_store.erase(checkpoint);
}
105
ThreadDestroyed(Thread * thread)106 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
107 // Tell the plan stacks that this thread is going away:
108 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
109 for (ThreadPlanSP plan : m_plans)
110 plan->ThreadDestroyed();
111
112 for (ThreadPlanSP plan : m_discarded_plans)
113 plan->ThreadDestroyed();
114
115 for (ThreadPlanSP plan : m_completed_plans)
116 plan->ThreadDestroyed();
117
118 // Now clear the current plan stacks:
119 m_plans.clear();
120 m_discarded_plans.clear();
121 m_completed_plans.clear();
122
123 // Push a ThreadPlanNull on the plan stack. That way we can continue
124 // assuming that the plan stack is never empty, but if somebody errantly asks
125 // questions of a destroyed thread without checking first whether it is
126 // destroyed, they won't crash.
127 if (thread != nullptr) {
128 lldb::ThreadPlanSP null_plan_sp(new ThreadPlanNull(*thread));
129 m_plans.push_back(null_plan_sp);
130 }
131 }
132
// Push \a new_plan_sp onto the active plan stack and notify it via DidPush.
// The very first plan pushed onto an empty stack must be a base plan.
void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
  // If the thread plan doesn't already have a tracer, give it its parent's
  // tracer:
  // The first plan has to be a base plan:
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
         "Zeroth plan must be a base plan");

  // Inherit the tracer from the plan below.  The stack is non-empty here:
  // either plans already existed, or the new plan is a base plan (base plans
  // are expected to arrive with a tracer or tolerate none -- this branch
  // asserts non-empty, so a tracer-less base plan on an empty stack would
  // trip it).
  if (!new_plan_sp->GetThreadPlanTracer()) {
    assert(!m_plans.empty());
    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
  }
  m_plans.push_back(new_plan_sp);
  // Notify the plan only after it is actually on the stack.
  new_plan_sp->DidPush();
}
148
// Pop the top plan off the active stack, record it on the completed-plan
// stack, and return it.  The base plan (index 0) can never be popped.
lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't pop the base thread plan");

  // NOTE(review): the plan is appended to m_completed_plans *before*
  // WillPop() runs and before it leaves m_plans -- presumably so the plan is
  // visible as completed during its WillPop callback.  Confirm before
  // reordering these statements.
  lldb::ThreadPlanSP plan_sp = std::move(m_plans.back());
  m_completed_plans.push_back(plan_sp);
  plan_sp->WillPop();
  m_plans.pop_back();
  return plan_sp;
}
159
// Pop the top plan off the active stack, record it on the discarded-plan
// stack, and return it.  The base plan (index 0) can never be discarded.
lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() > 1 && "Can't discard the base thread plan");

  // NOTE(review): mirrors PopPlan -- the plan is moved to m_discarded_plans
  // before WillPop() and before leaving m_plans; keep this ordering unless
  // the WillPop contract is confirmed not to rely on it.
  lldb::ThreadPlanSP plan_sp = std::move(m_plans.back());
  m_discarded_plans.push_back(plan_sp);
  plan_sp->WillPop();
  m_plans.pop_back();
  return plan_sp;
}
170
// If the input plan is nullptr, discard all plans. Otherwise make sure this
// plan is in the stack, and if so discard up to and including it.
void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  int stack_size = m_plans.size();

  // nullptr means "discard everything except the base plan" (index 0 is
  // never discarded -- both loops below stop at i > 0).
  if (up_to_plan_ptr == nullptr) {
    for (int i = stack_size - 1; i > 0; i--)
      DiscardPlan();
    return;
  }

  // First verify the plan is actually on the stack; if it isn't, nothing is
  // discarded at all.
  bool found_it = false;
  for (int i = stack_size - 1; i > 0; i--) {
    if (m_plans[i].get() == up_to_plan_ptr) {
      found_it = true;
      break;
    }
  }

  if (found_it) {
    bool last_one = false;
    // Discard from the top down; the flag is set in the iteration where
    // up_to_plan_ptr is the current top, so that plan is discarded too and
    // then the loop stops.
    for (int i = stack_size - 1; i > 0 && !last_one; i--) {
      if (GetCurrentPlan().get() == up_to_plan_ptr)
        last_one = true;
      DiscardPlan();
    }
  }
}
200
DiscardAllPlans()201 void ThreadPlanStack::DiscardAllPlans() {
202 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
203 int stack_size = m_plans.size();
204 for (int i = stack_size - 1; i > 0; i--) {
205 DiscardPlan();
206 }
207 return;
208 }
209
// Discard plans from the top of the stack, consulting each master plan on
// the way down: plans above a master are discarded only if that master's
// OkayToDiscard() agrees, and a refusing master stops the whole process.
void ThreadPlanStack::DiscardConsultingMasterPlans() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  while (true) {
    int master_plan_idx;
    // Defaults to true so that, if no master plan is found at all, the
    // dependent plans are still discarded.
    bool discard = true;

    // Find the first master plan, see if it wants discarding, and if yes
    // discard up to it.
    for (master_plan_idx = m_plans.size() - 1; master_plan_idx >= 0;
         master_plan_idx--) {
      if (m_plans[master_plan_idx]->IsMasterPlan()) {
        discard = m_plans[master_plan_idx]->OkayToDiscard();
        break;
      }
    }

    // If the master plan doesn't want to get discarded, then we're done.
    if (!discard)
      return;

    // First pop all the dependent plans:
    for (int i = m_plans.size() - 1; i > master_plan_idx; i--) {
      DiscardPlan();
    }

    // Now discard the master plan itself.
    // The bottom-most plan never gets discarded. "OkayToDiscard" for it
    // means discard it's dependent plans, but not it...
    // NOTE(review): if the bottom-most master plan (index 0) ever answered
    // OkayToDiscard() == true, this `while (true)` would spin forever --
    // presumably base plans refuse discard; confirm that invariant.
    if (master_plan_idx > 0) {
      DiscardPlan();
    }
  }
}
243
// Return the plan currently controlling the thread: the top of the active
// plan stack, which always holds at least the base plan.
lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  assert(m_plans.size() != 0 && "There will always be a base plan.");
  return m_plans.back();
}
249
GetCompletedPlan(bool skip_private) const250 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
251 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
252 if (m_completed_plans.empty())
253 return {};
254
255 if (!skip_private)
256 return m_completed_plans.back();
257
258 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
259 lldb::ThreadPlanSP completed_plan_sp;
260 completed_plan_sp = m_completed_plans[i];
261 if (!completed_plan_sp->GetPrivate())
262 return completed_plan_sp;
263 }
264 return {};
265 }
266
GetPlanByIndex(uint32_t plan_idx,bool skip_private) const267 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
268 bool skip_private) const {
269 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
270 uint32_t idx = 0;
271
272 for (lldb::ThreadPlanSP plan_sp : m_plans) {
273 if (skip_private && plan_sp->GetPrivate())
274 continue;
275 if (idx == plan_idx)
276 return plan_sp;
277 idx++;
278 }
279 return {};
280 }
281
GetReturnValueObject() const282 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
283 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
284 if (m_completed_plans.empty())
285 return {};
286
287 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
288 lldb::ValueObjectSP return_valobj_sp;
289 return_valobj_sp = m_completed_plans[i]->GetReturnValueObject();
290 if (return_valobj_sp)
291 return return_valobj_sp;
292 }
293 return {};
294 }
295
GetExpressionVariable() const296 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
297 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
298 if (m_completed_plans.empty())
299 return {};
300
301 for (int i = m_completed_plans.size() - 1; i >= 0; i--) {
302 lldb::ExpressionVariableSP expression_variable_sp;
303 expression_variable_sp = m_completed_plans[i]->GetExpressionVariable();
304 if (expression_variable_sp)
305 return expression_variable_sp;
306 }
307 return {};
308 }
AnyPlans() const309 bool ThreadPlanStack::AnyPlans() const {
310 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
311 // There is always a base plan...
312 return m_plans.size() > 1;
313 }
314
AnyCompletedPlans() const315 bool ThreadPlanStack::AnyCompletedPlans() const {
316 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
317 return !m_completed_plans.empty();
318 }
319
AnyDiscardedPlans() const320 bool ThreadPlanStack::AnyDiscardedPlans() const {
321 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
322 return !m_discarded_plans.empty();
323 }
324
IsPlanDone(ThreadPlan * in_plan) const325 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
326 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
327 for (auto plan : m_completed_plans) {
328 if (plan.get() == in_plan)
329 return true;
330 }
331 return false;
332 }
333
WasPlanDiscarded(ThreadPlan * in_plan) const334 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
335 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
336 for (auto plan : m_discarded_plans) {
337 if (plan.get() == in_plan)
338 return true;
339 }
340 return false;
341 }
342
// Return the plan "below" \a current_plan, treating the completed-plan
// stack as logically stacked on top of the active stack: completed plans
// first, then the top of the active stack, then the rest of the active
// stack.  Returns nullptr for a null input or a plan not on either stack.
ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  if (current_plan == nullptr)
    return nullptr;

  // Look first in the completed plans, if the plan is here and there is
  // a completed plan above it, return that.
  int stack_size = m_completed_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_completed_plans[i].get())
      return m_completed_plans[i - 1].get();
  }

  // If this is the first completed plan, the previous one is the
  // bottom of the regular plan stack.
  if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
    return GetCurrentPlan().get();
  }

  // Otherwise look for it in the regular plans.  Index 0 (the base plan) is
  // excluded: the base plan has no previous plan.
  stack_size = m_plans.size();
  for (int i = stack_size - 1; i > 0; i--) {
    if (current_plan == m_plans[i].get())
      return m_plans[i - 1].get();
  }
  return nullptr;
}
370
GetInnermostExpression() const371 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
372 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
373 int stack_size = m_plans.size();
374
375 for (int i = stack_size - 1; i > 0; i--) {
376 if (m_plans[i]->GetKind() == ThreadPlan::eKindCallFunction)
377 return m_plans[i].get();
378 }
379 return nullptr;
380 }
381
ClearThreadCache()382 void ThreadPlanStack::ClearThreadCache() {
383 std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
384 for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
385 thread_plan_sp->ClearThreadCache();
386 }
387
// Called before the process resumes: plans completed or discarded during
// the previous stop are no longer relevant, so clear both record stacks.
void ThreadPlanStack::WillResume() {
  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
  m_completed_plans.clear();
  m_discarded_plans.clear();
}
393
// Reconcile the map with the process's current thread list.  When
// \a check_for_new is set, every thread not yet in the map gets a stack and
// a queued base plan.  When \a delete_missing is set, stacks for TIDs no
// longer present in \a current_threads are removed.
void ThreadPlanStackMap::Update(ThreadList &current_threads,
                                bool delete_missing,
                                bool check_for_new) {

  // Now find all the new threads and add them to the map:
  if (check_for_new) {
    for (auto thread : current_threads.Threads()) {
      lldb::tid_t cur_tid = thread->GetID();
      if (!Find(cur_tid)) {
        AddThread(*thread.get());
        thread->QueueBasePlan(true);
      }
    }
  }

  // If we aren't reaping missing threads at this point,
  // we are done.
  if (!delete_missing)
    return;
  // Otherwise scan for absent TID's.
  std::vector<lldb::tid_t> missing_threads;
  // Collect first, erase after: removing from m_plans_list while iterating
  // it would invalidate the iterator.
  for (auto &thread_plans : m_plans_list) {
    lldb::tid_t cur_tid = thread_plans.first;
    ThreadSP thread_sp = current_threads.FindThreadByID(cur_tid);
    if (!thread_sp)
      missing_threads.push_back(cur_tid);
  }
  for (lldb::tid_t tid : missing_threads) {
    RemoveTID(tid);
  }
}
427
// Dump the plan stacks of every thread in the map.  \a skip_unreported
// omits TIDs with no live Thread object; \a condense_if_trivial collapses a
// stack with no plans into a one-line "No active thread plans" message.
void ThreadPlanStackMap::DumpPlans(Stream &strm,
                                   lldb::DescriptionLevel desc_level,
                                   bool internal, bool condense_if_trivial,
                                   bool skip_unreported) {
  for (auto &elem : m_plans_list) {
    lldb::tid_t tid = elem.first;
    uint32_t index_id = 0;
    ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

    if (skip_unreported) {
      if (!thread_sp)
        continue;
    }
    // Unreported threads (kept when !skip_unreported) print index_id 0.
    if (thread_sp)
      index_id = thread_sp->GetIndexID();

    if (condense_if_trivial) {
      if (!elem.second.AnyPlans() && !elem.second.AnyCompletedPlans() &&
          !elem.second.AnyDiscardedPlans()) {
        strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
        strm.IndentMore();
        strm.Indent();
        strm.Printf("No active thread plans\n");
        strm.IndentLess();
        // NOTE(review): this returns out of the whole dump after the first
        // trivial stack, skipping all remaining threads -- looks like it
        // was meant to be `continue`.  Behavior preserved; confirm intent.
        return;
      }
    }

    strm.Indent();
    strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

    elem.second.DumpThreadPlans(strm, desc_level, internal);
  }
}
462
// Dump the plan stack for a single TID.  Returns false (after printing an
// "Unknown TID" message) if the TID is unreported while \a skip_unreported
// is set, or if no stack exists for the TID; returns true otherwise.
// \a condense_if_trivial collapses an empty stack into a one-line message.
bool ThreadPlanStackMap::DumpPlansForTID(Stream &strm, lldb::tid_t tid,
                                         lldb::DescriptionLevel desc_level,
                                         bool internal,
                                         bool condense_if_trivial,
                                         bool skip_unreported) {
  uint32_t index_id = 0;
  ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);

  if (skip_unreported) {
    if (!thread_sp) {
      strm.Format("Unknown TID: {0}", tid);
      return false;
    }
  }

  // An unreported thread (allowed when !skip_unreported) prints index_id 0.
  if (thread_sp)
    index_id = thread_sp->GetIndexID();
  ThreadPlanStack *stack = Find(tid);
  if (!stack) {
    strm.Format("Unknown TID: {0}\n", tid);
    return false;
  }

  if (condense_if_trivial) {
    if (!stack->AnyPlans() && !stack->AnyCompletedPlans() &&
        !stack->AnyDiscardedPlans()) {
      strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 "\n", index_id, tid);
      strm.IndentMore();
      strm.Indent();
      strm.Printf("No active thread plans\n");
      strm.IndentLess();
      return true;
    }
  }

  strm.Indent();
  strm.Printf("thread #%u: tid = 0x%4.4" PRIx64 ":\n", index_id, tid);

  stack->DumpThreadPlans(strm, desc_level, internal);
  return true;
}
504
PrunePlansForTID(lldb::tid_t tid)505 bool ThreadPlanStackMap::PrunePlansForTID(lldb::tid_t tid) {
506 // We only remove the plans for unreported TID's.
507 ThreadSP thread_sp = m_process.GetThreadList().FindThreadByID(tid);
508 if (thread_sp)
509 return false;
510
511 return RemoveTID(tid);
512 }
513