//===-- MachThreadList.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/19/07.
//
//===----------------------------------------------------------------------===//

#include "MachThreadList.h"

#include "DNB.h"
#include "DNBLog.h"
#include "DNBThreadResumeActions.h"
#include "MachProcess.h"

#include <cinttypes>
#include <sys/sysctl.h>

#include <memory>

MachThreadList::MachThreadList()
    : m_threads(), m_threads_mutex(PTHREAD_MUTEX_RECURSIVE),
      m_is_64_bit(false) {}

MachThreadList::~MachThreadList() {}

nub_state_t MachThreadList::GetState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetState();
  return eStateInvalid;
}

const char *MachThreadList::GetName(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetName();
  return NULL;
}

ThreadInfo::QoS MachThreadList::GetRequestedQoS(nub_thread_t tid,
                                                nub_addr_t tsd,
                                                uint64_t dti_qos_class_index) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRequestedQoS(tsd, dti_qos_class_index);
  return ThreadInfo::QoS();
}

nub_addr_t MachThreadList::GetPThreadT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetPThreadT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetDispatchQueueT(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetDispatchQueueT();
  return INVALID_NUB_ADDRESS;
}

nub_addr_t MachThreadList::GetTSDAddressForThread(
    nub_thread_t tid, uint64_t plo_pthread_tsd_base_address_offset,
    uint64_t plo_pthread_tsd_base_offset, uint64_t plo_pthread_tsd_entry_size) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetTSDAddressForThread(
        plo_pthread_tsd_base_address_offset, plo_pthread_tsd_base_offset,
        plo_pthread_tsd_entry_size);
  return INVALID_NUB_ADDRESS;
}

nub_thread_t MachThreadList::SetCurrentThread(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp) {
    m_current_thread = thread_sp;
    return tid;
  }
  return INVALID_NUB_THREAD;
}

bool MachThreadList::GetThreadStoppedReason(
    nub_thread_t tid, struct DNBThreadStopInfo *stop_info) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetStopException().GetStopInfo(stop_info);
  return false;
}

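// Query the kernel for a thread's identifier info (unique thread ID, thread
// handle, and dispatch queue address) via the Mach thread_info() call, using
// the mach port number that corresponds to the given globally unique thread
// ID.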
bool MachThreadList::GetIdentifierInfo(
    nub_thread_t tid, thread_identifier_info_data_t *ident_info) {
  thread_t mach_port_number = GetMachPortNumberByThreadID(tid);

  mach_msg_type_number_t count = THREAD_IDENTIFIER_INFO_COUNT;
  return ::thread_info(mach_port_number, THREAD_IDENTIFIER_INFO,
                       (thread_info_t)ident_info, &count) == KERN_SUCCESS;
}

void MachThreadList::DumpThreadStoppedReason(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    thread_sp->GetStopException().DumpStopReason();
}

const char *MachThreadList::GetThreadInfo(nub_thread_t tid) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetBasicInfoAsString();
  return NULL;
}

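// Look up a thread by its globally unique thread ID with a linear scan of
// m_threads while holding the thread list mutex. Returns an empty shared
// pointer if no thread with that ID exists.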
MachThreadSP MachThreadList::GetThreadByID(nub_thread_t tid) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == tid) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

MachThreadSP
MachThreadList::GetThreadByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  MachThreadSP thread_sp;
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      thread_sp = m_threads[idx];
      break;
    }
  }
  return thread_sp;
}

nub_thread_t
MachThreadList::GetThreadIDByMachPortNumber(thread_t mach_port_number) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->MachPortNumber() == mach_port_number) {
      return m_threads[idx]->ThreadID();
    }
  }
  return INVALID_NUB_THREAD;
}

thread_t MachThreadList::GetMachPortNumberByThreadID(
    nub_thread_t globally_unique_id) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (size_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->ThreadID() == globally_unique_id) {
      return m_threads[idx]->MachPortNumber();
    }
  }
  return 0;
}

bool MachThreadList::GetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterValue(set, reg, reg_value);

  return false;
}

bool MachThreadList::SetRegisterValue(nub_thread_t tid, uint32_t set,
                                      uint32_t reg,
                                      const DNBRegisterValue *reg_value) const {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterValue(set, reg, reg_value);

  return false;
}

nub_size_t MachThreadList::GetRegisterContext(nub_thread_t tid, void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->GetRegisterContext(buf, buf_len);
  return 0;
}

nub_size_t MachThreadList::SetRegisterContext(nub_thread_t tid, const void *buf,
                                              size_t buf_len) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SetRegisterContext(buf, buf_len);
  return 0;
}

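// Save and restore a thread's register state. SaveRegisterState returns a
// save ID that can later be handed back to RestoreRegisterState to put the
// registers back the way they were; both return 0/false if no thread with the
// given ID exists.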
uint32_t MachThreadList::SaveRegisterState(nub_thread_t tid) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->SaveRegisterState();
  return 0;
}

bool MachThreadList::RestoreRegisterState(nub_thread_t tid, uint32_t save_id) {
  MachThreadSP thread_sp(GetThreadByID(tid));
  if (thread_sp)
    return thread_sp->RestoreRegisterState(save_id);
  return false;
}

nub_size_t MachThreadList::NumThreads() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  return m_threads.size();
}

nub_thread_t MachThreadList::ThreadIDAtIndex(nub_size_t idx) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (idx < m_threads.size())
    return m_threads[idx]->ThreadID();
  return INVALID_NUB_THREAD;
}

nub_thread_t MachThreadList::CurrentThreadID() {
  MachThreadSP thread_sp;
  CurrentThread(thread_sp);
  if (thread_sp.get())
    return thread_sp->ThreadID();
  return INVALID_NUB_THREAD;
}

bool MachThreadList::NotifyException(MachException::Data &exc) {
  MachThreadSP thread_sp(GetThreadByMachPortNumber(exc.thread_port));
  if (thread_sp) {
    thread_sp->NotifyException(exc);
    return true;
  }
  return false;
}

void MachThreadList::Clear() {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  m_threads.clear();
}

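// Reconcile our cached thread list with the live set of threads reported by
// ::task_threads(). Existing MachThread objects are kept, newly discovered
// threads are created (and reported through new_threads, if supplied), and
// threads that are no longer present are dropped when the lists are swapped.
// On the first stop this also determines whether the inferior is 64-bit and
// sets the DNBArchProtocol architecture accordingly.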
uint32_t
MachThreadList::UpdateThreadList(MachProcess *process, bool update,
                                 MachThreadList::collection *new_threads) {
  // locker will keep a mutex locked until it goes out of scope
  DNBLogThreadedIf(LOG_THREAD, "MachThreadList::UpdateThreadList (pid = %4.4x, "
                               "update = %u) process stop count = %u",
                   process->ProcessID(), update, process->StopCount());
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  if (process->StopCount() == 0) {
    int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, process->ProcessID()};
    struct kinfo_proc processInfo;
    size_t bufsize = sizeof(processInfo);
    if (sysctl(mib, (unsigned)(sizeof(mib) / sizeof(int)), &processInfo,
               &bufsize, NULL, 0) == 0 &&
        bufsize > 0) {
      if (processInfo.kp_proc.p_flag & P_LP64)
        m_is_64_bit = true;
    }
#if defined(__i386__) || defined(__x86_64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_X86_64);
    else
      DNBArchProtocol::SetArchitecture(CPU_TYPE_I386);
#elif defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
    if (m_is_64_bit)
      DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64);
    else {
      if (process->GetCPUType() == CPU_TYPE_ARM64_32)
        DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM64_32);
      else
        DNBArchProtocol::SetArchitecture(CPU_TYPE_ARM);
    }
#endif
  }

  if (m_threads.empty() || update) {
    thread_array_t thread_list = NULL;
    mach_msg_type_number_t thread_list_count = 0;
    task_t task = process->Task().TaskPort();
    DNBError err(::task_threads(task, &thread_list, &thread_list_count),
                 DNBError::MachKernel);

    if (DNBLogCheckLogBit(LOG_THREAD) || err.Fail())
      err.LogThreaded("::task_threads ( task = 0x%4.4x, thread_list => %p, "
                      "thread_list_count => %u )",
                      task, thread_list, thread_list_count);

    if (err.Status() == KERN_SUCCESS && thread_list_count > 0) {
      MachThreadList::collection currThreads;
      size_t idx;
      // Iterate through the current thread list and see which threads
      // we already have in our list (keep them), which ones we don't
      // (add them), and which ones are not around anymore (remove them).
      for (idx = 0; idx < thread_list_count; ++idx) {
        const thread_t mach_port_num = thread_list[idx];

        uint64_t unique_thread_id =
            MachThread::GetGloballyUniqueThreadIDForMachPortID(mach_port_num);
        MachThreadSP thread_sp(GetThreadByID(unique_thread_id));
        if (thread_sp) {
          // Keep the existing thread class
          currThreads.push_back(thread_sp);
        } else {
          // We don't have this thread, let's add it.
          thread_sp = std::make_shared<MachThread>(
              process, m_is_64_bit, unique_thread_id, mach_port_num);
          // Only add the new thread if it is ready to be displayed and shown
          // to users; threads that are not yet user ready will be picked up
          // on a later update.
          if (thread_sp->IsUserReady()) {
            if (new_threads)
              new_threads->push_back(thread_sp);

            currThreads.push_back(thread_sp);
          }
        }
      }

      m_threads.swap(currThreads);
      m_current_thread.reset();

      // Free the vm memory given to us by ::task_threads()
      vm_size_t thread_list_size =
          (vm_size_t)(thread_list_count * sizeof(thread_t));
      ::vm_deallocate(::mach_task_self(), (vm_address_t)thread_list,
                      thread_list_size);
    }
  }
  return static_cast<uint32_t>(m_threads.size());
}

void MachThreadList::CurrentThread(MachThreadSP &thread_sp) {
  // locker will keep a mutex locked until it goes out of scope
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  if (m_current_thread.get() == NULL) {
    // Figure out which thread is going to be our current thread.
    // This is currently done by finding the first thread in the list
    // that has a valid exception.
    const size_t num_threads = m_threads.size();
    for (uint32_t idx = 0; idx < num_threads; ++idx) {
      if (m_threads[idx]->GetStopException().IsValid()) {
        m_current_thread = m_threads[idx];
        break;
      }
    }
  }
  thread_sp = m_current_thread;
}

void MachThreadList::Dump() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->Dump(idx);
  }
}

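// Called before the process is resumed. Refresh the thread list (libdispatch
// or the kernel can spawn threads while the task is suspended), work out
// whether exactly one thread is meant to run or step, and hand each thread
// the resume action that applies to it. Newly discovered threads are resumed,
// unless only one thread is supposed to run, in which case they are suspended.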
void MachThreadList::ProcessWillResume(
    MachProcess *process, const DNBThreadResumeActions &thread_actions) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);

  // Update our thread list, because sometimes libdispatch or the kernel
  // will spawn threads while a task is suspended.
  MachThreadList::collection new_threads;

  // First figure out if we were planning on running only one thread, and if so
  // force that thread to resume.
  bool run_one_thread;
  nub_thread_t solo_thread = INVALID_NUB_THREAD;
  if (thread_actions.GetSize() > 0 &&
      thread_actions.NumActionsWithState(eStateStepping) +
              thread_actions.NumActionsWithState(eStateRunning) ==
          1) {
    run_one_thread = true;
    const DNBThreadResumeAction *action_ptr = thread_actions.GetFirst();
    size_t num_actions = thread_actions.GetSize();
    for (size_t i = 0; i < num_actions; i++, action_ptr++) {
      if (action_ptr->state == eStateStepping ||
          action_ptr->state == eStateRunning) {
        solo_thread = action_ptr->tid;
        break;
      }
    }
  } else
    run_one_thread = false;

  UpdateThreadList(process, true, &new_threads);

  DNBThreadResumeAction resume_new_threads = {-1U, eStateRunning, 0,
                                              INVALID_NUB_ADDRESS};
  // If we are planning to run only one thread, any new threads should be
  // suspended.
  if (run_one_thread)
    resume_new_threads.state = eStateSuspended;

  const size_t num_new_threads = new_threads.size();
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    MachThread *thread = m_threads[idx].get();
    bool handled = false;
    for (uint32_t new_idx = 0; new_idx < num_new_threads; ++new_idx) {
      if (thread == new_threads[new_idx].get()) {
        thread->ThreadWillResume(&resume_new_threads);
        handled = true;
        break;
      }
    }

    if (!handled) {
      const DNBThreadResumeAction *thread_action =
          thread_actions.GetActionForThread(thread->ThreadID(), true);
      // There must always be a thread action for every thread.
      assert(thread_action);
      bool others_stopped = false;
      if (solo_thread == thread->ThreadID())
        others_stopped = true;
      thread->ThreadWillResume(thread_action, others_stopped);
    }
  }

  if (new_threads.size()) {
    for (uint32_t idx = 0; idx < num_new_threads; ++idx) {
      DNBLogThreadedIf(
          LOG_THREAD, "MachThreadList::ProcessWillResume (pid = %4.4x) "
                      "stop-id=%u, resuming newly discovered thread: "
                      "0x%8.8" PRIx64 ", thread-is-user-ready=%i)",
          process->ProcessID(), process->StopCount(),
          new_threads[idx]->ThreadID(), new_threads[idx]->IsUserReady());
    }
  }
}

uint32_t MachThreadList::ProcessDidStop(MachProcess *process) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  // Update our thread list
  const uint32_t num_threads = UpdateThreadList(process, true);
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->ThreadDidStop();
  }
  return num_threads;
}

// Check each thread in our thread list to see if we should notify our
// client of the current halt in execution.
//
// Breakpoints can have callback functions associated with them that
// can return true to stop, or false to continue executing the inferior.
//
// RETURNS
//    true if we should stop and notify our clients
//    false if we should resume our child process and skip notification
bool MachThreadList::ShouldStop(bool &step_more) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  bool should_stop = false;
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; !should_stop && idx < num_threads; ++idx) {
    should_stop = m_threads[idx]->ShouldStop(step_more);
  }
  return should_stop;
}

void MachThreadList::NotifyBreakpointChanged(const DNBBreakpoint *bp) {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    m_threads[idx]->NotifyBreakpointChanged(bp);
  }
}

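// Apply a hardware breakpoint or watchpoint action to every thread as a
// single transaction: if any thread fails to program the hardware, the
// changes made so far are rolled back and INVALID_NUB_HW_INDEX is returned;
// otherwise every thread commits its pending transaction and the hardware
// index is returned.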
uint32_t MachThreadList::DoHardwareBreakpointAction(
    const DNBBreakpoint *bp, HardwareBreakpointAction action) const {
  if (bp == NULL)
    return INVALID_NUB_HW_INDEX;

  uint32_t hw_index = INVALID_NUB_HW_INDEX;
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // On Mac OS X we have to prime the control registers for new threads.  We do
  // this using the control register data for the first thread, for lack of a
  // better way of choosing.
  bool also_set_on_task = true;
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    switch (action) {
    case HardwareBreakpointAction::EnableWatchpoint:
      hw_index = m_threads[idx]->EnableHardwareWatchpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::DisableWatchpoint:
      hw_index =
          m_threads[idx]->DisableHardwareWatchpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::EnableBreakpoint:
      hw_index = m_threads[idx]->EnableHardwareBreakpoint(bp, also_set_on_task);
      break;
    case HardwareBreakpointAction::DisableBreakpoint:
      hw_index =
          m_threads[idx]->DisableHardwareBreakpoint(bp, also_set_on_task);
      break;
    }
    if (hw_index == INVALID_NUB_HW_INDEX) {
      // We know that idx failed for some reason.  Let's rollback the
      // transaction for [0, idx).
      for (uint32_t i = 0; i < idx; ++i)
        m_threads[i]->RollbackTransForHWP();
      return INVALID_NUB_HW_INDEX;
    }
    also_set_on_task = false;
  }
  // Notify each thread to commit the pending transaction.
  for (uint32_t idx = 0; idx < num_threads; ++idx)
    m_threads[idx]->FinishTransForHWP();
  return hw_index;
}

// DNBWatchpointSet() -> MachProcess::CreateWatchpoint() ->
// MachProcess::EnableWatchpoint()
// -> MachThreadList::EnableHardwareWatchpoint().
uint32_t
MachThreadList::EnableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  return DoHardwareBreakpointAction(wp,
                                    HardwareBreakpointAction::EnableWatchpoint);
}

bool MachThreadList::DisableHardwareWatchpoint(const DNBBreakpoint *wp) const {
  return DoHardwareBreakpointAction(
             wp, HardwareBreakpointAction::DisableWatchpoint) !=
         INVALID_NUB_HW_INDEX;
}

uint32_t
MachThreadList::EnableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  return DoHardwareBreakpointAction(bp,
                                    HardwareBreakpointAction::EnableBreakpoint);
}

bool MachThreadList::DisableHardwareBreakpoint(const DNBBreakpoint *bp) const {
  return DoHardwareBreakpointAction(
             bp, HardwareBreakpointAction::DisableBreakpoint) !=
         INVALID_NUB_HW_INDEX;
}

uint32_t MachThreadList::NumSupportedHardwareWatchpoints() const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  // Use an arbitrary thread to retrieve the number of supported hardware
  // watchpoints.
  if (num_threads)
    return m_threads[0]->NumSupportedHardwareWatchpoints();
  return 0;
}

uint32_t MachThreadList::GetThreadIndexForThreadStoppedWithSignal(
    const int signo) const {
  PTHREAD_MUTEX_LOCKER(locker, m_threads_mutex);
  const size_t num_threads = m_threads.size();
  for (uint32_t idx = 0; idx < num_threads; ++idx) {
    if (m_threads[idx]->GetStopException().SoftSignal() == signo)
      return idx;
  }
  return UINT32_MAX;
}