1 //===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Created by Greg Clayton on 6/25/07.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
14
15 #include "MacOSX/arm64/DNBArchImplARM64.h"
16
17 #if defined(ARM_THREAD_STATE64_COUNT)
18
19 #include "DNB.h"
20 #include "DNBBreakpoint.h"
21 #include "DNBLog.h"
22 #include "DNBRegisterInfo.h"
23 #include "MacOSX/MachProcess.h"
24 #include "MacOSX/MachThread.h"
25
26 #include <cinttypes>
27 #include <sys/sysctl.h>
28
29 #if __has_feature(ptrauth_calls)
30 #include <ptrauth.h>
31 #endif
32
33 // Break only in privileged or user mode
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
35 #define S_USER ((uint32_t)(2u << 1))
36
37 #define BCR_ENABLE ((uint32_t)(1u))
38 #define WCR_ENABLE ((uint32_t)(1u))
39
40 // Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
42 #define WCR_LOAD ((uint32_t)(1u << 3))
43 #define WCR_STORE ((uint32_t)(1u << 4))
44
45 // Enable breakpoint, watchpoint, and vector catch debug exceptions.
46 // (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in
47 // DBGDSCRext in Aarch32)
48 #define MDE_ENABLE ((uint32_t)(1u << 15))
49
50 // Single instruction step
51 // (SS bit in the MDSCR_EL1 register)
52 #define SS_ENABLE ((uint32_t)(1u))
53
// AArch64 software breakpoint instruction; the bytes are the little-endian
// memory image of the "brk #0" opcode 0xd4200000.
static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", bytes of 0xd4200000 as laid out in memory

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array: LoHi[low_index] == high_index.

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
static uint32_t LoHi[16] = {0};
65
Initialize()66 void DNBArchMachARM64::Initialize() {
67 DNBArchPluginInfo arch_plugin_info = {
68 CPU_TYPE_ARM64, DNBArchMachARM64::Create,
69 DNBArchMachARM64::GetRegisterSetInfo,
70 DNBArchMachARM64::SoftwareBreakpointOpcode};
71
72 // Register this arch plug-in with the main protocol class
73 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
74
75 DNBArchPluginInfo arch_plugin_info_32 = {
76 CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
77 DNBArchMachARM64::GetRegisterSetInfo,
78 DNBArchMachARM64::SoftwareBreakpointOpcode};
79
80 // Register this arch plug-in with the main protocol class
81 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
82 }
83
Create(MachThread * thread)84 DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
85 DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);
86
87 return obj;
88 }
89
90 const uint8_t *
SoftwareBreakpointOpcode(nub_size_t byte_size)91 DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
92 return g_arm64_breakpoint_opcode;
93 }
94
GetCPUType()95 uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }
96
clear_pac_bits(uint64_t value)97 static uint64_t clear_pac_bits(uint64_t value) {
98 uint32_t addressing_bits = 0;
99 if (!DNBGetAddressingBits(addressing_bits))
100 return value;
101
102 // On arm64_32, no ptrauth bits to clear
103 #if !defined(__LP64__)
104 return value;
105 #endif
106
107 uint64_t mask = ((1ULL << addressing_bits) - 1);
108
109 // Normally PAC bit clearing needs to check b55 and either set the
110 // non-addressing bits, or clear them. But the register values we
111 // get from thread_get_state on an arm64e process don't follow this
112 // convention?, at least when there's been a PAC auth failure in
113 // the inferior.
114 // Userland processes are always in low memory, so this
115 // hardcoding b55 == 0 PAC stripping behavior here.
116
117 return value & mask; // high bits cleared to 0
118 }
119
GetPC(uint64_t failValue)120 uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
121 // Get program counter
122 if (GetGPRState(false) == KERN_SUCCESS)
123 #if __has_feature(ptrauth_calls) && defined(__LP64__)
124 return clear_pac_bits(
125 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
126 #else
127 return m_state.context.gpr.__pc;
128 #endif
129 return failValue;
130 }
131
// Write a new program counter into the cached GPR state and flush the state
// back to the thread.  The GPRs are fetched first so that everything else
// written back is current.
kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage. Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t) ptrauth_strip((void*) value, ptrauth_key_function_pointer);
    value = (uint64_t) ptrauth_sign_unauthenticated((void*) value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  // NOTE(review): this returns the boolean (err == KERN_SUCCESS) coerced to
  // kern_return_t, i.e. 1 on success and 0 (== KERN_SUCCESS) on failure --
  // inverted relative to normal kern_return_t conventions.  Callers appear to
  // treat the result as a bool; confirm before "fixing" this to return err.
  return err == KERN_SUCCESS;
}
151
GetSP(uint64_t failValue)152 uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
153 // Get stack pointer
154 if (GetGPRState(false) == KERN_SUCCESS)
155 #if __has_feature(ptrauth_calls) && defined(__LP64__)
156 return clear_pac_bits(
157 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
158 #else
159 return m_state.context.gpr.__sp;
160 #endif
161 return failValue;
162 }
163
GetGPRState(bool force)164 kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
165 int set = e_regSetGPR;
166 // Check if we have valid cached registers
167 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
168 return KERN_SUCCESS;
169
170 // Read the registers from our thread
171 mach_msg_type_number_t count = e_regSetGPRCount;
172 kern_return_t kret =
173 ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
174 (thread_state_t)&m_state.context.gpr, &count);
175 if (DNBLogEnabledForAny(LOG_THREAD)) {
176 uint64_t *x = &m_state.context.gpr.__x[0];
177
178 #if __has_feature(ptrauth_calls) && defined(__LP64__)
179 uint64_t log_fp = clear_pac_bits(
180 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
181 uint64_t log_lr = clear_pac_bits(
182 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr));
183 uint64_t log_sp = clear_pac_bits(
184 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
185 uint64_t log_pc = clear_pac_bits(
186 reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
187 #else
188 uint64_t log_fp = m_state.context.gpr.__fp;
189 uint64_t log_lr = m_state.context.gpr.__lr;
190 uint64_t log_sp = m_state.context.gpr.__sp;
191 uint64_t log_pc = m_state.context.gpr.__pc;
192 #endif
193 DNBLogThreaded(
194 "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
195 "\n x0=%16.16llx"
196 "\n x1=%16.16llx"
197 "\n x2=%16.16llx"
198 "\n x3=%16.16llx"
199 "\n x4=%16.16llx"
200 "\n x5=%16.16llx"
201 "\n x6=%16.16llx"
202 "\n x7=%16.16llx"
203 "\n x8=%16.16llx"
204 "\n x9=%16.16llx"
205 "\n x10=%16.16llx"
206 "\n x11=%16.16llx"
207 "\n x12=%16.16llx"
208 "\n x13=%16.16llx"
209 "\n x14=%16.16llx"
210 "\n x15=%16.16llx"
211 "\n x16=%16.16llx"
212 "\n x17=%16.16llx"
213 "\n x18=%16.16llx"
214 "\n x19=%16.16llx"
215 "\n x20=%16.16llx"
216 "\n x21=%16.16llx"
217 "\n x22=%16.16llx"
218 "\n x23=%16.16llx"
219 "\n x24=%16.16llx"
220 "\n x25=%16.16llx"
221 "\n x26=%16.16llx"
222 "\n x27=%16.16llx"
223 "\n x28=%16.16llx"
224 "\n fp=%16.16llx"
225 "\n lr=%16.16llx"
226 "\n sp=%16.16llx"
227 "\n pc=%16.16llx"
228 "\n cpsr=%8.8x",
229 m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
230 x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[0], x[11],
231 x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21],
232 x[22], x[23], x[24], x[25], x[26], x[27], x[28],
233 log_fp, log_lr, log_sp, log_pc, m_state.context.gpr.__cpsr);
234 }
235 m_state.SetError(set, Read, kret);
236 return kret;
237 }
238
// Read the NEON/FP registers (ARM_NEON_STATE64) from the thread into
// m_state.context.vfp, honoring the cached copy unless force is true.
// Returns the kern_return_t from thread_get_state and records it in the
// state cache.  When LOG_THREAD logging is enabled, each 128-bit q register
// is dumped as two 64-bit halves.
kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n q0 = 0x%16.16llx%16.16llx"
        "\n q1 = 0x%16.16llx%16.16llx"
        "\n q2 = 0x%16.16llx%16.16llx"
        "\n q3 = 0x%16.16llx%16.16llx"
        "\n q4 = 0x%16.16llx%16.16llx"
        "\n q5 = 0x%16.16llx%16.16llx"
        "\n q6 = 0x%16.16llx%16.16llx"
        "\n q7 = 0x%16.16llx%16.16llx"
        "\n q8 = 0x%16.16llx%16.16llx"
        "\n q9 = 0x%16.16llx%16.16llx"
        "\n q10 = 0x%16.16llx%16.16llx"
        "\n q11 = 0x%16.16llx%16.16llx"
        "\n q12 = 0x%16.16llx%16.16llx"
        "\n q13 = 0x%16.16llx%16.16llx"
        "\n q14 = 0x%16.16llx%16.16llx"
        "\n q15 = 0x%16.16llx%16.16llx"
        "\n q16 = 0x%16.16llx%16.16llx"
        "\n q17 = 0x%16.16llx%16.16llx"
        "\n q18 = 0x%16.16llx%16.16llx"
        "\n q19 = 0x%16.16llx%16.16llx"
        "\n q20 = 0x%16.16llx%16.16llx"
        "\n q21 = 0x%16.16llx%16.16llx"
        "\n q22 = 0x%16.16llx%16.16llx"
        "\n q23 = 0x%16.16llx%16.16llx"
        "\n q24 = 0x%16.16llx%16.16llx"
        "\n q25 = 0x%16.16llx%16.16llx"
        "\n q26 = 0x%16.16llx%16.16llx"
        "\n q27 = 0x%16.16llx%16.16llx"
        "\n q28 = 0x%16.16llx%16.16llx"
        "\n q29 = 0x%16.16llx%16.16llx"
        "\n q30 = 0x%16.16llx%16.16llx"
        "\n q31 = 0x%16.16llx%16.16llx"
        "\n fpsr = 0x%8.8x"
        "\n fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        // Each __v[i] is a 128-bit value; reinterpret it as two uint64_t
        // halves ([0] = low 64 bits, [1] = high 64 bits) for printing.
        ((uint64_t *)&m_state.context.vfp.__v[0])[0],
        ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0],
        ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0],
        ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0],
        ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0],
        ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0],
        ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0],
        ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0],
        ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0],
        ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0],
        ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0],
        ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0],
        ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0],
        ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0],
        ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0],
        ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0],
        ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0],
        ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0],
        ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0],
        ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0],
        ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0],
        ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0],
        ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0],
        ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0],
        ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0],
        ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0],
        ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0],
        ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0],
        ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0],
        ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0],
        ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0],
        ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0],
        ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}
359
GetEXCState(bool force)360 kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
361 int set = e_regSetEXC;
362 // Check if we have valid cached registers
363 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
364 return KERN_SUCCESS;
365
366 // Read the registers from our thread
367 mach_msg_type_number_t count = e_regSetEXCCount;
368 kern_return_t kret =
369 ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
370 (thread_state_t)&m_state.context.exc, &count);
371 m_state.SetError(set, Read, kret);
372 return kret;
373 }
374
#if 0
// Debugging aid (currently compiled out): dump all 16 hardware breakpoint
// (BVR/BCR) and watchpoint (WVR/WCR) register pairs to the step log.
static void DumpDBGState(const arm_debug_state_t &dbg) {
  uint32_t i = 0;
  for (i = 0; i < 16; i++)
    DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
                               "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
                     i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
                     dbg.__wcr[i]);
}
#endif
385
GetDBGState(bool force)386 kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
387 int set = e_regSetDBG;
388
389 // Check if we have valid cached registers
390 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
391 return KERN_SUCCESS;
392
393 // Read the registers from our thread
394 mach_msg_type_number_t count = e_regSetDBGCount;
395 kern_return_t kret =
396 ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
397 (thread_state_t)&m_state.dbg, &count);
398 m_state.SetError(set, Read, kret);
399
400 return kret;
401 }
402
SetGPRState()403 kern_return_t DNBArchMachARM64::SetGPRState() {
404 int set = e_regSetGPR;
405 kern_return_t kret = ::thread_set_state(
406 m_thread->MachPortNumber(), ARM_THREAD_STATE64,
407 (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
408 m_state.SetError(set, Write,
409 kret); // Set the current write error for this register set
410 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
411 // state in case registers are read
412 // back differently
413 return kret; // Return the error code
414 }
415
SetVFPState()416 kern_return_t DNBArchMachARM64::SetVFPState() {
417 int set = e_regSetVFP;
418 kern_return_t kret = ::thread_set_state(
419 m_thread->MachPortNumber(), ARM_NEON_STATE64,
420 (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
421 m_state.SetError(set, Write,
422 kret); // Set the current write error for this register set
423 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
424 // state in case registers are read
425 // back differently
426 return kret; // Return the error code
427 }
428
SetEXCState()429 kern_return_t DNBArchMachARM64::SetEXCState() {
430 int set = e_regSetEXC;
431 kern_return_t kret = ::thread_set_state(
432 m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
433 (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
434 m_state.SetError(set, Write,
435 kret); // Set the current write error for this register set
436 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
437 // state in case registers are read
438 // back differently
439 return kret; // Return the error code
440 }
441
SetDBGState(bool also_set_on_task)442 kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
443 int set = e_regSetDBG;
444 kern_return_t kret =
445 ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
446 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
447 if (also_set_on_task) {
448 kern_return_t task_kret = task_set_state(
449 m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
450 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
451 if (task_kret != KERN_SUCCESS)
452 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
453 "to set debug control register state: "
454 "0x%8.8x.",
455 task_kret);
456 }
457 m_state.SetError(set, Write,
458 kret); // Set the current write error for this register set
459 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
460 // state in case registers are read
461 // back differently
462
463 return kret; // Return the error code
464 }
465
// Called just before the thread is resumed.  Arms single-step if the thread
// is stepping, and if a watchpoint just fired, temporarily disables it and
// single-steps over the triggering instruction (it is re-enabled in
// ThreadDidStop()).
void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the instruction
  // which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user. We don't need
        // to do anything at all
        // to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      // Second argument false: do not propagate the disable to the task;
      // this is a temporary, this-thread-only disable.
      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                        "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction,
      // at least we should reset the two watchpoint member variables so that
      // the next time around
      // this callback function is invoked, the enclosing logical branch is
      // skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(
            LOG_WATCHPOINTS,
            "DNBArchMachARM::ThreadWillResume() failed to enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                          "succeeded to enable single step");
    }
  }
}
517
// Inspect a mach exception before it is reported.  For watchpoint hits
// (EXC_ARM_DA_DEBUG) it records which hardware watchpoint fired, rewrites
// split (two-register) watchpoint hits to the logical start address, and
// appends the hw index to the exception data.  Returns true if the
// exception data was modified.
bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big. If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                        "watchpoint %d was hit on address "
                                        "0x%llx",
                       hw_index, (uint64_t)addr);
      const uint32_t num_watchpoints = NumSupportedHardwareWatchpoints();
      for (uint32_t i = 0; i < num_watchpoints; i++) {
        // LoHi[i] != 0 means slot i is the "low" half of a split watchpoint
        // whose "high" half lives in slot LoHi[i].
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                            "It is a linked watchpoint; "
                                            "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        // Remember the hit so ThreadWillResume()/ThreadDidStop() can step
        // over the triggering instruction and re-arm the watchpoint.
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    // detect a __builtin_debugtrap instruction pattern ("brk #0xf000")
    // and advance the $pc past it, so that the user can continue execution.
    // Generally speaking, this knowledge should be centralized in lldb,
    // recognizing the builtin_trap instruction and knowing how to advance
    // the pc past it, so that continue etc work.
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_BREAKPOINT) {
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        // Only auto-advance when the trap is NOT one of our own breakpoints.
        if (bp == nullptr) {
          uint8_t insnbuf[4];
          if (m_thread->Process()->ReadMemory(pc, 4, insnbuf) == 4) {
            uint8_t builtin_debugtrap_insn[4] = {0x00, 0x00, 0x3e,
                                                 0xd4}; // brk #0xf000
            if (memcmp(insnbuf, builtin_debugtrap_insn, 4) == 0) {
              SetPC(pc + 4);
            }
          }
        }
      }
    }
    break;
  }
  return false;
}
587
// Called after the thread stops.  Invalidates cached register state,
// undoes the temporary watchpoint-disable/single-step dance set up in
// ThreadWillResume(), and clears the trace bit if this thread was stepping.
bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great! We now disable the hardware single step as well as re-enable the
    // hardware watchpoint.
    // See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                    "is true but (m_watchpoint_did_occur && "
                    "m_watchpoint_hw_index >= 0) does not hold!");
      }
    } else {
      DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                  "is true but unable to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time
    }
  }
  return success;
}
629
// Set or clear the single step (SS) bit in the MDSCR_EL1 debug register and
// write the debug state back to the thread.  Returns KERN_SUCCESS on
// success, or the failing kern_return_t from reading/writing thread state.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  // GPRs are read only so the current pc can be included in the log below.
  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

#if __has_feature(ptrauth_calls) && defined(__LP64__)
  uint64_t pc = clear_pac_bits(
      reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
  uint64_t pc = m_state.context.gpr.__pc;
#endif

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  // false: only this thread's debug state is written, not the whole task's.
  return SetDBGState(false);
}
670
// Return 1 if bit "bit" is set in "value", else 0.
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value & (1u << bit)) ? 1u : 0u;
}
675
// Extract the bitfield "value[msbit:lsbit]", inclusive on both ends.
// Implemented as a shift-up/shift-down pair rather than a mask so that the
// full-width case (msbit == 63, lsbit == 0) needs no 1 << 64 shift.
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  const uint32_t discard_high = sizeof(value) * 8 - 1 - msbit;
  // Push the bits above msbit off the top, then bring the field down so its
  // lsbit lands at position 0.
  return (value << discard_high) >> (discard_high + lsbit);
}
686
NumSupportedHardwareWatchpoints()687 uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
688 // Set the init value to something that will let us know that we need to
689 // autodetect how many watchpoints are supported dynamically...
690 static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
691 if (g_num_supported_hw_watchpoints == UINT_MAX) {
692 // Set this to zero in case we can't tell if there are any HW breakpoints
693 g_num_supported_hw_watchpoints = 0;
694
695 size_t len;
696 uint32_t n = 0;
697 len = sizeof(n);
698 if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
699 g_num_supported_hw_watchpoints = n;
700 DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
701 } else {
702 // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
703 // EL0 so it can't
704 // access that reg. The kernel should have filled in the sysctls based on it
705 // though.
706 #if defined(__arm__)
707 uint32_t register_DBGDIDR;
708
709 asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
710 uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
711 // Zero is reserved for the WRP count, so don't increment it if it is zero
712 if (numWRPs > 0)
713 numWRPs++;
714 g_num_supported_hw_watchpoints = numWRPs;
715 DNBLogThreadedIf(LOG_THREAD,
716 "Number of supported hw watchpoints via asm(): %d",
717 g_num_supported_hw_watchpoints);
718 #endif
719 }
720 }
721 return g_num_supported_hw_watchpoints;
722 }
723
NumSupportedHardwareBreakpoints()724 uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
725 // Set the init value to something that will let us know that we need to
726 // autodetect how many breakpoints are supported dynamically...
727 static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
728 if (g_num_supported_hw_breakpoints == UINT_MAX) {
729 // Set this to zero in case we can't tell if there are any HW breakpoints
730 g_num_supported_hw_breakpoints = 0;
731
732 size_t len;
733 uint32_t n = 0;
734 len = sizeof(n);
735 if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
736 g_num_supported_hw_breakpoints = n;
737 DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
738 } else {
739 // For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
740 // EL0 so it can't access that reg. The kernel should have filled in the
741 // sysctls based on it though.
742 #if defined(__arm__)
743 uint32_t register_DBGDIDR;
744
745 asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
746 uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
747 // Zero is reserved for the WRP count, so don't increment it if it is zero
748 if (numWRPs > 0)
749 numWRPs++;
750 g_num_supported_hw_breakpoints = numWRPs;
751 DNBLogThreadedIf(LOG_THREAD,
752 "Number of supported hw breakpoint via asm(): %d",
753 g_num_supported_hw_breakpoints);
754 #endif
755 }
756 }
757 return g_num_supported_hw_breakpoints;
758 }
759
EnableHardwareBreakpoint(nub_addr_t addr,nub_size_t size,bool also_set_on_task)760 uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
761 nub_size_t size,
762 bool also_set_on_task) {
763 DNBLogThreadedIf(LOG_WATCHPOINTS,
764 "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
765 "0x%8.8llx, size = %zu)",
766 (uint64_t)addr, size);
767
768 const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();
769
770 nub_addr_t aligned_bp_address = addr;
771 uint32_t control_value = 0;
772
773 switch (size) {
774 case 2:
775 control_value = (0x3 << 5) | 7;
776 aligned_bp_address &= ~1;
777 break;
778 case 4:
779 control_value = (0xfu << 5) | 7;
780 aligned_bp_address &= ~3;
781 break;
782 };
783
784 // Read the debug state
785 kern_return_t kret = GetDBGState(false);
786 if (kret == KERN_SUCCESS) {
787 // Check to make sure we have the needed hardware support
788 uint32_t i = 0;
789
790 for (i = 0; i < num_hw_breakpoints; ++i) {
791 if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
792 break; // We found an available hw breakpoint slot (in i)
793 }
794
795 // See if we found an available hw breakpoint slot above
796 if (i < num_hw_breakpoints) {
797 m_state.dbg.__bvr[i] = aligned_bp_address;
798 m_state.dbg.__bcr[i] = control_value;
799
800 DNBLogThreadedIf(LOG_WATCHPOINTS,
801 "DNBArchMachARM64::EnableHardwareBreakpoint() "
802 "adding breakpoint on address 0x%llx with control "
803 "register value 0x%x",
804 (uint64_t)m_state.dbg.__bvr[i],
805 (uint32_t)m_state.dbg.__bcr[i]);
806
807 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
808 // automatically, don't need to do it here.
809 kret = SetDBGState(also_set_on_task);
810
811 DNBLogThreadedIf(LOG_WATCHPOINTS,
812 "DNBArchMachARM64::"
813 "EnableHardwareBreakpoint() "
814 "SetDBGState() => 0x%8.8x.",
815 kret);
816
817 if (kret == KERN_SUCCESS)
818 return i;
819 } else {
820 DNBLogThreadedIf(LOG_WATCHPOINTS,
821 "DNBArchMachARM64::"
822 "EnableHardwareBreakpoint(): All "
823 "hardware resources (%u) are in use.",
824 num_hw_breakpoints);
825 }
826 }
827 return INVALID_NUB_HW_INDEX;
828 }
829
// Set a hardware watchpoint covering [addr, addr+size) for read and/or
// write accesses.  Returns the hardware slot index used (the "lo" slot if
// the request had to be split across two slots, recorded in LoHi[]), or
// INVALID_NUB_HW_INDEX on failure.  When also_set_on_task is true, the
// debug state is also applied task-wide so new threads inherit it.
uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can't watch zero bytes
  if (size == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
  if (size > 8)
    return INVALID_NUB_HW_INDEX;

  // Aarch64 watchpoints are in one of two forms: (1) 1-8 bytes, aligned to
  // an 8 byte address, or (2) a power-of-two size region of memory; minimum
  // 8 bytes, maximum 2GB; the starting address must be aligned to that power
  // of two.
  //
  // For (1), 1-8 byte watchpoints, using the Byte Address Selector field in
  // DBGWCR<n>.BAS.  Any of the bytes may be watched, but if multiple bytes
  // are watched, the bytes selected must be contiguous.  The start address
  // watched must be doubleword (8-byte) aligned; if the start address is
  // word (4-byte) aligned, only 4 bytes can be watched.
  //
  // For (2), the MASK field in DBGWCR<n>.MASK is used.
  //
  // See the ARM ARM, section "Watchpoint exceptions", and more specifically,
  // "Watchpoint data address comparisons".
  //
  // debugserver today only supports (1) - the Byte Address Selector 1-8 byte
  // watchpoints that are 8-byte aligned.  To support larger watchpoints,
  // debugserver would need to interpret the mach exception when the watched
  // region was hit, see if the address accessed lies within the subset
  // of the power-of-two region that lldb asked us to watch (v. ARM ARM,
  // "Determining the memory location that caused a Watchpoint exception"),
  // and silently resume the inferior (disable watchpoint, stepi, re-enable
  // watchpoint) if the address lies outside the region that lldb asked us
  // to watch.
  //
  // Alternatively, lldb would need to be prepared for a larger region
  // being watched than it requested, and silently resume the inferior if
  // the accessed address is outside the region lldb wants to watch.

  // Round down to the doubleword the hardware actually compares against,
  // and remember where inside that doubleword the watched bytes start.
  nub_addr_t aligned_wp_address = addr & ~0x7;
  uint32_t addr_dword_offset = addr & 0x7;

  // Do we need to split up this logical watchpoint into two hardware watchpoint
  // registers?
  // e.g. a watchpoint of length 4 on address 6.  We need do this with
  // one watchpoint on address 0 with bytes 6 & 7 being monitored
  // one watchpoint on address 8 with bytes 0, 1, 2, 3 being monitored

  if (addr_dword_offset + size > 8) {
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                      "EnableHardwareWatchpoint(addr = "
                                      "0x%8.8llx, size = %zu) needs two "
                                      "hardware watchpoints slots to monitor",
                     (uint64_t)addr, size);
    int low_watchpoint_size = 8 - addr_dword_offset;
    int high_watchpoint_size = addr_dword_offset + size - 8;

    // Recurse for each half; both halves now fit in a single slot each.
    uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
                                           write, also_set_on_task);
    if (lo == INVALID_NUB_HW_INDEX)
      return INVALID_NUB_HW_INDEX;
    uint32_t hi =
        EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
                                 read, write, also_set_on_task);
    if (hi == INVALID_NUB_HW_INDEX) {
      // Couldn't get a second slot; roll back the first half.
      DisableHardwareWatchpoint(lo, also_set_on_task);
      return INVALID_NUB_HW_INDEX;
    }
    // Tag this lo->hi mapping in our database.
    LoHi[lo] = hi;
    return lo;
  }

  // At this point
  //  1 aligned_wp_address is the requested address rounded down to 8-byte
  //    alignment
  //  2 addr_dword_offset is the offset into that double word (8-byte) region
  //    that we are watching
  //  3 size is the number of bytes within that 8-byte region that we are
  //    watching

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above.
  // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for 4,
  // etc, up to 0b11111111 for 8.
  // then we shift those bits left by the offset into this dword that we are
  // interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    // Scan for the first slot whose enable bit is clear.
    for (i = 0; i < num_hw_watchpoints; ++i) {
      if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
        break; // We found an available hw watchpoint slot (in i)
    }

    // See if we found an available hw watchpoint slot above
    if (i < num_hw_watchpoints) {
      // DumpDBGState(m_state.dbg);

      // Clear any previous LoHi joined-watchpoint that may have been in use
      LoHi[i] = 0;

      // shift our Byte Address Select bits up to the correct bit range for the
      // DBGWCRn_EL1
      byte_address_select = byte_address_select << 5;

      // Make sure bits 1:0 are clear in our address
      m_state.dbg.__wvr[i] = aligned_wp_address; // DVA (Data Virtual Address)
      m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                             // the DVA that we will watch
                             S_USER |                // Stop only in user mode
                             (read ? WCR_LOAD : 0) | // Stop on read access?
                             (write ? WCR_STORE : 0) | // Stop on write access?
                             WCR_ENABLE;              // Enable this watchpoint;

      DNBLogThreadedIf(
          LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
                           "adding watchpoint on address 0x%llx with control "
                           "register value 0x%x",
          (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.

      kret = SetDBGState(also_set_on_task);
      // DumpDBGState(m_state.dbg);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}
995
ReenableHardwareWatchpoint(uint32_t hw_index)996 bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
997 // If this logical watchpoint # is actually implemented using
998 // two hardware watchpoint registers, re-enable both of them.
999
1000 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
1001 return ReenableHardwareWatchpoint_helper(hw_index) &&
1002 ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
1003 } else {
1004 return ReenableHardwareWatchpoint_helper(hw_index);
1005 }
1006 }
1007
ReenableHardwareWatchpoint_helper(uint32_t hw_index)1008 bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
1009 kern_return_t kret = GetDBGState(false);
1010 if (kret != KERN_SUCCESS)
1011 return false;
1012
1013 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
1014 if (hw_index >= num_hw_points)
1015 return false;
1016
1017 m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
1018 m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;
1019
1020 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
1021 "EnableHardwareWatchpoint( %u ) - WVR%u = "
1022 "0x%8.8llx WCR%u = 0x%8.8llx",
1023 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
1024 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
1025
1026 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
1027 // automatically, don't need to do it here.
1028
1029 kret = SetDBGState(false);
1030
1031 return (kret == KERN_SUCCESS);
1032 }
1033
DisableHardwareWatchpoint(uint32_t hw_index,bool also_set_on_task)1034 bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
1035 bool also_set_on_task) {
1036 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
1037 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
1038 DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
1039 } else {
1040 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
1041 }
1042 }
1043
DisableHardwareWatchpoint_helper(uint32_t hw_index,bool also_set_on_task)1044 bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
1045 bool also_set_on_task) {
1046 kern_return_t kret = GetDBGState(false);
1047 if (kret != KERN_SUCCESS)
1048 return false;
1049
1050 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
1051 if (hw_index >= num_hw_points)
1052 return false;
1053
1054 m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
1055 m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];
1056
1057 m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
1058 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
1059 "DisableHardwareWatchpoint( %u ) - WVR%u = "
1060 "0x%8.8llx WCR%u = 0x%8.8llx",
1061 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
1062 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
1063
1064 kret = SetDBGState(also_set_on_task);
1065
1066 return (kret == KERN_SUCCESS);
1067 }
1068
DisableHardwareBreakpoint(uint32_t hw_index,bool also_set_on_task)1069 bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
1070 bool also_set_on_task) {
1071 kern_return_t kret = GetDBGState(false);
1072 if (kret != KERN_SUCCESS)
1073 return false;
1074
1075 const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
1076 if (hw_index >= num_hw_points)
1077 return false;
1078
1079 m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
1080 m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];
1081
1082 m_state.dbg.__bcr[hw_index] = 0;
1083 DNBLogThreadedIf(LOG_WATCHPOINTS,
1084 "DNBArchMachARM64::"
1085 "DisableHardwareBreakpoint( %u ) - WVR%u = "
1086 "0x%8.8llx BCR%u = 0x%8.8llx",
1087 hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
1088 hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);
1089
1090 kret = SetDBGState(also_set_on_task);
1091
1092 return (kret == KERN_SUCCESS);
1093 }
1094
// This is for checking the Byte Address Select bits in the DBGWCRn_EL1
// watchpoint control register.
// Returns -1 if the trailing bit patterns are not one of:
// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
//   0b?1000000, 0b10000000 }.
// Return the index (0-7) of the lowest set bit in the low byte of val,
// or -1 when none of bits [7:0] are set. Only eight bits are examined
// because the BAS field being validated is eight bits wide.
static inline int32_t LowestBitSet(uint32_t val) {
  for (unsigned idx = 0; idx < 8; ++idx)
    if ((val >> idx) & 1u)
      return idx;
  return -1;
}
1107
1108 // Iterate through the debug registers; return the index of the first watchpoint
1109 // whose address matches.
1110 // As a side effect, the starting address as understood by the debugger is
1111 // returned which could be
1112 // different from 'addr' passed as an in/out argument.
GetHardwareWatchpointHit(nub_addr_t & addr)1113 uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
1114 // Read the debug state
1115 kern_return_t kret = GetDBGState(true);
1116 // DumpDBGState(m_state.dbg);
1117 DNBLogThreadedIf(
1118 LOG_WATCHPOINTS,
1119 "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
1120 kret);
1121 DNBLogThreadedIf(LOG_WATCHPOINTS,
1122 "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
1123 (uint64_t)addr);
1124
1125 if (kret == KERN_SUCCESS) {
1126 DBG &debug_state = m_state.dbg;
1127 uint32_t i, num = NumSupportedHardwareWatchpoints();
1128 for (i = 0; i < num; ++i) {
1129 nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
1130 uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);
1131
1132 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchImplX86_64::"
1133 "GetHardwareWatchpointHit() slot: %u "
1134 "(addr = 0x%llx; byte_mask = 0x%x)",
1135 i, static_cast<uint64_t>(wp_addr),
1136 byte_mask);
1137
1138 if (!IsWatchpointEnabled(debug_state, i))
1139 continue;
1140
1141 if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
1142 continue;
1143
1144 // Sanity check the byte_mask
1145 uint32_t lsb = LowestBitSet(byte_mask);
1146 if (lsb < 0)
1147 continue;
1148
1149 uint64_t byte_to_match = bits(addr, 2, 0);
1150
1151 if (byte_mask & (1 << byte_to_match)) {
1152 addr = wp_addr + lsb;
1153 return i;
1154 }
1155 }
1156 }
1157 return INVALID_NUB_HW_INDEX;
1158 }
1159
GetWatchpointAddressByIndex(uint32_t hw_index)1160 nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
1161 kern_return_t kret = GetDBGState(true);
1162 if (kret != KERN_SUCCESS)
1163 return INVALID_NUB_ADDRESS;
1164 const uint32_t num = NumSupportedHardwareWatchpoints();
1165 if (hw_index >= num)
1166 return INVALID_NUB_ADDRESS;
1167 if (IsWatchpointEnabled(m_state.dbg, hw_index))
1168 return GetWatchAddress(m_state.dbg, hw_index);
1169 return INVALID_NUB_ADDRESS;
1170 }
1171
IsWatchpointEnabled(const DBG & debug_state,uint32_t hw_index)1172 bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
1173 uint32_t hw_index) {
1174 // Watchpoint Control Registers, bitfield definitions
1175 // ...
1176 // Bits Value Description
1177 // [0] 0 Watchpoint disabled
1178 // 1 Watchpoint enabled.
1179 return (debug_state.__wcr[hw_index] & 1u);
1180 }
1181
// Return the Data Virtual Address programmed into the watchpoint value
// register for this slot. bits(x, 63, 0) extracts the whole 64-bit value.
// (NOTE(review): the old comment described the AArch32 layout — a word
// address in bits [31:2]; on AArch64 the value register holds a full
// 64-bit DVA, which is what this code reads.)
nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
                                             uint32_t hw_index) {
  return bits(debug_state.__wvr[hw_index], 63, 0);
}
1190
1191 // Register information definitions for 64 bit ARMv8.
// GPR register numbers as debugserver orders the GPR register set:
// x0..x28 first, then fp/lr/sp (aliased to x29/x30/x31), pc and cpsr,
// followed by the w0..w28 32-bit pseudo registers (low halves of x0..x28).
enum gpr_regnums {
  gpr_x0 = 0,
  gpr_x1,
  gpr_x2,
  gpr_x3,
  gpr_x4,
  gpr_x5,
  gpr_x6,
  gpr_x7,
  gpr_x8,
  gpr_x9,
  gpr_x10,
  gpr_x11,
  gpr_x12,
  gpr_x13,
  gpr_x14,
  gpr_x15,
  gpr_x16,
  gpr_x17,
  gpr_x18,
  gpr_x19,
  gpr_x20,
  gpr_x21,
  gpr_x22,
  gpr_x23,
  gpr_x24,
  gpr_x25,
  gpr_x26,
  gpr_x27,
  gpr_x28,
  gpr_fp,            // frame pointer, alias of x29
  gpr_x29 = gpr_fp,
  gpr_lr,            // link register, alias of x30
  gpr_x30 = gpr_lr,
  gpr_sp,            // stack pointer, alias of x31
  gpr_x31 = gpr_sp,
  gpr_pc,
  gpr_cpsr,
  // 32-bit pseudo registers: w<n> is the low 32 bits of x<n>.
  gpr_w0,
  gpr_w1,
  gpr_w2,
  gpr_w3,
  gpr_w4,
  gpr_w5,
  gpr_w6,
  gpr_w7,
  gpr_w8,
  gpr_w9,
  gpr_w10,
  gpr_w11,
  gpr_w12,
  gpr_w13,
  gpr_w14,
  gpr_w15,
  gpr_w16,
  gpr_w17,
  gpr_w18,
  gpr_w19,
  gpr_w20,
  gpr_w21,
  gpr_w22,
  gpr_w23,
  gpr_w24,
  gpr_w25,
  gpr_w26,
  gpr_w27,
  gpr_w28

};
1261
// VFP register numbers for the vector register set: the 128-bit v0..v31
// registers, the fpsr/fpcr status/control registers, then the s<n> (32-bit)
// and d<n> (64-bit) pseudo registers carved out of the v<n> registers.
enum {
  vfp_v0 = 0,
  vfp_v1,
  vfp_v2,
  vfp_v3,
  vfp_v4,
  vfp_v5,
  vfp_v6,
  vfp_v7,
  vfp_v8,
  vfp_v9,
  vfp_v10,
  vfp_v11,
  vfp_v12,
  vfp_v13,
  vfp_v14,
  vfp_v15,
  vfp_v16,
  vfp_v17,
  vfp_v18,
  vfp_v19,
  vfp_v20,
  vfp_v21,
  vfp_v22,
  vfp_v23,
  vfp_v24,
  vfp_v25,
  vfp_v26,
  vfp_v27,
  vfp_v28,
  vfp_v29,
  vfp_v30,
  vfp_v31,
  vfp_fpsr,
  vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0,
  vfp_s1,
  vfp_s2,
  vfp_s3,
  vfp_s4,
  vfp_s5,
  vfp_s6,
  vfp_s7,
  vfp_s8,
  vfp_s9,
  vfp_s10,
  vfp_s11,
  vfp_s12,
  vfp_s13,
  vfp_s14,
  vfp_s15,
  vfp_s16,
  vfp_s17,
  vfp_s18,
  vfp_s19,
  vfp_s20,
  vfp_s21,
  vfp_s22,
  vfp_s23,
  vfp_s24,
  vfp_s25,
  vfp_s26,
  vfp_s27,
  vfp_s28,
  vfp_s29,
  vfp_s30,
  vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0,
  vfp_d1,
  vfp_d2,
  vfp_d3,
  vfp_d4,
  vfp_d5,
  vfp_d6,
  vfp_d7,
  vfp_d8,
  vfp_d9,
  vfp_d10,
  vfp_d11,
  vfp_d12,
  vfp_d13,
  vfp_d14,
  vfp_d15,
  vfp_d16,
  vfp_d17,
  vfp_d18,
  vfp_d19,
  vfp_d20,
  vfp_d21,
  vfp_d22,
  vfp_d23,
  vfp_d24,
  vfp_d25,
  vfp_d26,
  vfp_d27,
  vfp_d28,
  vfp_d29,
  vfp_d30,
  vfp_d31
};
1366
1367 enum { exc_far = 0, exc_esr, exc_exception };
1368
1369 // These numbers from the "DWARF for the ARM 64-bit Architecture (AArch64)"
1370 // document.
1371
// DWARF register numbers, from the "DWARF for the ARM 64-bit Architecture
// (AArch64)" document: x0-x31 are 0-31, pc is 32, and the 128-bit vector
// registers v0-v31 are 64-95.
enum {
  dwarf_x0 = 0,
  dwarf_x1,
  dwarf_x2,
  dwarf_x3,
  dwarf_x4,
  dwarf_x5,
  dwarf_x6,
  dwarf_x7,
  dwarf_x8,
  dwarf_x9,
  dwarf_x10,
  dwarf_x11,
  dwarf_x12,
  dwarf_x13,
  dwarf_x14,
  dwarf_x15,
  dwarf_x16,
  dwarf_x17,
  dwarf_x18,
  dwarf_x19,
  dwarf_x20,
  dwarf_x21,
  dwarf_x22,
  dwarf_x23,
  dwarf_x24,
  dwarf_x25,
  dwarf_x26,
  dwarf_x27,
  dwarf_x28,
  dwarf_x29,
  dwarf_x30,
  dwarf_x31,
  dwarf_pc = 32,
  dwarf_elr_mode = 33,
  // fp/lr/sp are aliases of x29/x30/x31.
  dwarf_fp = dwarf_x29,
  dwarf_lr = dwarf_x30,
  dwarf_sp = dwarf_x31,
  // 34-63 reserved

  // V0-V31 (128 bit vector registers)
  dwarf_v0 = 64,
  dwarf_v1,
  dwarf_v2,
  dwarf_v3,
  dwarf_v4,
  dwarf_v5,
  dwarf_v6,
  dwarf_v7,
  dwarf_v8,
  dwarf_v9,
  dwarf_v10,
  dwarf_v11,
  dwarf_v12,
  dwarf_v13,
  dwarf_v14,
  dwarf_v15,
  dwarf_v16,
  dwarf_v17,
  dwarf_v18,
  dwarf_v19,
  dwarf_v20,
  dwarf_v21,
  dwarf_v22,
  dwarf_v23,
  dwarf_v24,
  dwarf_v25,
  dwarf_v26,
  dwarf_v27,
  dwarf_v28,
  dwarf_v29,
  dwarf_v30,
  dwarf_v31

  // 96-127 reserved
};
1448
// Flat register numbering used by debugserver itself: GPRs (x0..x28,
// fp, lr, sp, pc, cpsr) followed by the vector registers and fpsr/fpcr.
enum {
  debugserver_gpr_x0 = 0,
  debugserver_gpr_x1,
  debugserver_gpr_x2,
  debugserver_gpr_x3,
  debugserver_gpr_x4,
  debugserver_gpr_x5,
  debugserver_gpr_x6,
  debugserver_gpr_x7,
  debugserver_gpr_x8,
  debugserver_gpr_x9,
  debugserver_gpr_x10,
  debugserver_gpr_x11,
  debugserver_gpr_x12,
  debugserver_gpr_x13,
  debugserver_gpr_x14,
  debugserver_gpr_x15,
  debugserver_gpr_x16,
  debugserver_gpr_x17,
  debugserver_gpr_x18,
  debugserver_gpr_x19,
  debugserver_gpr_x20,
  debugserver_gpr_x21,
  debugserver_gpr_x22,
  debugserver_gpr_x23,
  debugserver_gpr_x24,
  debugserver_gpr_x25,
  debugserver_gpr_x26,
  debugserver_gpr_x27,
  debugserver_gpr_x28,
  debugserver_gpr_fp, // x29
  debugserver_gpr_lr, // x30
  debugserver_gpr_sp, // sp aka xsp
  debugserver_gpr_pc,
  debugserver_gpr_cpsr,
  debugserver_vfp_v0,
  debugserver_vfp_v1,
  debugserver_vfp_v2,
  debugserver_vfp_v3,
  debugserver_vfp_v4,
  debugserver_vfp_v5,
  debugserver_vfp_v6,
  debugserver_vfp_v7,
  debugserver_vfp_v8,
  debugserver_vfp_v9,
  debugserver_vfp_v10,
  debugserver_vfp_v11,
  debugserver_vfp_v12,
  debugserver_vfp_v13,
  debugserver_vfp_v14,
  debugserver_vfp_v15,
  debugserver_vfp_v16,
  debugserver_vfp_v17,
  debugserver_vfp_v18,
  debugserver_vfp_v19,
  debugserver_vfp_v20,
  debugserver_vfp_v21,
  debugserver_vfp_v22,
  debugserver_vfp_v23,
  debugserver_vfp_v24,
  debugserver_vfp_v25,
  debugserver_vfp_v26,
  debugserver_vfp_v27,
  debugserver_vfp_v28,
  debugserver_vfp_v29,
  debugserver_vfp_v30,
  debugserver_vfp_v31,
  debugserver_vfp_fpsr,
  debugserver_vfp_fpcr
};
1519
// "Contained in" lists for the w<n> pseudo registers: each w<n> lives
// inside the corresponding x<n>. NULL-terminated register-name lists.
const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

// Registers invalidated when x<n> (or its w<n> alias) is written: both the
// 64-bit register and its 32-bit alias. NULL-terminated.
const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};
1579
// Byte offset of __x[idx] within the GPR (ARM thread state) structure.
#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

// Byte offset of a named GPR member (e.g. cpsr) within the GPR structure.
#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
#define DEFINE_GPR_IDX(idx, reg, alt, gen)                                     \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx),      \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL,            \
        g_invalidate_x##idx                                                    \
  }
// Same as DEFINE_GPR_IDX but for the registers addressed by member name
// (fp/lr/sp/pc/cpsr) rather than by __x[] index.
#define DEFINE_GPR_NAME(reg, alt, gen)                                         \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg),     \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL       \
  }
// w<n> pseudo registers: 4 bytes, no storage of their own (offset 0, no
// register numbers) — they are contained in x<n> and invalidate x<n>/w<n>.
#define DEFINE_PSEUDO_GPR_IDX(idx, reg)                                        \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM,   \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_x##idx, g_invalidate_x##idx                                \
  }
1605
1606 //_STRUCT_ARM_THREAD_STATE64
1607 //{
1608 // uint64_t x[29]; /* General purpose registers x0-x28 */
1609 // uint64_t fp; /* Frame pointer x29 */
1610 // uint64_t lr; /* Link register x30 */
1611 // uint64_t sp; /* Stack pointer x31 */
1612 // uint64_t pc; /* Program counter */
1613 // uint32_t cpsr; /* Current program status register */
1614 //};
1615
1616 // General purpose registers
// DNBRegisterInfo descriptions for the GPR register set, in gpr_regnums
// order: x0..x28, fp/lr/sp/pc (hand-written entries, see below), cpsr,
// then the w0..w28 pseudo registers.
const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
    DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
    DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
    DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
    // For the G/g packet we want to show where the offset into the regctx
    // is for fp/lr/sp/pc, but we cannot directly access them on arm64e
    // devices (and therefore can't offsetof() them)) - add the offset based
    // on the last accessible register by hand for advertising the location
    // in the regctx to lldb.  We'll go through the accessor functions when
    // we read/write them here.
    {
        e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
        dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
        dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
    },
    {
        e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
        dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
        dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
    },

    // in armv7 we specify that writing to the CPSR should invalidate r8-12, sp,
    // lr.
    // this should be specified for arm64 too even though debugserver is only
    // used for
    // userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, GENERIC_REGNUM_FLAGS,
     debugserver_gpr_cpsr, NULL, NULL},

    // 32-bit pseudo registers, contained in (and invalidating) x0..x28.
    DEFINE_PSEUDO_GPR_IDX(0, w0),
    DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),
    DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),
    DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),
    DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),
    DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10),
    DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12),
    DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14),
    DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16),
    DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18),
    DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20),
    DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22),
    DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24),
    DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26),
    DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};
1708
// "Contained in" lists for the s<n>/d<n> pseudo registers: each lives
// inside the corresponding 128-bit v<n>. NULL-terminated.
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

// Registers invalidated when v<n> (or one of its aliases) is written:
// the vector register and its d<n>/s<n> sub-register views. NULL-terminated.
const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};
1774
#if defined(__arm64__) || defined(__aarch64__)
// Byte offset of the 128-bit vector register __v[idx] within the full
// Context (FPU member offset plus the vfp member's offset in Context).
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
// When not building for arm64, the FPU state is only visible as an opaque
// blob; compute the same offsets relative to it.
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
// Byte offset of a named FPU member (e.g. fpsr) within the full Context.
#define VFP_OFFSET_NAME(reg)                                                   \
  (offsetof(DNBArchMachARM64::FPU, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, vfp))
// Byte offset of a named EXC (exception state) member within the Context.
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
// 128-bit v<n> register: vector of 16 uint8s at its Context offset.
#define DEFINE_VFP_V_IDX(idx)                                                  \
  {                                                                            \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16,    \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx,               \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx  \
  }
// s<n> pseudo register: 32-bit float view contained in v<n>; no storage of
// its own (offset 0, no register numbers).
#define DEFINE_PSEUDO_VFP_S_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
// d<n> pseudo register: 64-bit float view contained in v<n>.
#define DEFINE_PSEUDO_VFP_D_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
1810
// Floating point registers: the 32 real 128-bit v registers, the fpsr
// and fpcr status/control words (located right after the 32 v registers,
// at offsets 32*16 + 0 and 32*16 + 4), and the s/d pseudo-registers
// that alias portions of the v registers.
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    // fpsr/fpcr follow the 32 v registers in the FPU layout.
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    // 32-bit single-precision pseudo-registers (contained in v0..v31).
    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    // 64-bit double-precision pseudo-registers (contained in v0..v31).
    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};
1919
// Layout of the exception state this table describes:
//_STRUCT_ARM_EXCEPTION_STATE64
//{
//	uint64_t	far; /* Virtual Fault Address */
//	uint32_t	esr; /* Exception syndrome */
//	uint32_t	exception; /* number of arm exception taken */
//};

// Exception registers: fault address, exception syndrome, and exception
// number, read out of Context.exc via the EXC_OFFSET() byte offsets.
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};
1938
// Number of registers in each register set, derived from the tables above.
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definition, at register set index
// zero, stands for all registers combined; the remaining entries describe
// the individual sets. The register info pointer for the "all registers"
// set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);
1960
// Return the static table of register sets for this architecture,
// reporting the number of entries through *num_reg_sets.
const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}
1966
FixGenericRegisterNumber(uint32_t & set,uint32_t & reg)1967 bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t ®) {
1968 if (set == REGISTER_SET_GENERIC) {
1969 switch (reg) {
1970 case GENERIC_REGNUM_PC: // Program Counter
1971 set = e_regSetGPR;
1972 reg = gpr_pc;
1973 break;
1974
1975 case GENERIC_REGNUM_SP: // Stack Pointer
1976 set = e_regSetGPR;
1977 reg = gpr_sp;
1978 break;
1979
1980 case GENERIC_REGNUM_FP: // Frame Pointer
1981 set = e_regSetGPR;
1982 reg = gpr_fp;
1983 break;
1984
1985 case GENERIC_REGNUM_RA: // Return Address
1986 set = e_regSetGPR;
1987 reg = gpr_lr;
1988 break;
1989
1990 case GENERIC_REGNUM_FLAGS: // Processor flags register
1991 set = e_regSetGPR;
1992 reg = gpr_cpsr;
1993 break;
1994
1995 case GENERIC_REGNUM_ARG1:
1996 case GENERIC_REGNUM_ARG2:
1997 case GENERIC_REGNUM_ARG3:
1998 case GENERIC_REGNUM_ARG4:
1999 case GENERIC_REGNUM_ARG5:
2000 case GENERIC_REGNUM_ARG6:
2001 set = e_regSetGPR;
2002 reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
2003 break;
2004
2005 default:
2006 return false;
2007 }
2008 }
2009 return true;
2010 }
// Read one register's current value into *value (both its DNBRegisterInfo
// and its payload). Generic register numbers are mapped to concrete ARM64
// set/reg pairs first. Returns false if the register is unknown or the
// thread state cannot be read.
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  // Refresh the cached copy of this register set if needed (non-forced).
  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
        switch (reg) {
// With pointer authentication, pc/lr/sp/fp are stored as opaque signed
// pointers; strip the PAC bits before handing back the raw address.
#if __has_feature(ptrauth_calls) && defined(__LP64__)
        case gpr_pc:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
          break;
        case gpr_lr:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr));
          break;
        case gpr_sp:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
          break;
        case gpr_fp:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
          break;
#else
        case gpr_pc:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__pc);
          break;
        case gpr_lr:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__lr);
          break;
        case gpr_sp:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__sp);
          break;
        case gpr_fp:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__fp);
          break;
#endif
        default:
          // x0..x28 live in the __x array, indexed directly by reg.
          value->value.uint64 = m_state.context.gpr.__x[reg];
        }
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:

      if (reg >= vfp_v0 && reg <= vfp_v31) {
        // Full 128-bit vector register.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
        // fpsr/fpcr follow the 32 v registers (byte offset 32*16 + 0 / + 4).
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
        // s<n> is the first 4 bytes of v<n>'s 16-byte storage.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
        // d<n> is the first 8 bytes of v<n>'s 16-byte storage.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}
2134
// Write a new value into a single register and, on success, push the
// modified register set back to the thread via SetRegisterState().
// Generic register numbers are mapped to concrete ARM64 pairs first.
// Returns true only if the value was stored AND the kernel accepted the
// updated state.
bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  // Make sure we have a valid cached copy of this set to modify.
  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        uint64_t signed_value = value->value.uint64;
#if __has_feature(ptrauth_calls)
        // The incoming value could be garbage.  Strip it to avoid
        // trapping when it gets resigned in the thread state.
        signed_value = (uint64_t) ptrauth_strip((void*) signed_value, ptrauth_key_function_pointer);
        signed_value = (uint64_t) ptrauth_sign_unauthenticated((void*) signed_value, ptrauth_key_function_pointer, 0);
#endif
        // pc/lr go through the fptr setters (taking the re-signed value);
        // sp/fp use the plain address setters with the raw caller value.
        if (reg == gpr_pc)
          arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) signed_value);
        else if (reg == gpr_lr)
          arm_thread_state64_set_lr_fptr (m_state.context.gpr, (void*) signed_value);
        else if (reg == gpr_sp)
          arm_thread_state64_set_sp (m_state.context.gpr, value->value.uint64);
        else if (reg == gpr_fp)
          arm_thread_state64_set_fp (m_state.context.gpr, value->value.uint64);
        else
          m_state.context.gpr.__x[reg] = value->value.uint64;
#else
        m_state.context.gpr.__x[reg] = value->value.uint64;
#endif
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
        // Full 128-bit vector register.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        // NOTE(review): no '&' before opaque here, unlike the sibling
        // branches above -- equivalent only if opaque is an array; verify.
        memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
        // s<n> overwrites the first 4 bytes of v<n>'s storage.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
        // d<n> overwrites the first 8 bytes of v<n>'s storage.
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  // Only report success once the new state has been written back.
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}
2242
GetRegisterState(int set,bool force)2243 kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
2244 switch (set) {
2245 case e_regSetALL:
2246 return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
2247 GetDBGState(force);
2248 case e_regSetGPR:
2249 return GetGPRState(force);
2250 case e_regSetVFP:
2251 return GetVFPState(force);
2252 case e_regSetEXC:
2253 return GetEXCState(force);
2254 case e_regSetDBG:
2255 return GetDBGState(force);
2256 default:
2257 break;
2258 }
2259 return KERN_INVALID_ARGUMENT;
2260 }
2261
SetRegisterState(int set)2262 kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
2263 // Make sure we have a valid context to set.
2264 kern_return_t err = GetRegisterState(set, false);
2265 if (err != KERN_SUCCESS)
2266 return err;
2267
2268 switch (set) {
2269 case e_regSetALL:
2270 return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
2271 case e_regSetGPR:
2272 return SetGPRState();
2273 case e_regSetVFP:
2274 return SetVFPState();
2275 case e_regSetEXC:
2276 return SetEXCState();
2277 case e_regSetDBG:
2278 return SetDBGState(false);
2279 default:
2280 break;
2281 }
2282 return KERN_INVALID_ARGUMENT;
2283 }
2284
// Return true if the cached copy of the given register set is currently
// valid (i.e. does not need to be re-read from the kernel).
bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}
2288
// Copy the GPR, VFP and EXC state into buf (when buf/buf_len are
// non-zero) and return the number of bytes copied; with a NULL buf this
// just reports the full context size. Returns 0 if reading any of the
// three register sets from the kernel fails.
nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    // Bitwise OR evaluates all three reads; any nonzero kern_return_t
    // means at least one of them failed.
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}
2322
SetRegisterContext(const void * buf,nub_size_t buf_len)2323 nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
2324 nub_size_t buf_len) {
2325 nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
2326 sizeof(m_state.context.exc);
2327
2328 if (buf == NULL || buf_len == 0)
2329 size = 0;
2330
2331 if (size) {
2332 if (size > buf_len)
2333 size = buf_len;
2334
2335 // Copy each struct individually to avoid any padding that might be between
2336 // the structs in m_state.context
2337 uint8_t *p = const_cast<uint8_t*>(reinterpret_cast<const uint8_t *>(buf));
2338 ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
2339 p += sizeof(m_state.context.gpr);
2340 ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
2341 p += sizeof(m_state.context.vfp);
2342 ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
2343 p += sizeof(m_state.context.exc);
2344
2345 size_t bytes_written = p - reinterpret_cast<const uint8_t *>(buf);
2346 UNUSED_IF_ASSERT_DISABLED(bytes_written);
2347 assert(bytes_written == size);
2348 SetGPRState();
2349 SetVFPState();
2350 SetEXCState();
2351 }
2352 DNBLogThreadedIf(
2353 LOG_THREAD,
2354 "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
2355 buf_len, size);
2356 return size;
2357 }
2358
// Snapshot the thread's GPR and VFP state into m_saved_register_states
// under a freshly allocated save id, which is returned; UINT32_MAX is
// returned if either register set cannot be read. thread_abort_safely()
// is called first, so the registers are always force re-read afterwards.
uint32_t DNBArchMachARM64::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
                  "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  // Always re-read the registers because above we call thread_abort_safely();
  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: %s regs failed to read: %u",
                     "VFP", kret);
  } else {
    // Both sets read cleanly: stash the whole context under a new id.
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return UINT32_MAX;
}
2384
RestoreRegisterState(uint32_t save_id)2385 bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
2386 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
2387 if (pos != m_saved_register_states.end()) {
2388 m_state.context.gpr = pos->second.gpr;
2389 m_state.context.vfp = pos->second.vfp;
2390 kern_return_t kret;
2391 bool success = true;
2392 if ((kret = SetGPRState()) != KERN_SUCCESS) {
2393 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
2394 "(save_id = %u) error: GPR regs failed to "
2395 "write: %u",
2396 save_id, kret);
2397 success = false;
2398 } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
2399 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
2400 "(save_id = %u) error: %s regs failed to "
2401 "write: %u",
2402 save_id, "VFP", kret);
2403 success = false;
2404 }
2405 m_saved_register_states.erase(pos);
2406 return success;
2407 }
2408 return false;
2409 }
2410
2411 #endif // #if defined (ARM_THREAD_STATE64_COUNT)
2412 #endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
2413