1 //===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Created by Greg Clayton on 6/25/07.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)
14
15 #include "MacOSX/arm64/DNBArchImplARM64.h"
16
17 #if defined(ARM_THREAD_STATE64_COUNT)
18
19 #include "DNB.h"
20 #include "DNBBreakpoint.h"
21 #include "DNBLog.h"
22 #include "DNBRegisterInfo.h"
23 #include "MacOSX/MachProcess.h"
24 #include "MacOSX/MachThread.h"
25
26 #include <inttypes.h>
27 #include <sys/sysctl.h>
28
29 #if __has_feature(ptrauth_calls)
30 #include <ptrauth.h>
31 #endif
32
// Break only in privileged or user mode
// (PAC bits in the DBGWVRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))

// Enable bits for hardware breakpoint (BCR) / watchpoint (WCR) control regs.
#define BCR_ENABLE ((uint32_t)(1u))
#define WCR_ENABLE ((uint32_t)(1u))

// Watchpoint load/store
// (LSC bits in the DBGWVRn_EL1 watchpoint control register)
#define WCR_LOAD ((uint32_t)(1u << 3))
#define WCR_STORE ((uint32_t)(1u << 4))

// Enable breakpoint, watchpoint, and vector catch debug exceptions.
// (MDE bit in the MDSCR_EL1 register. Equivalent to the MDBGen bit in
// DBGDSCRext in Aarch32)
#define MDE_ENABLE ((uint32_t)(1u << 15))

// Single instruction step
// (SS bit in the MDSCR_EL1 register)
#define SS_ENABLE ((uint32_t)(1u))

static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in little-endian byte order

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array.
// LoHi[lo_index] == hi_index for a split watchpoint; 0 means "not split".

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
static uint32_t LoHi[16] = {0};
65
Initialize()66 void DNBArchMachARM64::Initialize() {
67 DNBArchPluginInfo arch_plugin_info = {
68 CPU_TYPE_ARM64, DNBArchMachARM64::Create,
69 DNBArchMachARM64::GetRegisterSetInfo,
70 DNBArchMachARM64::SoftwareBreakpointOpcode};
71
72 // Register this arch plug-in with the main protocol class
73 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);
74
75 DNBArchPluginInfo arch_plugin_info_32 = {
76 CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
77 DNBArchMachARM64::GetRegisterSetInfo,
78 DNBArchMachARM64::SoftwareBreakpointOpcode};
79
80 // Register this arch plug-in with the main protocol class
81 DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
82 }
83
Create(MachThread * thread)84 DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
85 DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);
86
87 return obj;
88 }
89
// Return the AArch64 software breakpoint instruction bytes ("brk #0").
// byte_size is unused here: arm64 instructions are a fixed 4 bytes wide.
const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  return g_arm64_breakpoint_opcode;
}
94
// Mach CPU type this architecture plug-in represents.
uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }
96
// Return the thread's program counter, or failValue if the GPR state
// cannot be read from the kernel.
uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    // On LP64 the pc may be a signed pointer; use the accessor macro,
    // which strips any pointer authentication bits.
    return arm_thread_state64_get_pc(m_state.context.gpr);
#else
    return m_state.context.gpr.__pc;
#endif
  return failValue;
}
107
// Set the thread's program counter to `value` and write the GPR state
// back to the kernel.
kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage. Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t) ptrauth_strip((void*) value, ptrauth_key_function_pointer);
    value = (uint64_t) ptrauth_sign_unauthenticated((void*) value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  // NOTE(review): the declared return type is kern_return_t, but this
  // returns the boolean (err == KERN_SUCCESS): 1 on success and 0 (which
  // equals KERN_SUCCESS) on failure. A caller comparing the result against
  // KERN_SUCCESS sees inverted semantics — confirm intent before relying
  // on this return value.
  return err == KERN_SUCCESS;
}
127
// Return the thread's stack pointer, or failValue if the GPR state
// cannot be read from the kernel.
uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
#if defined(__LP64__)
    // Use the accessor macro, which handles authenticated sp values.
    return arm_thread_state64_get_sp(m_state.context.gpr);
#else
    return m_state.context.gpr.__sp;
#endif
  return failValue;
}
138
GetGPRState(bool force)139 kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
140 int set = e_regSetGPR;
141 // Check if we have valid cached registers
142 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
143 return KERN_SUCCESS;
144
145 // Read the registers from our thread
146 mach_msg_type_number_t count = e_regSetGPRCount;
147 kern_return_t kret =
148 ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
149 (thread_state_t)&m_state.context.gpr, &count);
150 if (DNBLogEnabledForAny(LOG_THREAD)) {
151 uint64_t *x = &m_state.context.gpr.__x[0];
152 DNBLogThreaded(
153 "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
154 "\n x0=%16.16llx"
155 "\n x1=%16.16llx"
156 "\n x2=%16.16llx"
157 "\n x3=%16.16llx"
158 "\n x4=%16.16llx"
159 "\n x5=%16.16llx"
160 "\n x6=%16.16llx"
161 "\n x7=%16.16llx"
162 "\n x8=%16.16llx"
163 "\n x9=%16.16llx"
164 "\n x10=%16.16llx"
165 "\n x11=%16.16llx"
166 "\n x12=%16.16llx"
167 "\n x13=%16.16llx"
168 "\n x14=%16.16llx"
169 "\n x15=%16.16llx"
170 "\n x16=%16.16llx"
171 "\n x17=%16.16llx"
172 "\n x18=%16.16llx"
173 "\n x19=%16.16llx"
174 "\n x20=%16.16llx"
175 "\n x21=%16.16llx"
176 "\n x22=%16.16llx"
177 "\n x23=%16.16llx"
178 "\n x24=%16.16llx"
179 "\n x25=%16.16llx"
180 "\n x26=%16.16llx"
181 "\n x27=%16.16llx"
182 "\n x28=%16.16llx"
183 "\n fp=%16.16llx"
184 "\n lr=%16.16llx"
185 "\n sp=%16.16llx"
186 "\n pc=%16.16llx"
187 "\n cpsr=%8.8x",
188 m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
189 x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[0], x[11],
190 x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21],
191 x[22], x[23], x[24], x[25], x[26], x[27], x[28],
192 #if defined(__LP64__)
193 (uint64_t) arm_thread_state64_get_fp (m_state.context.gpr),
194 (uint64_t) arm_thread_state64_get_lr (m_state.context.gpr),
195 (uint64_t) arm_thread_state64_get_sp (m_state.context.gpr),
196 (uint64_t) arm_thread_state64_get_pc (m_state.context.gpr),
197 #else
198 m_state.context.gpr.__fp, m_state.context.gpr.__lr,
199 m_state.context.gpr.__sp, m_state.context.gpr.__pc,
200 #endif
201 m_state.context.gpr.__cpsr);
202 }
203 m_state.SetError(set, Read, kret);
204 return kret;
205 }
206
// Read the floating point / SIMD registers (ARM_NEON_STATE64) from the
// thread, using the cached copy unless `force` is true. Returns the
// kern_return_t from thread_get_state (KERN_SUCCESS when served from cache).
kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    // Each 128-bit q register is logged as two 64-bit halves, read out of
    // the __uint128_t __v array by pointer aliasing.
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n   q0 = 0x%16.16llx%16.16llx"
        "\n   q1 = 0x%16.16llx%16.16llx"
        "\n   q2 = 0x%16.16llx%16.16llx"
        "\n   q3 = 0x%16.16llx%16.16llx"
        "\n   q4 = 0x%16.16llx%16.16llx"
        "\n   q5 = 0x%16.16llx%16.16llx"
        "\n   q6 = 0x%16.16llx%16.16llx"
        "\n   q7 = 0x%16.16llx%16.16llx"
        "\n   q8 = 0x%16.16llx%16.16llx"
        "\n   q9 = 0x%16.16llx%16.16llx"
        "\n  q10 = 0x%16.16llx%16.16llx"
        "\n  q11 = 0x%16.16llx%16.16llx"
        "\n  q12 = 0x%16.16llx%16.16llx"
        "\n  q13 = 0x%16.16llx%16.16llx"
        "\n  q14 = 0x%16.16llx%16.16llx"
        "\n  q15 = 0x%16.16llx%16.16llx"
        "\n  q16 = 0x%16.16llx%16.16llx"
        "\n  q17 = 0x%16.16llx%16.16llx"
        "\n  q18 = 0x%16.16llx%16.16llx"
        "\n  q19 = 0x%16.16llx%16.16llx"
        "\n  q20 = 0x%16.16llx%16.16llx"
        "\n  q21 = 0x%16.16llx%16.16llx"
        "\n  q22 = 0x%16.16llx%16.16llx"
        "\n  q23 = 0x%16.16llx%16.16llx"
        "\n  q24 = 0x%16.16llx%16.16llx"
        "\n  q25 = 0x%16.16llx%16.16llx"
        "\n  q26 = 0x%16.16llx%16.16llx"
        "\n  q27 = 0x%16.16llx%16.16llx"
        "\n  q28 = 0x%16.16llx%16.16llx"
        "\n  q29 = 0x%16.16llx%16.16llx"
        "\n  q30 = 0x%16.16llx%16.16llx"
        "\n  q31 = 0x%16.16llx%16.16llx"
        "\n fpsr = 0x%8.8x"
        "\n fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        ((uint64_t *)&m_state.context.vfp.__v[0])[0],
        ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0],
        ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0],
        ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0],
        ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0],
        ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0],
        ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0],
        ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0],
        ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0],
        ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0],
        ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0],
        ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0],
        ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0],
        ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0],
        ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0],
        ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0],
        ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0],
        ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0],
        ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0],
        ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0],
        ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0],
        ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0],
        ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0],
        ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0],
        ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0],
        ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0],
        ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0],
        ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0],
        ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0],
        ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0],
        ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0],
        ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0],
        ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}
327
GetEXCState(bool force)328 kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
329 int set = e_regSetEXC;
330 // Check if we have valid cached registers
331 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
332 return KERN_SUCCESS;
333
334 // Read the registers from our thread
335 mach_msg_type_number_t count = e_regSetEXCCount;
336 kern_return_t kret =
337 ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
338 (thread_state_t)&m_state.context.exc, &count);
339 m_state.SetError(set, Read, kret);
340 return kret;
341 }
342
DumpDBGState(const arm_debug_state_t & dbg)343 static void DumpDBGState(const arm_debug_state_t &dbg) {
344 uint32_t i = 0;
345 for (i = 0; i < 16; i++)
346 DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
347 "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
348 i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
349 dbg.__wcr[i]);
350 }
351
GetDBGState(bool force)352 kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
353 int set = e_regSetDBG;
354
355 // Check if we have valid cached registers
356 if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
357 return KERN_SUCCESS;
358
359 // Read the registers from our thread
360 mach_msg_type_number_t count = e_regSetDBGCount;
361 kern_return_t kret =
362 ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
363 (thread_state_t)&m_state.dbg, &count);
364 m_state.SetError(set, Read, kret);
365
366 return kret;
367 }
368
SetGPRState()369 kern_return_t DNBArchMachARM64::SetGPRState() {
370 int set = e_regSetGPR;
371 kern_return_t kret = ::thread_set_state(
372 m_thread->MachPortNumber(), ARM_THREAD_STATE64,
373 (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
374 m_state.SetError(set, Write,
375 kret); // Set the current write error for this register set
376 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
377 // state in case registers are read
378 // back differently
379 return kret; // Return the error code
380 }
381
SetVFPState()382 kern_return_t DNBArchMachARM64::SetVFPState() {
383 int set = e_regSetVFP;
384 kern_return_t kret = ::thread_set_state(
385 m_thread->MachPortNumber(), ARM_NEON_STATE64,
386 (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
387 m_state.SetError(set, Write,
388 kret); // Set the current write error for this register set
389 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
390 // state in case registers are read
391 // back differently
392 return kret; // Return the error code
393 }
394
SetEXCState()395 kern_return_t DNBArchMachARM64::SetEXCState() {
396 int set = e_regSetEXC;
397 kern_return_t kret = ::thread_set_state(
398 m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
399 (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
400 m_state.SetError(set, Write,
401 kret); // Set the current write error for this register set
402 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
403 // state in case registers are read
404 // back differently
405 return kret; // Return the error code
406 }
407
SetDBGState(bool also_set_on_task)408 kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
409 int set = e_regSetDBG;
410 kern_return_t kret =
411 ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
412 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
413 if (also_set_on_task) {
414 kern_return_t task_kret = task_set_state(
415 m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
416 (thread_state_t)&m_state.dbg, e_regSetDBGCount);
417 if (task_kret != KERN_SUCCESS)
418 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
419 "to set debug control register state: "
420 "0x%8.8x.",
421 task_kret);
422 }
423 m_state.SetError(set, Write,
424 kret); // Set the current write error for this register set
425 m_state.InvalidateRegisterSetState(set); // Invalidate the current register
426 // state in case registers are read
427 // back differently
428
429 return kret; // Return the error code
430 }
431
// Called before the thread resumes. Arms hardware single-step when the
// thread is stepping, and — if a watchpoint just fired — temporarily
// disables that watchpoint and single-steps over the triggering
// instruction (re-armed later in ThreadDidStop()).
void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the instruction
  // which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user. We don't need
        // to do anything at all
        // to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                        "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction,
      // at least we should reset the two watchpoint member variables so that
      // the next time around
      // this callback function is invoked, the enclosing logical branch is
      // skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(
            LOG_WATCHPOINTS,
            "DNBArchMachARM::ThreadWillResume() failed to enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::ThreadWillResume() "
                                          "succeeded to enable single step");
    }
  }
}
483
// Inspect an incoming mach exception. For EXC_BREAKPOINT exceptions caused
// by a data watchpoint (EXC_ARM_DA_DEBUG), record which hardware watchpoint
// fired, rewrite split (lo/hi) watchpoint hits to the logical start address,
// and append the hw index to exc_data. Returns true if the exception was a
// watchpoint-style EXC_BREAKPOINT, false otherwise.
bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big. If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                        "watchpoint %d was hit on address "
                                        "0x%llx",
                       hw_index, (uint64_t)addr);
      const int num_watchpoints = NumSupportedHardwareWatchpoints();
      for (int i = 0; i < num_watchpoints; i++) {
        // i is the "lo" half if LoHi[i] is the hw_index that fired; use the
        // lo half's address so lldb sees the logical watchpoint start.
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM::NotifyException "
                                            "It is a linked watchpoint; "
                                            "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    break;
  }
  return false;
}
531
// Called after the thread stops. Invalidates cached register state, undoes
// the temporary watchpoint-stepping arrangement made in ThreadWillResume()
// (disable single-step, re-arm the watchpoint), and clears the trace bit
// when a normal single-instruction step completes.
bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great! We now disable the hardware single step as well as re-enable the
    // hardware watchpoint.
    // See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                    "is true but (m_watchpoint_did_occur && "
                    "m_watchpoint_hw_index >= 0) does not hold!");
      }
    } else {
      DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                  "is true but unable to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time
    }
  }
  return success;
}
573
// Set the single step bit in the processor status register.
// Sets or clears the SS bit of MDSCR_EL1 in the cached debug state and
// writes it back to the thread. Returns the kern_return_t of the final
// SetDBGState() call (or of a failed register read).
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  // GPR state is only needed for the pc value in the log messages below,
  // but a failure to read it is still treated as fatal here.
  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
#if defined(__LP64__)
                     __FUNCTION__, (uint64_t)arm_thread_state64_get_pc (m_state.context.gpr));
#else
                     __FUNCTION__, (uint64_t)m_state.context.gpr.__pc);
#endif
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
#if defined(__LP64__)
                     __FUNCTION__, (uint64_t)arm_thread_state64_get_pc (m_state.context.gpr));
#else
                     __FUNCTION__, (uint64_t)m_state.context.gpr.__pc);
#endif
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}
615
// Return 1 if bit number "bit" of "value" is set, otherwise 0.
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value & (1u << bit)) != 0 ? 1u : 0u;
}
620
// Return the bitfield "value[msbit:lsbit]" (inclusive), right-justified.
// Requires msbit >= lsbit.
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  const uint32_t width = msbit - lsbit + 1;
  // Mask of `width` one-bits; guard the width==64 case, where a single
  // 64-bit shift would be undefined.
  const uint64_t mask = (width >= 64) ? ~0ull : ((1ull << width) - 1ull);
  return (value >> lsbit) & mask;
}
631
// Return the number of hardware watchpoint registers the CPU supports,
// querying the "hw.optional.watchpoint" sysctl once and caching the result
// in a function-local static for subsequent calls.
uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't
// access that reg.  The kernel should have filled in the sysctls based on it
// though.
#if defined(__arm__)
      // AArch32 fallback: read the watchpoint count directly out of the
      // DBGDIDR debug ID register (WRPs field, bits [31:28]).
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm():  %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}
668
// Program a hardware watchpoint covering [addr, addr+size) for read and/or
// write accesses. Watchpoints that straddle an 8-byte boundary are split
// into two hardware slots (recorded in LoHi). Returns the hardware slot
// index used (the "lo" slot for a split watchpoint), or
// INVALID_NUB_HW_INDEX on failure. When also_set_on_task is true the debug
// state is mirrored onto the task so new threads inherit it.
uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Can't watch zero bytes
  if (size == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Otherwise, can't watch more than 8 bytes per WVR/WCR pair
  if (size > 8)
    return INVALID_NUB_HW_INDEX;

  // Aarch64 watchpoints are in one of two forms: (1) 1-8 bytes, aligned to
  // an 8 byte address, or (2) a power-of-two size region of memory; minimum
  // 8 bytes, maximum 2GB; the starting address must be aligned to that power
  // of two.
  //
  // For (1), 1-8 byte watchpoints, using the Byte Address Selector field in
  // DBGWCR<n>.BAS.  Any of the bytes may be watched, but if multiple bytes
  // are watched, the bytes selected must be contiguous.  The start address
  // watched must be doubleword (8-byte) aligned; if the start address is
  // word (4-byte) aligned, only 4 bytes can be watched.
  //
  // For (2), the MASK field in DBGWCR<n>.MASK is used.
  //
  // See the ARM ARM, section "Watchpoint exceptions", and more specifically,
  // "Watchpoint data address comparisons".
  //
  // debugserver today only supports (1) - the Byte Address Selector 1-8 byte
  // watchpoints that are 8-byte aligned.  To support larger watchpoints,
  // debugserver would need to interpret the mach exception when the watched
  // region was hit, see if the address accessed lies within the subset
  // of the power-of-two region that lldb asked us to watch (v. ARM ARM,
  // "Determining the memory location that caused a Watchpoint exception"),
  // and silently resume the inferior (disable watchpoint, stepi, re-enable
  // watchpoint) if the address lies outside the region that lldb asked us
  // to watch.
  //
  // Alternatively, lldb would need to be prepared for a larger region
  // being watched than it requested, and silently resume the inferior if
  // the accessed address is outside the region lldb wants to watch.

  nub_addr_t aligned_wp_address = addr & ~0x7;
  uint32_t addr_dword_offset = addr & 0x7;

  // Do we need to split up this logical watchpoint into two hardware watchpoint
  // registers?
  // e.g. a watchpoint of length 4 on address 6.  We need do this with
  //   one watchpoint on address 0 with bytes 6 & 7 being monitored
  //   one watchpoint on address 8 with bytes 0, 1, 2, 3 being monitored

  if (addr_dword_offset + size > 8) {
    DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                      "EnableHardwareWatchpoint(addr = "
                                      "0x%8.8llx, size = %zu) needs two "
                                      "hardware watchpoints slots to monitor",
                     (uint64_t)addr, size);
    int low_watchpoint_size = 8 - addr_dword_offset;
    int high_watchpoint_size = addr_dword_offset + size - 8;

    // Recursively set both halves; if the second slot can't be obtained,
    // roll back the first so no partial watchpoint is left behind.
    uint32_t lo = EnableHardwareWatchpoint(addr, low_watchpoint_size, read,
                                           write, also_set_on_task);
    if (lo == INVALID_NUB_HW_INDEX)
      return INVALID_NUB_HW_INDEX;
    uint32_t hi =
        EnableHardwareWatchpoint(aligned_wp_address + 8, high_watchpoint_size,
                                 read, write, also_set_on_task);
    if (hi == INVALID_NUB_HW_INDEX) {
      DisableHardwareWatchpoint(lo, also_set_on_task);
      return INVALID_NUB_HW_INDEX;
    }
    // Tag this lo->hi mapping in our database.
    LoHi[lo] = hi;
    return lo;
  }

  // At this point
  //  1 aligned_wp_address is the requested address rounded down to 8-byte
  //    alignment
  //  2 addr_dword_offset is the offset into that double word (8-byte) region
  //    that we are watching
  //  3 size is the number of bytes within that 8-byte region that we are
  //    watching

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above.
  // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for 4,
  // etc, up to 0b11111111 for 8.
  // then we shift those bits left by the offset into this dword that we are
  // interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << size) - 1) << addr_dword_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);

  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    // Find the first watchpoint slot whose WCR_ENABLE bit is clear.
    for (i = 0; i < num_hw_watchpoints; ++i) {
      if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
        break; // We found an available hw watchpoint slot (in i)
    }

    // See if we found an available hw watchpoint slot above
    if (i < num_hw_watchpoints) {
      // DumpDBGState(m_state.dbg);

      // Clear any previous LoHi joined-watchpoint that may have been in use
      LoHi[i] = 0;

      // shift our Byte Address Select bits up to the correct bit range for the
      // DBGWCRn_EL1
      byte_address_select = byte_address_select << 5;

      // Make sure bits 1:0 are clear in our address
      m_state.dbg.__wvr[i] = aligned_wp_address;   // DVA (Data Virtual Address)
      m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                                                   // the DVA that we will watch
                             S_USER |              // Stop only in user mode
                             (read ? WCR_LOAD : 0) |   // Stop on read access?
                             (write ? WCR_STORE : 0) | // Stop on write access?
                             WCR_ENABLE; // Enable this watchpoint;

      DNBLogThreadedIf(
          LOG_WATCHPOINTS, "DNBArchMachARM64::EnableHardwareWatchpoint() "
                           "adding watchpoint on address 0x%llx with control "
                           "register value 0x%x",
          (uint64_t)m_state.dbg.__wvr[i], (uint32_t)m_state.dbg.__wcr[i]);

      // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
      // automatically, don't need to do it here.

      kret = SetDBGState(also_set_on_task);
      // DumpDBGState(m_state.dbg);

      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint() "
                                        "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
                                        "EnableHardwareWatchpoint(): All "
                                        "hardware resources (%u) are in use.",
                       num_hw_watchpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}
834
ReenableHardwareWatchpoint(uint32_t hw_index)835 bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
836 // If this logical watchpoint # is actually implemented using
837 // two hardware watchpoint registers, re-enable both of them.
838
839 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
840 return ReenableHardwareWatchpoint_helper(hw_index) &&
841 ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
842 } else {
843 return ReenableHardwareWatchpoint_helper(hw_index);
844 }
845 }
846
ReenableHardwareWatchpoint_helper(uint32_t hw_index)847 bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
848 kern_return_t kret = GetDBGState(false);
849 if (kret != KERN_SUCCESS)
850 return false;
851
852 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
853 if (hw_index >= num_hw_points)
854 return false;
855
856 m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
857 m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;
858
859 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
860 "EnableHardwareWatchpoint( %u ) - WVR%u = "
861 "0x%8.8llx WCR%u = 0x%8.8llx",
862 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
863 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
864
865 // The kernel will set the MDE_ENABLE bit in the MDSCR_EL1 for us
866 // automatically, don't need to do it here.
867
868 kret = SetDBGState(false);
869
870 return (kret == KERN_SUCCESS);
871 }
872
DisableHardwareWatchpoint(uint32_t hw_index,bool also_set_on_task)873 bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
874 bool also_set_on_task) {
875 if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
876 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
877 DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
878 } else {
879 return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
880 }
881 }
882
DisableHardwareWatchpoint_helper(uint32_t hw_index,bool also_set_on_task)883 bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
884 bool also_set_on_task) {
885 kern_return_t kret = GetDBGState(false);
886 if (kret != KERN_SUCCESS)
887 return false;
888
889 const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
890 if (hw_index >= num_hw_points)
891 return false;
892
893 m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
894 m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];
895
896 m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
897 DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::"
898 "DisableHardwareWatchpoint( %u ) - WVR%u = "
899 "0x%8.8llx WCR%u = 0x%8.8llx",
900 hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
901 hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);
902
903 kret = SetDBGState(also_set_on_task);
904
905 return (kret == KERN_SUCCESS);
906 }
907
// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control
// register.
910 // Returns -1 if the trailing bit patterns are not one of:
911 // { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
912 // 0b?1000000, 0b10000000 }.
static inline int32_t LowestBitSet(uint32_t val) {
  // Scan only the low 8 bits (the width of the BAS field); return the index
  // of the least significant set bit, or -1 if none of bits 0-7 are set.
  for (int32_t idx = 0; idx != 8; ++idx)
    if ((val >> idx) & 1u)
      return idx;
  return -1;
}
920
921 // Iterate through the debug registers; return the index of the first watchpoint
922 // whose address matches.
923 // As a side effect, the starting address as understood by the debugger is
924 // returned which could be
925 // different from 'addr' passed as an in/out argument.
uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  // DumpDBGState(m_state.dbg);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
                   (uint64_t)addr);

  // This is the watchpoint value to match against, i.e., word address.
  // (Clear the low two bits so it compares against the 4-byte-aligned
  // addresses programmed into the WVR registers.)
  nub_addr_t wp_val = addr & ~((nub_addr_t)3);
  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      nub_addr_t wp_addr = GetWatchAddress(debug_state, i);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "GetHardwareWatchpointHit() slot: %u "
                       "(addr = 0x%llx).",
                       i, (uint64_t)wp_addr);
      if (wp_val == wp_addr) {
        // WCR bits [12:5] hold the Byte Address Select (BAS) field: which
        // bytes following the word-aligned WVR address are being watched.
        uint32_t byte_mask = bits(debug_state.__wcr[i], 12, 5);

        // Sanity check the byte_mask, first.
        if (LowestBitSet(byte_mask) < 0)
          continue;

        // Check that the watchpoint is enabled.
        if (!IsWatchpointEnabled(debug_state, i))
          continue;

        // Compute the starting address (from the point of view of the
        // debugger): the word-aligned base plus the offset of the first
        // watched byte in the BAS mask.
        addr = wp_addr + LowestBitSet(byte_mask);
        return i;
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}
969
GetWatchpointAddressByIndex(uint32_t hw_index)970 nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
971 kern_return_t kret = GetDBGState(true);
972 if (kret != KERN_SUCCESS)
973 return INVALID_NUB_ADDRESS;
974 const uint32_t num = NumSupportedHardwareWatchpoints();
975 if (hw_index >= num)
976 return INVALID_NUB_ADDRESS;
977 if (IsWatchpointEnabled(m_state.dbg, hw_index))
978 return GetWatchAddress(m_state.dbg, hw_index);
979 return INVALID_NUB_ADDRESS;
980 }
981
IsWatchpointEnabled(const DBG & debug_state,uint32_t hw_index)982 bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
983 uint32_t hw_index) {
984 // Watchpoint Control Registers, bitfield definitions
985 // ...
986 // Bits Value Description
987 // [0] 0 Watchpoint disabled
988 // 1 Watchpoint enabled.
989 return (debug_state.__wcr[hw_index] & 1u);
990 }
991
GetWatchAddress(const DBG & debug_state,uint32_t hw_index)992 nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
993 uint32_t hw_index) {
994 // Watchpoint Value Registers, bitfield definitions
995 // Bits Description
996 // [31:2] Watchpoint value (word address, i.e., 4-byte aligned)
997 // [1:0] RAZ/SBZP
998 return bits(debug_state.__wvr[hw_index], 63, 0);
999 }
1000
1001 // Register information definitions for 64 bit ARMv8.
// Register numbers for the general purpose register set.  x29/x30/x31 are
// also exposed under their ABI names (fp/lr/sp); both spellings share one
// register number.
enum gpr_regnums {
  gpr_x0 = 0,
  gpr_x1,
  gpr_x2,
  gpr_x3,
  gpr_x4,
  gpr_x5,
  gpr_x6,
  gpr_x7,
  gpr_x8,
  gpr_x9,
  gpr_x10,
  gpr_x11,
  gpr_x12,
  gpr_x13,
  gpr_x14,
  gpr_x15,
  gpr_x16,
  gpr_x17,
  gpr_x18,
  gpr_x19,
  gpr_x20,
  gpr_x21,
  gpr_x22,
  gpr_x23,
  gpr_x24,
  gpr_x25,
  gpr_x26,
  gpr_x27,
  gpr_x28,
  gpr_fp,
  gpr_x29 = gpr_fp,
  gpr_lr,
  gpr_x30 = gpr_lr,
  gpr_sp,
  gpr_x31 = gpr_sp,
  gpr_pc,
  gpr_cpsr,
  // wN pseudo registers: 32-bit views contained in the corresponding xN
  // register (see DEFINE_PSEUDO_GPR_IDX and the g_contained_x* lists).
  gpr_w0,
  gpr_w1,
  gpr_w2,
  gpr_w3,
  gpr_w4,
  gpr_w5,
  gpr_w6,
  gpr_w7,
  gpr_w8,
  gpr_w9,
  gpr_w10,
  gpr_w11,
  gpr_w12,
  gpr_w13,
  gpr_w14,
  gpr_w15,
  gpr_w16,
  gpr_w17,
  gpr_w18,
  gpr_w19,
  gpr_w20,
  gpr_w21,
  gpr_w22,
  gpr_w23,
  gpr_w24,
  gpr_w25,
  gpr_w26,
  gpr_w27,
  gpr_w28

};
1071
// Register numbers for the VFP/SIMD register set: the 128-bit v0-v31
// registers, the fpsr/fpcr status and control registers, and the 32-bit (sN)
// and 64-bit (dN) pseudo views of each vN.
enum {
  vfp_v0 = 0,
  vfp_v1,
  vfp_v2,
  vfp_v3,
  vfp_v4,
  vfp_v5,
  vfp_v6,
  vfp_v7,
  vfp_v8,
  vfp_v9,
  vfp_v10,
  vfp_v11,
  vfp_v12,
  vfp_v13,
  vfp_v14,
  vfp_v15,
  vfp_v16,
  vfp_v17,
  vfp_v18,
  vfp_v19,
  vfp_v20,
  vfp_v21,
  vfp_v22,
  vfp_v23,
  vfp_v24,
  vfp_v25,
  vfp_v26,
  vfp_v27,
  vfp_v28,
  vfp_v29,
  vfp_v30,
  vfp_v31,
  vfp_fpsr,
  vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0,
  vfp_s1,
  vfp_s2,
  vfp_s3,
  vfp_s4,
  vfp_s5,
  vfp_s6,
  vfp_s7,
  vfp_s8,
  vfp_s9,
  vfp_s10,
  vfp_s11,
  vfp_s12,
  vfp_s13,
  vfp_s14,
  vfp_s15,
  vfp_s16,
  vfp_s17,
  vfp_s18,
  vfp_s19,
  vfp_s20,
  vfp_s21,
  vfp_s22,
  vfp_s23,
  vfp_s24,
  vfp_s25,
  vfp_s26,
  vfp_s27,
  vfp_s28,
  vfp_s29,
  vfp_s30,
  vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0,
  vfp_d1,
  vfp_d2,
  vfp_d3,
  vfp_d4,
  vfp_d5,
  vfp_d6,
  vfp_d7,
  vfp_d8,
  vfp_d9,
  vfp_d10,
  vfp_d11,
  vfp_d12,
  vfp_d13,
  vfp_d14,
  vfp_d15,
  vfp_d16,
  vfp_d17,
  vfp_d18,
  vfp_d19,
  vfp_d20,
  vfp_d21,
  vfp_d22,
  vfp_d23,
  vfp_d24,
  vfp_d25,
  vfp_d26,
  vfp_d27,
  vfp_d28,
  vfp_d29,
  vfp_d30,
  vfp_d31
};
1176
// Register numbers for the EXC (exception state) register set
// (see g_exc_registers below).
enum { exc_far = 0, exc_esr, exc_exception };
1178
1179 // These numbers from the "DWARF for the ARM 64-bit Architecture (AArch64)"
1180 // document.
1181
enum {
  dwarf_x0 = 0,
  dwarf_x1,
  dwarf_x2,
  dwarf_x3,
  dwarf_x4,
  dwarf_x5,
  dwarf_x6,
  dwarf_x7,
  dwarf_x8,
  dwarf_x9,
  dwarf_x10,
  dwarf_x11,
  dwarf_x12,
  dwarf_x13,
  dwarf_x14,
  dwarf_x15,
  dwarf_x16,
  dwarf_x17,
  dwarf_x18,
  dwarf_x19,
  dwarf_x20,
  dwarf_x21,
  dwarf_x22,
  dwarf_x23,
  dwarf_x24,
  dwarf_x25,
  dwarf_x26,
  dwarf_x27,
  dwarf_x28,
  dwarf_x29,
  dwarf_x30,
  dwarf_x31,
  dwarf_pc = 32,
  dwarf_elr_mode = 33,
  // ABI aliases for x29/x30/x31.
  dwarf_fp = dwarf_x29,
  dwarf_lr = dwarf_x30,
  dwarf_sp = dwarf_x31,
  // 34-63 reserved

  // V0-V31 (128 bit vector registers)
  dwarf_v0 = 64,
  dwarf_v1,
  dwarf_v2,
  dwarf_v3,
  dwarf_v4,
  dwarf_v5,
  dwarf_v6,
  dwarf_v7,
  dwarf_v8,
  dwarf_v9,
  dwarf_v10,
  dwarf_v11,
  dwarf_v12,
  dwarf_v13,
  dwarf_v14,
  dwarf_v15,
  dwarf_v16,
  dwarf_v17,
  dwarf_v18,
  dwarf_v19,
  dwarf_v20,
  dwarf_v21,
  dwarf_v22,
  dwarf_v23,
  dwarf_v24,
  dwarf_v25,
  dwarf_v26,
  dwarf_v27,
  dwarf_v28,
  dwarf_v29,
  dwarf_v30,
  dwarf_v31

  // 96-127 reserved
};
1258
// Register numbers in the order debugserver itself advertises them to the
// debugger: the GPRs (x0-x28, fp, lr, sp, pc, cpsr) followed by the VFP
// registers (v0-v31, fpsr, fpcr).
enum {
  debugserver_gpr_x0 = 0,
  debugserver_gpr_x1,
  debugserver_gpr_x2,
  debugserver_gpr_x3,
  debugserver_gpr_x4,
  debugserver_gpr_x5,
  debugserver_gpr_x6,
  debugserver_gpr_x7,
  debugserver_gpr_x8,
  debugserver_gpr_x9,
  debugserver_gpr_x10,
  debugserver_gpr_x11,
  debugserver_gpr_x12,
  debugserver_gpr_x13,
  debugserver_gpr_x14,
  debugserver_gpr_x15,
  debugserver_gpr_x16,
  debugserver_gpr_x17,
  debugserver_gpr_x18,
  debugserver_gpr_x19,
  debugserver_gpr_x20,
  debugserver_gpr_x21,
  debugserver_gpr_x22,
  debugserver_gpr_x23,
  debugserver_gpr_x24,
  debugserver_gpr_x25,
  debugserver_gpr_x26,
  debugserver_gpr_x27,
  debugserver_gpr_x28,
  debugserver_gpr_fp, // x29
  debugserver_gpr_lr, // x30
  debugserver_gpr_sp, // sp aka xsp
  debugserver_gpr_pc,
  debugserver_gpr_cpsr,
  debugserver_vfp_v0,
  debugserver_vfp_v1,
  debugserver_vfp_v2,
  debugserver_vfp_v3,
  debugserver_vfp_v4,
  debugserver_vfp_v5,
  debugserver_vfp_v6,
  debugserver_vfp_v7,
  debugserver_vfp_v8,
  debugserver_vfp_v9,
  debugserver_vfp_v10,
  debugserver_vfp_v11,
  debugserver_vfp_v12,
  debugserver_vfp_v13,
  debugserver_vfp_v14,
  debugserver_vfp_v15,
  debugserver_vfp_v16,
  debugserver_vfp_v17,
  debugserver_vfp_v18,
  debugserver_vfp_v19,
  debugserver_vfp_v20,
  debugserver_vfp_v21,
  debugserver_vfp_v22,
  debugserver_vfp_v23,
  debugserver_vfp_v24,
  debugserver_vfp_v25,
  debugserver_vfp_v26,
  debugserver_vfp_v27,
  debugserver_vfp_v28,
  debugserver_vfp_v29,
  debugserver_vfp_v30,
  debugserver_vfp_v31,
  debugserver_vfp_fpsr,
  debugserver_vfp_fpcr
};
1329
// NULL-terminated lists naming the xN register that contains each wN pseudo
// register (used as the "contained registers" field in
// DEFINE_PSEUDO_GPR_IDX).
const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

// NULL-terminated lists of registers whose cached values must be invalidated
// when xN is written (writing xN also changes its wN alias).
const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};
1389
// Byte offset of __x[idx] within the ARM64 GPR thread-state structure.
#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

// Byte offset of a named GPR field (e.g. __cpsr) within the GPR structure.
#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros will auto define the register name, alt name, register size,
// register offset, encoding, format and native register. This ensures that
// the register state structures are defined correctly and have the correct
// sizes and offsets.
#define DEFINE_GPR_IDX(idx, reg, alt, gen)                                     \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx),      \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL,            \
        g_invalidate_x##idx                                                    \
  }
#define DEFINE_GPR_NAME(reg, alt, gen)                                         \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg),     \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL       \
  }
// Pseudo (wN) registers carry no storage of their own (offset 0); their
// value comes from the containing xN register via g_contained_x##idx.
#define DEFINE_PSEUDO_GPR_IDX(idx, reg)                                        \
  {                                                                            \
    e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM,   \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        g_contained_x##idx, g_invalidate_x##idx                                \
  }
1415
1416 //_STRUCT_ARM_THREAD_STATE64
1417 //{
1418 // uint64_t x[29]; /* General purpose registers x0-x28 */
1419 // uint64_t fp; /* Frame pointer x29 */
1420 // uint64_t lr; /* Link register x30 */
1421 // uint64_t sp; /* Stack pointer x31 */
1422 // uint64_t pc; /* Program counter */
1423 // uint32_t cpsr; /* Current program status register */
1424 //};
1425
1426 // General purpose registers
// General purpose registers
const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
    DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
    DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
    DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
    // For the G/g packet we want to show where the offset into the regctx
    // is for fp/lr/sp/pc, but we cannot directly access them on arm64e
    // devices (and therefore can't offsetof() them)) - add the offset based
    // on the last accessible register by hand for advertising the location
    // in the regctx to lldb. We'll go through the accessor functions when
    // we read/write them here.
    {
      e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
      dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
    },
    {
      e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
      dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
    },
    {
      e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
      dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
    },
    {
      e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
      dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
    },

    // in armv7 we specify that writing to the CPSR should invalidate r8-12, sp,
    // lr.
    // this should be specified for arm64 too even though debugserver is only
    // used for
    // userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, INVALID_NUB_REGNUM,
     debugserver_gpr_cpsr, NULL, NULL},

    // 32-bit wN pseudo registers: views of the low half of x0-x28.
    DEFINE_PSEUDO_GPR_IDX(0, w0),
    DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),
    DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),
    DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),
    DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),
    DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10),
    DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12),
    DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14),
    DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16),
    DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18),
    DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20),
    DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22),
    DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24),
    DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26),
    DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};
1518
// NULL-terminated lists naming the vN register that contains each sN/dN
// pseudo register (the "contained registers" field in the VFP pseudo macros).
const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

// NULL-terminated lists of registers whose cached values must be invalidated
// when vN is written (writing vN also changes its dN and sN aliases).
const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};
1584
#if defined(__arm64__) || defined(__aarch64__)
// Byte offset of the 16-byte __v[idx] vector slot within the full Context
// (FPU state follows the GPR state; see DNBArchMachARM64::Context).
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) +                         \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
// Cross-build: the FPU state is only available as an opaque blob.
#define VFP_V_OFFSET_IDX(idx)                                                  \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) +                      \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
// Byte offset of a named FPU field within the full Context.
#define VFP_OFFSET_NAME(reg)                                                   \
  (offsetof(DNBArchMachARM64::FPU, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, vfp))
// Byte offset of a named exception-state field within the full Context.
#define EXC_OFFSET(reg)                                                        \
  (offsetof(DNBArchMachARM64::EXC, reg) +                                      \
   offsetof(DNBArchMachARM64::Context, exc))

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx)                                                  \
  {                                                                            \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16,    \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx,               \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx  \
  }
// sN/dN pseudo registers have no storage of their own (offset 0); the value
// comes from the containing vN register.
#define DEFINE_PSEUDO_VFP_S_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx)                                           \
  {                                                                            \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0,             \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,            \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx            \
  }
1620
1621 // Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    // fpsr/fpcr are laid out immediately after v31 in the FPU state, hence
    // the VFP_V_OFFSET_IDX(32) + 0 / + 4 offsets.
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    // 32-bit sN pseudo registers (low 32 bits of each vN).
    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    // 64-bit dN pseudo registers (low 64 bits of each vN).
    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};
1729
1730 //_STRUCT_ARM_EXCEPTION_STATE64
1731 //{
1732 // uint64_t far; /* Virtual Fault Address */
1733 // uint32_t esr; /* Exception syndrome */
1734 // uint32_t exception; /* number of arm exception taken */
1735 //};
1736
1737 // Exception registers
// Exception registers: far/esr/exception mirror the fields of
// _STRUCT_ARM_EXCEPTION_STATE64 shown above.
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};
1748
// Number of registers in each register set, derived from the table sizes so
// the counts can never drift out of sync with the tables themselves.
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
// Grand total, used by the "all registers" set at index zero of g_reg_sets.
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;
1758
// Register set definitions. The first definition, at register set index
// zero, covers all registers; the other sets follow. The register
// information pointer for the all-registers set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);
1770
// Return the static register-set table for this architecture; the number of
// entries is returned through num_reg_sets (caller must pass a valid
// pointer).
const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}
1776
FixGenericRegisterNumber(uint32_t & set,uint32_t & reg)1777 bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t ®) {
1778 if (set == REGISTER_SET_GENERIC) {
1779 switch (reg) {
1780 case GENERIC_REGNUM_PC: // Program Counter
1781 set = e_regSetGPR;
1782 reg = gpr_pc;
1783 break;
1784
1785 case GENERIC_REGNUM_SP: // Stack Pointer
1786 set = e_regSetGPR;
1787 reg = gpr_sp;
1788 break;
1789
1790 case GENERIC_REGNUM_FP: // Frame Pointer
1791 set = e_regSetGPR;
1792 reg = gpr_fp;
1793 break;
1794
1795 case GENERIC_REGNUM_RA: // Return Address
1796 set = e_regSetGPR;
1797 reg = gpr_lr;
1798 break;
1799
1800 case GENERIC_REGNUM_FLAGS: // Processor flags register
1801 set = e_regSetGPR;
1802 reg = gpr_cpsr;
1803 break;
1804
1805 case GENERIC_REGNUM_ARG1:
1806 case GENERIC_REGNUM_ARG2:
1807 case GENERIC_REGNUM_ARG3:
1808 case GENERIC_REGNUM_ARG4:
1809 case GENERIC_REGNUM_ARG5:
1810 case GENERIC_REGNUM_ARG6:
1811 set = e_regSetGPR;
1812 reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
1813 break;
1814
1815 default:
1816 return false;
1817 }
1818 }
1819 return true;
1820 }
GetRegisterValue(uint32_t set,uint32_t reg,DNBRegisterValue * value)1821 bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
1822 DNBRegisterValue *value) {
1823 if (!FixGenericRegisterNumber(set, reg))
1824 return false;
1825
1826 if (GetRegisterState(set, false) != KERN_SUCCESS)
1827 return false;
1828
1829 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1830 if (regInfo) {
1831 value->info = *regInfo;
1832 switch (set) {
1833 case e_regSetGPR:
1834 if (reg <= gpr_pc) {
1835 #if defined(__LP64__)
1836 if (reg == gpr_pc)
1837 value->value.uint64 = arm_thread_state64_get_pc (m_state.context.gpr);
1838 else if (reg == gpr_lr)
1839 value->value.uint64 = arm_thread_state64_get_lr (m_state.context.gpr);
1840 else if (reg == gpr_sp)
1841 value->value.uint64 = arm_thread_state64_get_sp (m_state.context.gpr);
1842 else if (reg == gpr_fp)
1843 value->value.uint64 = arm_thread_state64_get_fp (m_state.context.gpr);
1844 else
1845 value->value.uint64 = m_state.context.gpr.__x[reg];
1846 #else
1847 value->value.uint64 = m_state.context.gpr.__x[reg];
1848 #endif
1849 return true;
1850 } else if (reg == gpr_cpsr) {
1851 value->value.uint32 = m_state.context.gpr.__cpsr;
1852 return true;
1853 }
1854 break;
1855
1856 case e_regSetVFP:
1857
1858 if (reg >= vfp_v0 && reg <= vfp_v31) {
1859 #if defined(__arm64__) || defined(__aarch64__)
1860 memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
1861 16);
1862 #else
1863 memcpy(&value->value.v_uint8,
1864 ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
1865 16);
1866 #endif
1867 return true;
1868 } else if (reg == vfp_fpsr) {
1869 #if defined(__arm64__) || defined(__aarch64__)
1870 memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
1871 #else
1872 memcpy(&value->value.uint32,
1873 ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
1874 #endif
1875 return true;
1876 } else if (reg == vfp_fpcr) {
1877 #if defined(__arm64__) || defined(__aarch64__)
1878 memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
1879 #else
1880 memcpy(&value->value.uint32,
1881 ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
1882 #endif
1883 return true;
1884 } else if (reg >= vfp_s0 && reg <= vfp_s31) {
1885 #if defined(__arm64__) || defined(__aarch64__)
1886 memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
1887 4);
1888 #else
1889 memcpy(&value->value.v_uint8,
1890 ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
1891 4);
1892 #endif
1893 return true;
1894 } else if (reg >= vfp_d0 && reg <= vfp_d31) {
1895 #if defined(__arm64__) || defined(__aarch64__)
1896 memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
1897 8);
1898 #else
1899 memcpy(&value->value.v_uint8,
1900 ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
1901 8);
1902 #endif
1903 return true;
1904 }
1905 break;
1906
1907 case e_regSetEXC:
1908 if (reg == exc_far) {
1909 value->value.uint64 = m_state.context.exc.__far;
1910 return true;
1911 } else if (reg == exc_esr) {
1912 value->value.uint32 = m_state.context.exc.__esr;
1913 return true;
1914 } else if (reg == exc_exception) {
1915 value->value.uint32 = m_state.context.exc.__exception;
1916 return true;
1917 }
1918 break;
1919 }
1920 }
1921 return false;
1922 }
1923
SetRegisterValue(uint32_t set,uint32_t reg,const DNBRegisterValue * value)1924 bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
1925 const DNBRegisterValue *value) {
1926 if (!FixGenericRegisterNumber(set, reg))
1927 return false;
1928
1929 if (GetRegisterState(set, false) != KERN_SUCCESS)
1930 return false;
1931
1932 bool success = false;
1933 const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
1934 if (regInfo) {
1935 switch (set) {
1936 case e_regSetGPR:
1937 if (reg <= gpr_pc) {
1938 #if defined(__LP64__)
1939 uint64_t signed_value = value->value.uint64;
1940 #if __has_feature(ptrauth_calls)
1941 // The incoming value could be garbage. Strip it to avoid
1942 // trapping when it gets resigned in the thread state.
1943 signed_value = (uint64_t) ptrauth_strip((void*) signed_value, ptrauth_key_function_pointer);
1944 signed_value = (uint64_t) ptrauth_sign_unauthenticated((void*) signed_value, ptrauth_key_function_pointer, 0);
1945 #endif
1946 if (reg == gpr_pc)
1947 arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) signed_value);
1948 else if (reg == gpr_lr)
1949 arm_thread_state64_set_lr_fptr (m_state.context.gpr, (void*) signed_value);
1950 else if (reg == gpr_sp)
1951 arm_thread_state64_set_sp (m_state.context.gpr, value->value.uint64);
1952 else if (reg == gpr_fp)
1953 arm_thread_state64_set_fp (m_state.context.gpr, value->value.uint64);
1954 else
1955 m_state.context.gpr.__x[reg] = value->value.uint64;
1956 #else
1957 m_state.context.gpr.__x[reg] = value->value.uint64;
1958 #endif
1959 success = true;
1960 } else if (reg == gpr_cpsr) {
1961 m_state.context.gpr.__cpsr = value->value.uint32;
1962 success = true;
1963 }
1964 break;
1965
1966 case e_regSetVFP:
1967 if (reg >= vfp_v0 && reg <= vfp_v31) {
1968 #if defined(__arm64__) || defined(__aarch64__)
1969 memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
1970 16);
1971 #else
1972 memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
1973 &value->value.v_uint8, 16);
1974 #endif
1975 success = true;
1976 } else if (reg == vfp_fpsr) {
1977 #if defined(__arm64__) || defined(__aarch64__)
1978 memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
1979 #else
1980 memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
1981 &value->value.uint32, 4);
1982 #endif
1983 success = true;
1984 } else if (reg == vfp_fpcr) {
1985 #if defined(__arm64__) || defined(__aarch64__)
1986 memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
1987 #else
1988 memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
1989 &value->value.uint32, 4);
1990 #endif
1991 success = true;
1992 } else if (reg >= vfp_s0 && reg <= vfp_s31) {
1993 #if defined(__arm64__) || defined(__aarch64__)
1994 memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
1995 4);
1996 #else
1997 memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
1998 &value->value.v_uint8, 4);
1999 #endif
2000 success = true;
2001 } else if (reg >= vfp_d0 && reg <= vfp_d31) {
2002 #if defined(__arm64__) || defined(__aarch64__)
2003 memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
2004 8);
2005 #else
2006 memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
2007 &value->value.v_uint8, 8);
2008 #endif
2009 success = true;
2010 }
2011 break;
2012
2013 case e_regSetEXC:
2014 if (reg == exc_far) {
2015 m_state.context.exc.__far = value->value.uint64;
2016 success = true;
2017 } else if (reg == exc_esr) {
2018 m_state.context.exc.__esr = value->value.uint32;
2019 success = true;
2020 } else if (reg == exc_exception) {
2021 m_state.context.exc.__exception = value->value.uint32;
2022 success = true;
2023 }
2024 break;
2025 }
2026 }
2027 if (success)
2028 return SetRegisterState(set) == KERN_SUCCESS;
2029 return false;
2030 }
2031
GetRegisterState(int set,bool force)2032 kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
2033 switch (set) {
2034 case e_regSetALL:
2035 return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
2036 GetDBGState(force);
2037 case e_regSetGPR:
2038 return GetGPRState(force);
2039 case e_regSetVFP:
2040 return GetVFPState(force);
2041 case e_regSetEXC:
2042 return GetEXCState(force);
2043 case e_regSetDBG:
2044 return GetDBGState(force);
2045 default:
2046 break;
2047 }
2048 return KERN_INVALID_ARGUMENT;
2049 }
2050
SetRegisterState(int set)2051 kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
2052 // Make sure we have a valid context to set.
2053 kern_return_t err = GetRegisterState(set, false);
2054 if (err != KERN_SUCCESS)
2055 return err;
2056
2057 switch (set) {
2058 case e_regSetALL:
2059 return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
2060 case e_regSetGPR:
2061 return SetGPRState();
2062 case e_regSetVFP:
2063 return SetVFPState();
2064 case e_regSetEXC:
2065 return SetEXCState();
2066 case e_regSetDBG:
2067 return SetDBGState(false);
2068 default:
2069 break;
2070 }
2071 return KERN_INVALID_ARGUMENT;
2072 }
2073
RegisterSetStateIsValid(int set) const2074 bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
2075 return m_state.RegsAreValid(set);
2076 }
2077
GetRegisterContext(void * buf,nub_size_t buf_len)2078 nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
2079 nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
2080 sizeof(m_state.context.exc);
2081
2082 if (buf && buf_len) {
2083 if (size > buf_len)
2084 size = buf_len;
2085
2086 bool force = false;
2087 if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
2088 return 0;
2089
2090 // Copy each struct individually to avoid any padding that might be between
2091 // the structs in m_state.context
2092 uint8_t *p = (uint8_t *)buf;
2093 ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
2094 p += sizeof(m_state.context.gpr);
2095 ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
2096 p += sizeof(m_state.context.vfp);
2097 ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
2098 p += sizeof(m_state.context.exc);
2099
2100 size_t bytes_written = p - (uint8_t *)buf;
2101 UNUSED_IF_ASSERT_DISABLED(bytes_written);
2102 assert(bytes_written == size);
2103 }
2104 DNBLogThreadedIf(
2105 LOG_THREAD,
2106 "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
2107 buf_len, size);
2108 // Return the size of the register context even if NULL was passed in
2109 return size;
2110 }
2111
SetRegisterContext(const void * buf,nub_size_t buf_len)2112 nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
2113 nub_size_t buf_len) {
2114 nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
2115 sizeof(m_state.context.exc);
2116
2117 if (buf == NULL || buf_len == 0)
2118 size = 0;
2119
2120 if (size) {
2121 if (size > buf_len)
2122 size = buf_len;
2123
2124 // Copy each struct individually to avoid any padding that might be between
2125 // the structs in m_state.context
2126 uint8_t *p = (uint8_t *)buf;
2127 ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
2128 p += sizeof(m_state.context.gpr);
2129 ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
2130 p += sizeof(m_state.context.vfp);
2131 ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
2132 p += sizeof(m_state.context.exc);
2133
2134 size_t bytes_written = p - (uint8_t *)buf;
2135 UNUSED_IF_ASSERT_DISABLED(bytes_written);
2136 assert(bytes_written == size);
2137 SetGPRState();
2138 SetVFPState();
2139 SetEXCState();
2140 }
2141 DNBLogThreadedIf(
2142 LOG_THREAD,
2143 "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
2144 buf_len, size);
2145 return size;
2146 }
2147
SaveRegisterState()2148 uint32_t DNBArchMachARM64::SaveRegisterState() {
2149 kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
2150 DNBLogThreadedIf(
2151 LOG_THREAD, "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
2152 "(SetGPRState() for stop_count = %u)",
2153 m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());
2154
2155 // Always re-read the registers because above we call thread_abort_safely();
2156 bool force = true;
2157
2158 if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
2159 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
2160 "error: GPR regs failed to read: %u ",
2161 kret);
2162 } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
2163 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
2164 "error: %s regs failed to read: %u",
2165 "VFP", kret);
2166 } else {
2167 const uint32_t save_id = GetNextRegisterStateSaveID();
2168 m_saved_register_states[save_id] = m_state.context;
2169 return save_id;
2170 }
2171 return UINT32_MAX;
2172 }
2173
RestoreRegisterState(uint32_t save_id)2174 bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
2175 SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
2176 if (pos != m_saved_register_states.end()) {
2177 m_state.context.gpr = pos->second.gpr;
2178 m_state.context.vfp = pos->second.vfp;
2179 kern_return_t kret;
2180 bool success = true;
2181 if ((kret = SetGPRState()) != KERN_SUCCESS) {
2182 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
2183 "(save_id = %u) error: GPR regs failed to "
2184 "write: %u",
2185 save_id, kret);
2186 success = false;
2187 } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
2188 DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
2189 "(save_id = %u) error: %s regs failed to "
2190 "write: %u",
2191 save_id, "VFP", kret);
2192 success = false;
2193 }
2194 m_saved_register_states.erase(pos);
2195 return success;
2196 }
2197 return false;
2198 }
2199
2200 #endif // #if defined (ARM_THREAD_STATE64_COUNT)
2201 #endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)
2202