1 /* Copyright (C) 2009-2020 Free Software Foundation, Inc.
2 Contributed by ARM Ltd.
3
4 This file is part of GDB.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program. If not, see <http://www.gnu.org/licenses/>. */
18
19 #include "gdbsupport/common-defs.h"
20 #include "gdbsupport/break-common.h"
21 #include "gdbsupport/common-regcache.h"
22 #include "nat/linux-nat.h"
23 #include "aarch64-linux-hw-point.h"
24
25 #include <sys/uio.h>
26 #include <asm/ptrace.h>
27 #include <sys/ptrace.h>
28 #include <elf.h>
29
/* Number of hardware breakpoints/watchpoints the target supports.
   They are initialized with values obtained via the ptrace calls
   with NT_ARM_HW_BREAK and NT_ARM_HW_WATCH respectively.  */

int aarch64_num_bp_regs;
int aarch64_num_wp_regs;

/* True if this kernel does not have the bug described by PR
   external/20207 (Linux >= 4.10).  A fixed kernel supports any
   contiguous range of bits in 8-bit byte DR_CONTROL_MASK.  A buggy
   kernel supports only 0x01, 0x03, 0x0f and 0xff.  We start by
   assuming the bug is fixed, and then detect the bug at
   PTRACE_SETREGSET time.  */
static bool kernel_supports_any_contiguous_range = true;
44
45 /* Return starting byte 0..7 incl. of a watchpoint encoded by CTRL. */
46
47 unsigned int
aarch64_watchpoint_offset(unsigned int ctrl)48 aarch64_watchpoint_offset (unsigned int ctrl)
49 {
50 uint8_t mask = DR_CONTROL_MASK (ctrl);
51 unsigned retval;
52
53 /* Shift out bottom zeros. */
54 for (retval = 0; mask && (mask & 1) == 0; ++retval)
55 mask >>= 1;
56
57 return retval;
58 }
59
60 /* Utility function that returns the length in bytes of a watchpoint
61 according to the content of a hardware debug control register CTRL.
62 Any contiguous range of bytes in CTRL is supported. The returned
63 value can be between 0..8 (inclusive). */
64
65 unsigned int
aarch64_watchpoint_length(unsigned int ctrl)66 aarch64_watchpoint_length (unsigned int ctrl)
67 {
68 uint8_t mask = DR_CONTROL_MASK (ctrl);
69 unsigned retval;
70
71 /* Shift out bottom zeros. */
72 mask >>= aarch64_watchpoint_offset (ctrl);
73
74 /* Count bottom ones. */
75 for (retval = 0; (mask & 1) != 0; ++retval)
76 mask >>= 1;
77
78 if (mask != 0)
79 error (_("Unexpected hardware watchpoint length register value 0x%x"),
80 DR_CONTROL_MASK (ctrl));
81
82 return retval;
83 }
84
85 /* Given the hardware breakpoint or watchpoint type TYPE and its
86 length LEN, return the expected encoding for a hardware
87 breakpoint/watchpoint control register. */
88
89 static unsigned int
aarch64_point_encode_ctrl_reg(enum target_hw_bp_type type,int offset,int len)90 aarch64_point_encode_ctrl_reg (enum target_hw_bp_type type, int offset, int len)
91 {
92 unsigned int ctrl, ttype;
93
94 gdb_assert (offset == 0 || kernel_supports_any_contiguous_range);
95 gdb_assert (offset + len <= AARCH64_HWP_MAX_LEN_PER_REG);
96
97 /* type */
98 switch (type)
99 {
100 case hw_write:
101 ttype = 2;
102 break;
103 case hw_read:
104 ttype = 1;
105 break;
106 case hw_access:
107 ttype = 3;
108 break;
109 case hw_execute:
110 ttype = 0;
111 break;
112 default:
113 perror_with_name (_("Unrecognized breakpoint/watchpoint type"));
114 }
115
116 ctrl = ttype << 3;
117
118 /* offset and length bitmask */
119 ctrl |= ((1 << len) - 1) << (5 + offset);
120 /* enabled at el0 */
121 ctrl |= (2 << 1) | 1;
122
123 return ctrl;
124 }
125
126 /* Addresses to be written to the hardware breakpoint and watchpoint
127 value registers need to be aligned; the alignment is 4-byte and
128 8-type respectively. Linux kernel rejects any non-aligned address
129 it receives from the related ptrace call. Furthermore, the kernel
130 currently only supports the following Byte Address Select (BAS)
131 values: 0x1, 0x3, 0xf and 0xff, which means that for a hardware
132 watchpoint to be accepted by the kernel (via ptrace call), its
133 valid length can only be 1 byte, 2 bytes, 4 bytes or 8 bytes.
134 Despite these limitations, the unaligned watchpoint is supported in
135 this port.
136
137 Return 0 for any non-compliant ADDR and/or LEN; return 1 otherwise. */
138
139 static int
aarch64_point_is_aligned(int is_watchpoint,CORE_ADDR addr,int len)140 aarch64_point_is_aligned (int is_watchpoint, CORE_ADDR addr, int len)
141 {
142 unsigned int alignment = 0;
143
144 if (is_watchpoint)
145 alignment = AARCH64_HWP_ALIGNMENT;
146 else
147 {
148 struct regcache *regcache
149 = get_thread_regcache_for_ptid (current_lwp_ptid ());
150
151 /* Set alignment to 2 only if the current process is 32-bit,
152 since thumb instruction can be 2-byte aligned. Otherwise, set
153 alignment to AARCH64_HBP_ALIGNMENT. */
154 if (regcache_register_size (regcache, 0) == 8)
155 alignment = AARCH64_HBP_ALIGNMENT;
156 else
157 alignment = 2;
158 }
159
160 if (addr & (alignment - 1))
161 return 0;
162
163 if ((!kernel_supports_any_contiguous_range
164 && len != 8 && len != 4 && len != 2 && len != 1)
165 || (kernel_supports_any_contiguous_range
166 && (len < 1 || len > 8)))
167 return 0;
168
169 return 1;
170 }
171
172 /* Given the (potentially unaligned) watchpoint address in ADDR and
173 length in LEN, return the aligned address, offset from that base
174 address, and aligned length in *ALIGNED_ADDR_P, *ALIGNED_OFFSET_P
175 and *ALIGNED_LEN_P, respectively. The returned values will be
176 valid values to write to the hardware watchpoint value and control
177 registers.
178
179 The given watchpoint may get truncated if more than one hardware
180 register is needed to cover the watched region. *NEXT_ADDR_P
181 and *NEXT_LEN_P, if non-NULL, will return the address and length
182 of the remaining part of the watchpoint (which can be processed
183 by calling this routine again to generate another aligned address,
184 offset and length tuple.
185
186 Essentially, unaligned watchpoint is achieved by minimally
187 enlarging the watched area to meet the alignment requirement, and
188 if necessary, splitting the watchpoint over several hardware
189 watchpoint registers.
190
191 On kernels that predate the support for Byte Address Select (BAS)
192 in the hardware watchpoint control register, the offset from the
193 base address is always zero, and so in that case the trade-off is
194 that there will be false-positive hits for the read-type or the
195 access-type hardware watchpoints; for the write type, which is more
196 commonly used, there will be no such issues, as the higher-level
197 breakpoint management in gdb always examines the exact watched
198 region for any content change, and transparently resumes a thread
199 from a watchpoint trap if there is no change to the watched region.
200
201 Another limitation is that because the watched region is enlarged,
202 the watchpoint fault address discovered by
203 aarch64_stopped_data_address may be outside of the original watched
204 region, especially when the triggering instruction is accessing a
205 larger region. When the fault address is not within any known
206 range, watchpoints_triggered in gdb will get confused, as the
207 higher-level watchpoint management is only aware of original
208 watched regions, and will think that some unknown watchpoint has
209 been triggered. To prevent such a case,
210 aarch64_stopped_data_address implementations in gdb and gdbserver
211 try to match the trapped address with a watched region, and return
212 an address within the latter. */
213
static void
aarch64_align_watchpoint (CORE_ADDR addr, int len, CORE_ADDR *aligned_addr_p,
			  int *aligned_offset_p, int *aligned_len_p,
			  CORE_ADDR *next_addr_p, int *next_len_p,
			  CORE_ADDR *next_addr_orig_p)
{
  int aligned_len;
  unsigned int offset, aligned_offset;
  CORE_ADDR aligned_addr;
  const unsigned int alignment = AARCH64_HWP_ALIGNMENT;
  const unsigned int max_wp_len = AARCH64_HWP_MAX_LEN_PER_REG;

  /* As assumed by the algorithm.  */
  gdb_assert (alignment == max_wp_len);

  if (len <= 0)
    return;

  /* The address put into the hardware watchpoint value register must
     be aligned.  */
  offset = addr & (alignment - 1);
  aligned_addr = addr - offset;
  /* A non-zero BAS offset is only usable on fixed kernels (see PR
     external/20207); on buggy kernels watch from the aligned base.  */
  aligned_offset
    = kernel_supports_any_contiguous_range ? addr & (alignment - 1) : 0;

  gdb_assert (offset >= 0 && offset < alignment);
  gdb_assert (aligned_addr >= 0 && aligned_addr <= addr);
  gdb_assert (offset + len > 0);

  if (offset + len >= max_wp_len)
    {
      /* Need more than one watchpoint register; truncate at the
	 alignment boundary.  ADDR/LEN are advanced to the remainder
	 for the caller's next iteration.  */
      aligned_len
	= max_wp_len - (kernel_supports_any_contiguous_range ? offset : 0);
      len -= (max_wp_len - offset);
      addr += (max_wp_len - offset);
      gdb_assert ((addr & (alignment - 1)) == 0);
    }
  else
    {
      /* Find the smallest valid length that is large enough to
	 accommodate this watchpoint.  On fixed kernels the exact byte
	 range is expressible via BAS, so LEN is used unchanged.  */
      static const unsigned char
	aligned_len_array[AARCH64_HWP_MAX_LEN_PER_REG] =
	{ 1, 2, 4, 4, 8, 8, 8, 8 };

      aligned_len = (kernel_supports_any_contiguous_range
		     ? len : aligned_len_array[offset + len - 1]);
      addr += len;
      len = 0;
    }

  /* Report results through whichever out parameters were supplied.  */
  if (aligned_addr_p)
    *aligned_addr_p = aligned_addr;
  if (aligned_offset_p)
    *aligned_offset_p = aligned_offset;
  if (aligned_len_p)
    *aligned_len_p = aligned_len;
  if (next_addr_p)
    *next_addr_p = addr;
  if (next_len_p)
    *next_len_p = len;
  /* Advance the original-address cursor to the next doubleword.  */
  if (next_addr_orig_p)
    *next_addr_orig_p = align_down (*next_addr_orig_p + alignment, alignment);
}
280
/* Helper for aarch64_notify_debug_reg_change.  Records the
   information about the change of one hardware breakpoint/watchpoint
   setting for the thread LWP.  IDX is the index of the changed
   register pair; IS_WATCHPOINT selects the watchpoint vs breakpoint
   change mask.  Always returns 0 so iterate_over_lwps visits every
   thread.
   N.B.  The actual updating of hardware debug registers is not
   carried out until the moment the thread is resumed.  */

static int
debug_reg_change_callback (struct lwp_info *lwp, int is_watchpoint,
			   unsigned int idx)
{
  int tid = ptid_of_lwp (lwp).lwp ();
  struct arch_lwp_info *info = lwp_arch_private_info (lwp);
  dr_changed_t *dr_changed_ptr;
  dr_changed_t dr_changed;

  /* Lazily allocate the per-LWP arch private data the first time a
     change is recorded for this thread.  */
  if (info == NULL)
    {
      info = XCNEW (struct arch_lwp_info);
      lwp_set_arch_private_info (lwp, info);
    }

  if (show_debug_regs)
    {
      debug_printf ("debug_reg_change_callback: \n\tOn entry:\n");
      debug_printf ("\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  dr_changed_ptr = is_watchpoint ? &info->dr_changed_wp
    : &info->dr_changed_bp;
  dr_changed = *dr_changed_ptr;

  gdb_assert (idx >= 0
	      && (idx <= (is_watchpoint ? aarch64_num_wp_regs
			  : aarch64_num_bp_regs)));

  /* The actual update is done later just before resuming the lwp,
     we just mark that one register pair needs updating.  */
  DR_MARK_N_CHANGED (dr_changed, idx);
  *dr_changed_ptr = dr_changed;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  if (show_debug_regs)
    {
      debug_printf ("\tOn exit:\n\ttid%d, dr_changed_bp=0x%s, "
		    "dr_changed_wp=0x%s\n", tid,
		    phex (info->dr_changed_bp, 8),
		    phex (info->dr_changed_wp, 8));
    }

  return 0;
}
339
340 /* Notify each thread that their IDXth breakpoint/watchpoint register
341 pair needs to be updated. The message will be recorded in each
342 thread's arch-specific data area, the actual updating will be done
343 when the thread is resumed. */
344
345 static void
aarch64_notify_debug_reg_change(const struct aarch64_debug_reg_state * state,int is_watchpoint,unsigned int idx)346 aarch64_notify_debug_reg_change (const struct aarch64_debug_reg_state *state,
347 int is_watchpoint, unsigned int idx)
348 {
349 ptid_t pid_ptid = ptid_t (current_lwp_ptid ().pid ());
350
351 iterate_over_lwps (pid_ptid, [=] (struct lwp_info *info)
352 {
353 return debug_reg_change_callback (info,
354 is_watchpoint,
355 idx);
356 });
357 }
358
/* Reconfigure STATE to be compatible with Linux kernels with the PR
   external/20207 bug.  This is called when
   KERNEL_SUPPORTS_ANY_CONTIGUOUS_RANGE transitions to false.  Note we
   don't try to support combining watchpoints with matching (and thus
   shared) masks, as it's too late when we get here.  On buggy
   kernels, GDB will try to first setup the perfect matching ranges,
   which will run out of registers before this function can merge
   them.  It doesn't look like worth the effort to improve that, given
   eventually buggy kernels will be phased out.  */

static void
aarch64_downgrade_regs (struct aarch64_debug_reg_state *state)
{
  for (int i = 0; i < aarch64_num_wp_regs; ++i)
    if ((state->dr_ctrl_wp[i] & 1) != 0)
      {
	gdb_assert (state->dr_ref_count_wp[i] != 0);
	/* Byte address select bits of this watchpoint's control
	   register.  */
	uint8_t mask_orig = (state->dr_ctrl_wp[i] >> 5) & 0xff;
	gdb_assert (mask_orig != 0);
	/* The only BAS values a buggy kernel accepts.  */
	static const uint8_t old_valid[] = { 0x01, 0x03, 0x0f, 0xff };
	uint8_t mask = 0;
	/* Pick the smallest kernel-accepted mask covering the
	   requested one.  */
	for (const uint8_t old_mask : old_valid)
	  if (mask_orig <= old_mask)
	    {
	      mask = old_mask;
	      break;
	    }
	gdb_assert (mask != 0);

	/* No update needed for this watchpoint?  */
	if (mask == mask_orig)
	  continue;
	/* Widen the mask and re-anchor the watchpoint at the aligned
	   base address.  */
	state->dr_ctrl_wp[i] |= mask << 5;
	state->dr_addr_wp[i]
	  = align_down (state->dr_addr_wp[i], AARCH64_HWP_ALIGNMENT);

	/* Try to match duplicate entries.  */
	for (int j = 0; j < i; ++j)
	  if ((state->dr_ctrl_wp[j] & 1) != 0
	      && state->dr_addr_wp[j] == state->dr_addr_wp[i]
	      && state->dr_addr_orig_wp[j] == state->dr_addr_orig_wp[i]
	      && state->dr_ctrl_wp[j] == state->dr_ctrl_wp[i])
	    {
	      /* Entry J now covers entry I: fold I's references into
		 J and free entry I.  */
	      state->dr_ref_count_wp[j] += state->dr_ref_count_wp[i];
	      state->dr_ref_count_wp[i] = 0;
	      state->dr_addr_wp[i] = 0;
	      state->dr_addr_orig_wp[i] = 0;
	      state->dr_ctrl_wp[i] &= ~1;
	      break;
	    }

	aarch64_notify_debug_reg_change (state, 1 /* is_watchpoint */, i);
      }
}
413
/* Record the insertion of one breakpoint/watchpoint, as represented
   by ADDR and CTRL, in the process' arch-specific data area *STATE.
   OFFSET and LEN describe the watched byte range within the aligned
   doubleword; ADDR_ORIG is the original (possibly unaligned)
   user-requested address (callers pass -1 for breakpoints, which
   keep no such bookkeeping).  Return 0 on success, -1 if no register
   slot is available.  */

static int
aarch64_dr_state_insert_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, idx, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  gdb_assert (aarch64_point_is_aligned (is_watchpoint, addr, len));
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints track no separate original address.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find an existing or free register in our cache.  */
  idx = -1;
  for (i = 0; i < num_regs; ++i)
    {
      if ((dr_ctrl_p[i] & 1) == 0)
	{
	  gdb_assert (dr_ref_count[i] == 0);
	  idx = i;
	  /* no break; continue hunting for an existing one.  */
	}
      else if (dr_addr_p[i] == addr
	       && (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	       && dr_ctrl_p[i] == ctrl)
	{
	  gdb_assert (dr_ref_count[i] != 0);
	  idx = i;
	  break;
	}
    }

  /* No space.  */
  if (idx == -1)
    return -1;

  /* Update our cache.  */
  if ((dr_ctrl_p[idx] & 1) == 0)
    {
      /* New entry: record the pair and start the reference count at
	 one.  */
      dr_addr_p[idx] = addr;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[idx] = addr_orig;
      dr_ctrl_p[idx] = ctrl;
      dr_ref_count[idx] = 1;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, idx);
    }
  else
    {
      /* Existing entry: share the already-programmed register.  */
      dr_ref_count[idx]++;
    }

  return 0;
}
493
/* Record the removal of one breakpoint/watchpoint, as represented by
   ADDR and CTRL, in the process' arch-specific data area *STATE.
   TYPE, OFFSET, LEN and ADDR_ORIG must match the values used at
   insertion time.  Return 0 on success, -1 if no matching entry is
   found.  */

static int
aarch64_dr_state_remove_one_point (struct aarch64_debug_reg_state *state,
				   enum target_hw_bp_type type,
				   CORE_ADDR addr, int offset, int len,
				   CORE_ADDR addr_orig)
{
  int i, num_regs, is_watchpoint;
  unsigned int ctrl, *dr_ctrl_p, *dr_ref_count;
  CORE_ADDR *dr_addr_p, *dr_addr_orig_p;

  /* Set up state pointers.  */
  is_watchpoint = (type != hw_execute);
  if (is_watchpoint)
    {
      num_regs = aarch64_num_wp_regs;
      dr_addr_p = state->dr_addr_wp;
      dr_addr_orig_p = state->dr_addr_orig_wp;
      dr_ctrl_p = state->dr_ctrl_wp;
      dr_ref_count = state->dr_ref_count_wp;
    }
  else
    {
      num_regs = aarch64_num_bp_regs;
      dr_addr_p = state->dr_addr_bp;
      /* Breakpoints track no separate original address.  */
      dr_addr_orig_p = nullptr;
      dr_ctrl_p = state->dr_ctrl_bp;
      dr_ref_count = state->dr_ref_count_bp;
    }

  ctrl = aarch64_point_encode_ctrl_reg (type, offset, len);

  /* Find the entry that matches the ADDR and CTRL.  */
  for (i = 0; i < num_regs; ++i)
    if (dr_addr_p[i] == addr
	&& (dr_addr_orig_p == nullptr || dr_addr_orig_p[i] == addr_orig)
	&& dr_ctrl_p[i] == ctrl)
      {
	gdb_assert (dr_ref_count[i] != 0);
	break;
      }

  /* Not found.  */
  if (i == num_regs)
    return -1;

  /* Clear our cache.  Only when the last reference goes away is the
     register pair actually released and the change broadcast.  */
  if (--dr_ref_count[i] == 0)
    {
      /* Clear the enable bit.  */
      ctrl &= ~1;
      dr_addr_p[i] = 0;
      if (dr_addr_orig_p != nullptr)
	dr_addr_orig_p[i] = 0;
      dr_ctrl_p[i] = ctrl;
      /* Notify the change.  */
      aarch64_notify_debug_reg_change (state, is_watchpoint, i);
    }

  return 0;
}
557
558 int
aarch64_handle_breakpoint(enum target_hw_bp_type type,CORE_ADDR addr,int len,int is_insert,struct aarch64_debug_reg_state * state)559 aarch64_handle_breakpoint (enum target_hw_bp_type type, CORE_ADDR addr,
560 int len, int is_insert,
561 struct aarch64_debug_reg_state *state)
562 {
563 if (is_insert)
564 {
565 /* The hardware breakpoint on AArch64 should always be 4-byte
566 aligned, but on AArch32, it can be 2-byte aligned. Note that
567 we only check the alignment on inserting breakpoint because
568 aarch64_point_is_aligned needs the inferior_ptid inferior's
569 regcache to decide whether the inferior is 32-bit or 64-bit.
570 However when GDB follows the parent process and detach breakpoints
571 from child process, inferior_ptid is the child ptid, but the
572 child inferior doesn't exist in GDB's view yet. */
573 if (!aarch64_point_is_aligned (0 /* is_watchpoint */ , addr, len))
574 return -1;
575
576 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, -1);
577 }
578 else
579 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, -1);
580 }
581
582 /* This is essentially the same as aarch64_handle_breakpoint, apart
583 from that it is an aligned watchpoint to be handled. */
584
585 static int
aarch64_handle_aligned_watchpoint(enum target_hw_bp_type type,CORE_ADDR addr,int len,int is_insert,struct aarch64_debug_reg_state * state)586 aarch64_handle_aligned_watchpoint (enum target_hw_bp_type type,
587 CORE_ADDR addr, int len, int is_insert,
588 struct aarch64_debug_reg_state *state)
589 {
590 if (is_insert)
591 return aarch64_dr_state_insert_one_point (state, type, addr, 0, len, addr);
592 else
593 return aarch64_dr_state_remove_one_point (state, type, addr, 0, len, addr);
594 }
595
/* Insert/remove unaligned watchpoint by calling
   aarch64_align_watchpoint repeatedly until the whole watched region,
   as represented by ADDR and LEN, has been properly aligned and ready
   to be written to one or more hardware watchpoint registers.
   IS_INSERT indicates whether this is an insertion or a deletion.
   Return 0 if succeed.  */

static int
aarch64_handle_unaligned_watchpoint (enum target_hw_bp_type type,
				     CORE_ADDR addr, int len, int is_insert,
				     struct aarch64_debug_reg_state *state)
{
  /* Original (unaligned) address of the chunk handled by the current
     iteration.  */
  CORE_ADDR addr_orig = addr;

  while (len > 0)
    {
      CORE_ADDR aligned_addr;
      int aligned_offset, aligned_len, ret;
      /* Original-address cursor for the next iteration; advanced by
	 aarch64_align_watchpoint.  */
      CORE_ADDR addr_orig_next = addr_orig;

      /* Compute the aligned chunk and advance ADDR/LEN past it.  */
      aarch64_align_watchpoint (addr, len, &aligned_addr, &aligned_offset,
				&aligned_len, &addr, &len, &addr_orig_next);

      if (is_insert)
	ret = aarch64_dr_state_insert_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);
      else
	ret = aarch64_dr_state_remove_one_point (state, type, aligned_addr,
						 aligned_offset,
						 aligned_len, addr_orig);

      if (show_debug_regs)
	debug_printf ("handle_unaligned_watchpoint: is_insert: %d\n"
		      "                             "
		      "aligned_addr: %s, aligned_len: %d\n"
		      "                             "
		      "addr_orig: %s\n"
		      "                             "
		      "next_addr: %s,    next_len: %d\n"
		      "                             "
		      "addr_orig_next: %s\n",
		      is_insert, core_addr_to_string_nz (aligned_addr),
		      aligned_len, core_addr_to_string_nz (addr_orig),
		      core_addr_to_string_nz (addr), len,
		      core_addr_to_string_nz (addr_orig_next));

      addr_orig = addr_orig_next;

      /* Stop at the first chunk that fails; earlier chunks are left
	 recorded, matching the historical behavior.  */
      if (ret != 0)
	return ret;
    }

  return 0;
}
651
652 int
aarch64_handle_watchpoint(enum target_hw_bp_type type,CORE_ADDR addr,int len,int is_insert,struct aarch64_debug_reg_state * state)653 aarch64_handle_watchpoint (enum target_hw_bp_type type, CORE_ADDR addr,
654 int len, int is_insert,
655 struct aarch64_debug_reg_state *state)
656 {
657 if (aarch64_point_is_aligned (1 /* is_watchpoint */ , addr, len))
658 return aarch64_handle_aligned_watchpoint (type, addr, len, is_insert,
659 state);
660 else
661 return aarch64_handle_unaligned_watchpoint (type, addr, len, is_insert,
662 state);
663 }
664
/* Call ptrace to set the thread TID's hardware breakpoint/watchpoint
   registers with data from *STATE.  WATCHPOINT non-zero selects the
   watchpoint regset (NT_ARM_HW_WATCH), zero the breakpoint regset
   (NT_ARM_HW_BREAK).  */

void
aarch64_linux_set_debug_regs (struct aarch64_debug_reg_state *state,
			      int tid, int watchpoint)
{
  int i, count;
  struct iovec iov;
  struct user_hwdebug_state regs;
  const CORE_ADDR *addr;
  const unsigned int *ctrl;

  memset (&regs, 0, sizeof (regs));
  iov.iov_base = &regs;
  count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
  addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
  ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
  if (count == 0)
    return;
  /* Only transfer the header plus the register pairs actually in
     use.  */
  iov.iov_len = (offsetof (struct user_hwdebug_state, dbg_regs)
		 + count * sizeof (regs.dbg_regs[0]));

  for (i = 0; i < count; i++)
    {
      regs.dbg_regs[i].addr = addr[i];
      regs.dbg_regs[i].ctrl = ctrl[i];
    }

  if (ptrace (PTRACE_SETREGSET, tid,
	      watchpoint ? NT_ARM_HW_WATCH : NT_ARM_HW_BREAK,
	      (void *) &iov))
    {
      /* Handle Linux kernels with the PR external/20207 bug.  On the
	 first EINVAL, assume the buggy kernel, downgrade the cached
	 state to kernel-accepted BAS masks and retry once; the flag
	 being cleared first guarantees the recursion terminates.  */
      if (watchpoint && errno == EINVAL
	  && kernel_supports_any_contiguous_range)
	{
	  kernel_supports_any_contiguous_range = false;
	  aarch64_downgrade_regs (state);
	  aarch64_linux_set_debug_regs (state, tid, watchpoint);
	  return;
	}
      error (_("Unexpected error setting hardware debug registers"));
    }
}
710
711 /* See nat/aarch64-linux-hw-point.h. */
712
713 bool
aarch64_linux_any_set_debug_regs_state(aarch64_debug_reg_state * state,bool watchpoint)714 aarch64_linux_any_set_debug_regs_state (aarch64_debug_reg_state *state,
715 bool watchpoint)
716 {
717 int count = watchpoint ? aarch64_num_wp_regs : aarch64_num_bp_regs;
718 if (count == 0)
719 return false;
720
721 const CORE_ADDR *addr = watchpoint ? state->dr_addr_wp : state->dr_addr_bp;
722 const unsigned int *ctrl = watchpoint ? state->dr_ctrl_wp : state->dr_ctrl_bp;
723
724 for (int i = 0; i < count; i++)
725 if (addr[i] != 0 || ctrl[i] != 0)
726 return true;
727
728 return false;
729 }
730
731 /* Print the values of the cached breakpoint/watchpoint registers. */
732
733 void
aarch64_show_debug_reg_state(struct aarch64_debug_reg_state * state,const char * func,CORE_ADDR addr,int len,enum target_hw_bp_type type)734 aarch64_show_debug_reg_state (struct aarch64_debug_reg_state *state,
735 const char *func, CORE_ADDR addr,
736 int len, enum target_hw_bp_type type)
737 {
738 int i;
739
740 debug_printf ("%s", func);
741 if (addr || len)
742 debug_printf (" (addr=0x%08lx, len=%d, type=%s)",
743 (unsigned long) addr, len,
744 type == hw_write ? "hw-write-watchpoint"
745 : (type == hw_read ? "hw-read-watchpoint"
746 : (type == hw_access ? "hw-access-watchpoint"
747 : (type == hw_execute ? "hw-breakpoint"
748 : "??unknown??"))));
749 debug_printf (":\n");
750
751 debug_printf ("\tBREAKPOINTs:\n");
752 for (i = 0; i < aarch64_num_bp_regs; i++)
753 debug_printf ("\tBP%d: addr=%s, ctrl=0x%08x, ref.count=%d\n",
754 i, core_addr_to_string_nz (state->dr_addr_bp[i]),
755 state->dr_ctrl_bp[i], state->dr_ref_count_bp[i]);
756
757 debug_printf ("\tWATCHPOINTs:\n");
758 for (i = 0; i < aarch64_num_wp_regs; i++)
759 debug_printf ("\tWP%d: addr=%s (orig=%s), ctrl=0x%08x, ref.count=%d\n",
760 i, core_addr_to_string_nz (state->dr_addr_wp[i]),
761 core_addr_to_string_nz (state->dr_addr_orig_wp[i]),
762 state->dr_ctrl_wp[i], state->dr_ref_count_wp[i]);
763 }
764
765 /* Get the hardware debug register capacity information from the
766 process represented by TID. */
767
768 void
aarch64_linux_get_debug_reg_capacity(int tid)769 aarch64_linux_get_debug_reg_capacity (int tid)
770 {
771 struct iovec iov;
772 struct user_hwdebug_state dreg_state;
773
774 iov.iov_base = &dreg_state;
775 iov.iov_len = sizeof (dreg_state);
776
777 /* Get hardware watchpoint register info. */
778 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_WATCH, &iov) == 0
779 && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
780 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
781 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
782 {
783 aarch64_num_wp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
784 if (aarch64_num_wp_regs > AARCH64_HWP_MAX_NUM)
785 {
786 warning (_("Unexpected number of hardware watchpoint registers"
787 " reported by ptrace, got %d, expected %d."),
788 aarch64_num_wp_regs, AARCH64_HWP_MAX_NUM);
789 aarch64_num_wp_regs = AARCH64_HWP_MAX_NUM;
790 }
791 }
792 else
793 {
794 warning (_("Unable to determine the number of hardware watchpoints"
795 " available."));
796 aarch64_num_wp_regs = 0;
797 }
798
799 /* Get hardware breakpoint register info. */
800 if (ptrace (PTRACE_GETREGSET, tid, NT_ARM_HW_BREAK, &iov) == 0
801 && (AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8
802 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_1
803 || AARCH64_DEBUG_ARCH (dreg_state.dbg_info) == AARCH64_DEBUG_ARCH_V8_2))
804 {
805 aarch64_num_bp_regs = AARCH64_DEBUG_NUM_SLOTS (dreg_state.dbg_info);
806 if (aarch64_num_bp_regs > AARCH64_HBP_MAX_NUM)
807 {
808 warning (_("Unexpected number of hardware breakpoint registers"
809 " reported by ptrace, got %d, expected %d."),
810 aarch64_num_bp_regs, AARCH64_HBP_MAX_NUM);
811 aarch64_num_bp_regs = AARCH64_HBP_MAX_NUM;
812 }
813 }
814 else
815 {
816 warning (_("Unable to determine the number of hardware breakpoints"
817 " available."));
818 aarch64_num_bp_regs = 0;
819 }
820 }
821
822 /* Return true if we can watch a memory region that starts address
823 ADDR and whose length is LEN in bytes. */
824
825 int
aarch64_linux_region_ok_for_watchpoint(CORE_ADDR addr,int len)826 aarch64_linux_region_ok_for_watchpoint (CORE_ADDR addr, int len)
827 {
828 CORE_ADDR aligned_addr;
829
830 /* Can not set watchpoints for zero or negative lengths. */
831 if (len <= 0)
832 return 0;
833
834 /* Must have hardware watchpoint debug register(s). */
835 if (aarch64_num_wp_regs == 0)
836 return 0;
837
838 /* We support unaligned watchpoint address and arbitrary length,
839 as long as the size of the whole watched area after alignment
840 doesn't exceed size of the total area that all watchpoint debug
841 registers can watch cooperatively.
842
843 This is a very relaxed rule, but unfortunately there are
844 limitations, e.g. false-positive hits, due to limited support of
845 hardware debug registers in the kernel. See comment above
846 aarch64_align_watchpoint for more information. */
847
848 aligned_addr = addr & ~(AARCH64_HWP_MAX_LEN_PER_REG - 1);
849 if (aligned_addr + aarch64_num_wp_regs * AARCH64_HWP_MAX_LEN_PER_REG
850 < addr + len)
851 return 0;
852
853 /* All tests passed so we are likely to be able to set the watchpoint.
854 The reason that it is 'likely' rather than 'must' is because
855 we don't check the current usage of the watchpoint registers, and
856 there may not be enough registers available for this watchpoint.
857 Ideally we should check the cached debug register state, however
858 the checking is costly. */
859 return 1;
860 }
861