1 /* Common target dependent code for GDB on AArch64 systems.
2 
3    Copyright (C) 2009-2021 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GDB.
7 
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    This program is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20 
21 #include "defs.h"
22 
23 #include "frame.h"
24 #include "gdbcmd.h"
25 #include "gdbcore.h"
26 #include "dis-asm.h"
27 #include "regcache.h"
28 #include "reggroups.h"
29 #include "value.h"
30 #include "arch-utils.h"
31 #include "osabi.h"
32 #include "frame-unwind.h"
33 #include "frame-base.h"
34 #include "trad-frame.h"
35 #include "objfiles.h"
36 #include "dwarf2.h"
37 #include "dwarf2/frame.h"
38 #include "gdbtypes.h"
39 #include "prologue-value.h"
40 #include "target-descriptions.h"
41 #include "user-regs.h"
42 #include "ax-gdb.h"
43 #include "gdbsupport/selftest.h"
44 
45 #include "aarch64-tdep.h"
46 #include "aarch64-ravenscar-thread.h"
47 
48 #include "record.h"
49 #include "record-full.h"
50 #include "arch/aarch64-insn.h"
51 #include "gdbarch.h"
52 
53 #include "opcode/aarch64.h"
54 #include <algorithm>
55 
56 /* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
57    four members.  */
58 #define HA_MAX_NUM_FLDS		4
59 
60 /* All possible aarch64 target descriptors.  */
61 static target_desc *tdesc_aarch64_list[AARCH64_MAX_SVE_VQ + 1][2/*pauth*/][2 /* mte */];
62 
63 /* The standard register names, and all the valid aliases for them.  */
64 static const struct
65 {
66   const char *const name;
67   int regnum;
68 } aarch64_register_aliases[] =
69 {
70   /* 64-bit register names.  */
71   {"fp", AARCH64_FP_REGNUM},
72   {"lr", AARCH64_LR_REGNUM},
73   {"sp", AARCH64_SP_REGNUM},
74 
75   /* 32-bit register names.  */
76   {"w0", AARCH64_X0_REGNUM + 0},
77   {"w1", AARCH64_X0_REGNUM + 1},
78   {"w2", AARCH64_X0_REGNUM + 2},
79   {"w3", AARCH64_X0_REGNUM + 3},
80   {"w4", AARCH64_X0_REGNUM + 4},
81   {"w5", AARCH64_X0_REGNUM + 5},
82   {"w6", AARCH64_X0_REGNUM + 6},
83   {"w7", AARCH64_X0_REGNUM + 7},
84   {"w8", AARCH64_X0_REGNUM + 8},
85   {"w9", AARCH64_X0_REGNUM + 9},
86   {"w10", AARCH64_X0_REGNUM + 10},
87   {"w11", AARCH64_X0_REGNUM + 11},
88   {"w12", AARCH64_X0_REGNUM + 12},
89   {"w13", AARCH64_X0_REGNUM + 13},
90   {"w14", AARCH64_X0_REGNUM + 14},
91   {"w15", AARCH64_X0_REGNUM + 15},
92   {"w16", AARCH64_X0_REGNUM + 16},
93   {"w17", AARCH64_X0_REGNUM + 17},
94   {"w18", AARCH64_X0_REGNUM + 18},
95   {"w19", AARCH64_X0_REGNUM + 19},
96   {"w20", AARCH64_X0_REGNUM + 20},
97   {"w21", AARCH64_X0_REGNUM + 21},
98   {"w22", AARCH64_X0_REGNUM + 22},
99   {"w23", AARCH64_X0_REGNUM + 23},
100   {"w24", AARCH64_X0_REGNUM + 24},
101   {"w25", AARCH64_X0_REGNUM + 25},
102   {"w26", AARCH64_X0_REGNUM + 26},
103   {"w27", AARCH64_X0_REGNUM + 27},
104   {"w28", AARCH64_X0_REGNUM + 28},
105   {"w29", AARCH64_X0_REGNUM + 29},
106   {"w30", AARCH64_X0_REGNUM + 30},
107 
108   /*  specials */
109   {"ip0", AARCH64_X0_REGNUM + 16},
110   {"ip1", AARCH64_X0_REGNUM + 17}
111 };
112 
113 /* The required core 'R' registers.  */
114 static const char *const aarch64_r_register_names[] =
115 {
116   /* These registers must appear in consecutive RAW register number
117      order and they must begin with AARCH64_X0_REGNUM! */
118   "x0", "x1", "x2", "x3",
119   "x4", "x5", "x6", "x7",
120   "x8", "x9", "x10", "x11",
121   "x12", "x13", "x14", "x15",
122   "x16", "x17", "x18", "x19",
123   "x20", "x21", "x22", "x23",
124   "x24", "x25", "x26", "x27",
125   "x28", "x29", "x30", "sp",
126   "pc", "cpsr"
127 };
128 
129 /* The FP/SIMD 'V' registers.  */
130 static const char *const aarch64_v_register_names[] =
131 {
132   /* These registers must appear in consecutive RAW register number
133      order and they must begin with AARCH64_V0_REGNUM! */
134   "v0", "v1", "v2", "v3",
135   "v4", "v5", "v6", "v7",
136   "v8", "v9", "v10", "v11",
137   "v12", "v13", "v14", "v15",
138   "v16", "v17", "v18", "v19",
139   "v20", "v21", "v22", "v23",
140   "v24", "v25", "v26", "v27",
141   "v28", "v29", "v30", "v31",
142   "fpsr",
143   "fpcr"
144 };
145 
146 /* The SVE 'Z' and 'P' registers.  */
147 static const char *const aarch64_sve_register_names[] =
148 {
149   /* These registers must appear in consecutive RAW register number
150      order and they must begin with AARCH64_SVE_Z0_REGNUM! */
151   "z0", "z1", "z2", "z3",
152   "z4", "z5", "z6", "z7",
153   "z8", "z9", "z10", "z11",
154   "z12", "z13", "z14", "z15",
155   "z16", "z17", "z18", "z19",
156   "z20", "z21", "z22", "z23",
157   "z24", "z25", "z26", "z27",
158   "z28", "z29", "z30", "z31",
159   "fpsr", "fpcr",
160   "p0", "p1", "p2", "p3",
161   "p4", "p5", "p6", "p7",
162   "p8", "p9", "p10", "p11",
163   "p12", "p13", "p14", "p15",
164   "ffr", "vg"
165 };
166 
167 static const char *const aarch64_pauth_register_names[] =
168 {
169   /* Authentication mask for data pointer.  */
170   "pauth_dmask",
171   /* Authentication mask for code pointer.  */
172   "pauth_cmask"
173 };
174 
175 static const char *const aarch64_mte_register_names[] =
176 {
177   /* Tag Control Register.  */
178   "tag_ctl"
179 };
180 
181 /* AArch64 prologue cache structure.  */
182 struct aarch64_prologue_cache
183 {
184   /* The program counter at the start of the function.  It is used to
185      identify this frame as a prologue frame.  */
186   CORE_ADDR func;
187 
188   /* The program counter at the time this frame was created; i.e. where
189      this function was called from.  It is used to identify this frame as a
190      stub frame.  */
191   CORE_ADDR prev_pc;
192 
193   /* The stack pointer at the time this frame was created; i.e. the
194      caller's stack pointer when this function was called.  It is used
195      to identify this frame.  */
196   CORE_ADDR prev_sp;
197 
198   /* Is the target available to read from?  */
199   int available_p;
200 
201   /* The frame base for this frame is just prev_sp - frame size.
202      FRAMESIZE is the distance from the frame pointer to the
203      initial stack pointer.  */
204   int framesize;
205 
206   /* The register used to hold the frame pointer for this frame.  */
207   int framereg;
208 
209   /* Saved register offsets.  */
210   trad_frame_saved_reg *saved_regs;
211 };
212 
213 static void
show_aarch64_debug(struct ui_file * file,int from_tty,struct cmd_list_element * c,const char * value)214 show_aarch64_debug (struct ui_file *file, int from_tty,
215 		    struct cmd_list_element *c, const char *value)
216 {
217   fprintf_filtered (file, _("AArch64 debugging is %s.\n"), value);
218 }
219 
220 namespace {
221 
222 /* Abstract instruction reader.  */
223 
224 class abstract_instruction_reader
225 {
226 public:
227   /* Read in one instruction.  */
228   virtual ULONGEST read (CORE_ADDR memaddr, int len,
229 			 enum bfd_endian byte_order) = 0;
230 };
231 
232 /* Instruction reader from real target.  */
233 
234 class instruction_reader : public abstract_instruction_reader
235 {
236  public:
read(CORE_ADDR memaddr,int len,enum bfd_endian byte_order)237   ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
238     override
239   {
240     return read_code_unsigned_integer (memaddr, len, byte_order);
241   }
242 };
243 
244 } // namespace
245 
246 /* If address signing is enabled, mask off the signature bits from the link
247    register, which is passed by value in ADDR, using the register values in
248    THIS_FRAME.  */
249 
250 static CORE_ADDR
aarch64_frame_unmask_lr(struct gdbarch_tdep * tdep,struct frame_info * this_frame,CORE_ADDR addr)251 aarch64_frame_unmask_lr (struct gdbarch_tdep *tdep,
252 			 struct frame_info *this_frame, CORE_ADDR addr)
253 {
254   if (tdep->has_pauth ()
255       && frame_unwind_register_unsigned (this_frame,
256 					 tdep->pauth_ra_state_regnum))
257     {
258       int cmask_num = AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base);
259       CORE_ADDR cmask = frame_unwind_register_unsigned (this_frame, cmask_num);
260       addr = addr & ~cmask;
261 
262       /* Record in the frame that the link register required unmasking.  */
263       set_frame_previous_pc_masked (this_frame);
264     }
265 
266   return addr;
267 }
268 
269 /* Implement the "get_pc_address_flags" gdbarch method.  */
270 
271 static std::string
aarch64_get_pc_address_flags(frame_info * frame,CORE_ADDR pc)272 aarch64_get_pc_address_flags (frame_info *frame, CORE_ADDR pc)
273 {
274   if (pc != 0 && get_frame_pc_masked (frame))
275     return "PAC";
276 
277   return "";
278 }
279 
280 /* Analyze a prologue, looking for a recognizable stack frame
281    and frame pointer.  Scan until we encounter a store that could
282    clobber the stack frame unexpectedly, or an unknown instruction.  */
283 
284 static CORE_ADDR
aarch64_analyze_prologue(struct gdbarch * gdbarch,CORE_ADDR start,CORE_ADDR limit,struct aarch64_prologue_cache * cache,abstract_instruction_reader & reader)285 aarch64_analyze_prologue (struct gdbarch *gdbarch,
286 			  CORE_ADDR start, CORE_ADDR limit,
287 			  struct aarch64_prologue_cache *cache,
288 			  abstract_instruction_reader& reader)
289 {
290   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
291   int i;
292 
293   /* Whether the stack has been set.  This should be true when we notice a SP
294      to FP move or if we are using the SP as the base register for storing
295      data, in case the FP is ommitted.  */
296   bool seen_stack_set = false;
297 
298   /* Track X registers and D registers in prologue.  */
299   pv_t regs[AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT];
300 
301   for (i = 0; i < AARCH64_X_REGISTER_COUNT + AARCH64_D_REGISTER_COUNT; i++)
302     regs[i] = pv_register (i, 0);
303   pv_area stack (AARCH64_SP_REGNUM, gdbarch_addr_bit (gdbarch));
304 
305   for (; start < limit; start += 4)
306     {
307       uint32_t insn;
308       aarch64_inst inst;
309 
310       insn = reader.read (start, 4, byte_order_for_code);
311 
312       if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
313 	break;
314 
315       if (inst.opcode->iclass == addsub_imm
316 	  && (inst.opcode->op == OP_ADD
317 	      || strcmp ("sub", inst.opcode->name) == 0))
318 	{
319 	  unsigned rd = inst.operands[0].reg.regno;
320 	  unsigned rn = inst.operands[1].reg.regno;
321 
322 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 3);
323 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd_SP);
324 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn_SP);
325 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_AIMM);
326 
327 	  if (inst.opcode->op == OP_ADD)
328 	    {
329 	      regs[rd] = pv_add_constant (regs[rn],
330 					  inst.operands[2].imm.value);
331 	    }
332 	  else
333 	    {
334 	      regs[rd] = pv_add_constant (regs[rn],
335 					  -inst.operands[2].imm.value);
336 	    }
337 
338 	  /* Did we move SP to FP?  */
339 	  if (rn == AARCH64_SP_REGNUM && rd == AARCH64_FP_REGNUM)
340 	    seen_stack_set = true;
341 	}
342       else if (inst.opcode->iclass == pcreladdr
343 	       && inst.operands[1].type == AARCH64_OPND_ADDR_ADRP)
344 	{
345 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
346 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
347 
348 	  regs[inst.operands[0].reg.regno] = pv_unknown ();
349 	}
350       else if (inst.opcode->iclass == branch_imm)
351 	{
352 	  /* Stop analysis on branch.  */
353 	  break;
354 	}
355       else if (inst.opcode->iclass == condbranch)
356 	{
357 	  /* Stop analysis on branch.  */
358 	  break;
359 	}
360       else if (inst.opcode->iclass == branch_reg)
361 	{
362 	  /* Stop analysis on branch.  */
363 	  break;
364 	}
365       else if (inst.opcode->iclass == compbranch)
366 	{
367 	  /* Stop analysis on branch.  */
368 	  break;
369 	}
370       else if (inst.opcode->op == OP_MOVZ)
371 	{
372 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
373 
374 	  /* If this shows up before we set the stack, keep going.  Otherwise
375 	     stop the analysis.  */
376 	  if (seen_stack_set)
377 	    break;
378 
379 	  regs[inst.operands[0].reg.regno] = pv_unknown ();
380 	}
381       else if (inst.opcode->iclass == log_shift
382 	       && strcmp (inst.opcode->name, "orr") == 0)
383 	{
384 	  unsigned rd = inst.operands[0].reg.regno;
385 	  unsigned rn = inst.operands[1].reg.regno;
386 	  unsigned rm = inst.operands[2].reg.regno;
387 
388 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rd);
389 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rn);
390 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_Rm_SFT);
391 
392 	  if (inst.operands[2].shifter.amount == 0
393 	      && rn == AARCH64_SP_REGNUM)
394 	    regs[rd] = regs[rm];
395 	  else
396 	    {
397 	      aarch64_debug_printf ("prologue analysis gave up "
398 				    "addr=%s opcode=0x%x (orr x register)",
399 				    core_addr_to_string_nz (start), insn);
400 
401 	      break;
402 	    }
403 	}
404       else if (inst.opcode->op == OP_STUR)
405 	{
406 	  unsigned rt = inst.operands[0].reg.regno;
407 	  unsigned rn = inst.operands[1].addr.base_regno;
408 	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
409 
410 	  gdb_assert (aarch64_num_of_operands (inst.opcode) == 2);
411 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt);
412 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_ADDR_SIMM9);
413 	  gdb_assert (!inst.operands[1].addr.offset.is_reg);
414 
415 	  stack.store
416 	    (pv_add_constant (regs[rn], inst.operands[1].addr.offset.imm),
417 	     size, regs[rt]);
418 
419 	  /* Are we storing with SP as a base?  */
420 	  if (rn == AARCH64_SP_REGNUM)
421 	    seen_stack_set = true;
422 	}
423       else if ((inst.opcode->iclass == ldstpair_off
424 		|| (inst.opcode->iclass == ldstpair_indexed
425 		    && inst.operands[2].addr.preind))
426 	       && strcmp ("stp", inst.opcode->name) == 0)
427 	{
428 	  /* STP with addressing mode Pre-indexed and Base register.  */
429 	  unsigned rt1;
430 	  unsigned rt2;
431 	  unsigned rn = inst.operands[2].addr.base_regno;
432 	  int32_t imm = inst.operands[2].addr.offset.imm;
433 	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
434 
435 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
436 		      || inst.operands[0].type == AARCH64_OPND_Ft);
437 	  gdb_assert (inst.operands[1].type == AARCH64_OPND_Rt2
438 		      || inst.operands[1].type == AARCH64_OPND_Ft2);
439 	  gdb_assert (inst.operands[2].type == AARCH64_OPND_ADDR_SIMM7);
440 	  gdb_assert (!inst.operands[2].addr.offset.is_reg);
441 
442 	  /* If recording this store would invalidate the store area
443 	     (perhaps because rn is not known) then we should abandon
444 	     further prologue analysis.  */
445 	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm)))
446 	    break;
447 
448 	  if (stack.store_would_trash (pv_add_constant (regs[rn], imm + 8)))
449 	    break;
450 
451 	  rt1 = inst.operands[0].reg.regno;
452 	  rt2 = inst.operands[1].reg.regno;
453 	  if (inst.operands[0].type == AARCH64_OPND_Ft)
454 	    {
455 	      rt1 += AARCH64_X_REGISTER_COUNT;
456 	      rt2 += AARCH64_X_REGISTER_COUNT;
457 	    }
458 
459 	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt1]);
460 	  stack.store (pv_add_constant (regs[rn], imm + size), size, regs[rt2]);
461 
462 	  if (inst.operands[2].addr.writeback)
463 	    regs[rn] = pv_add_constant (regs[rn], imm);
464 
465 	  /* Ignore the instruction that allocates stack space and sets
466 	     the SP.  */
467 	  if (rn == AARCH64_SP_REGNUM && !inst.operands[2].addr.writeback)
468 	    seen_stack_set = true;
469 	}
470       else if ((inst.opcode->iclass == ldst_imm9 /* Signed immediate.  */
471 		|| (inst.opcode->iclass == ldst_pos /* Unsigned immediate.  */
472 		    && (inst.opcode->op == OP_STR_POS
473 			|| inst.opcode->op == OP_STRF_POS)))
474 	       && inst.operands[1].addr.base_regno == AARCH64_SP_REGNUM
475 	       && strcmp ("str", inst.opcode->name) == 0)
476 	{
477 	  /* STR (immediate) */
478 	  unsigned int rt = inst.operands[0].reg.regno;
479 	  int32_t imm = inst.operands[1].addr.offset.imm;
480 	  unsigned int rn = inst.operands[1].addr.base_regno;
481 	  int size = aarch64_get_qualifier_esize (inst.operands[0].qualifier);
482 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_Rt
483 		      || inst.operands[0].type == AARCH64_OPND_Ft);
484 
485 	  if (inst.operands[0].type == AARCH64_OPND_Ft)
486 	    rt += AARCH64_X_REGISTER_COUNT;
487 
488 	  stack.store (pv_add_constant (regs[rn], imm), size, regs[rt]);
489 	  if (inst.operands[1].addr.writeback)
490 	    regs[rn] = pv_add_constant (regs[rn], imm);
491 
492 	  /* Are we storing with SP as a base?  */
493 	  if (rn == AARCH64_SP_REGNUM)
494 	    seen_stack_set = true;
495 	}
496       else if (inst.opcode->iclass == testbranch)
497 	{
498 	  /* Stop analysis on branch.  */
499 	  break;
500 	}
501       else if (inst.opcode->iclass == ic_system)
502 	{
503 	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
504 	  int ra_state_val = 0;
505 
506 	  if (insn == 0xd503233f /* paciasp.  */
507 	      || insn == 0xd503237f  /* pacibsp.  */)
508 	    {
509 	      /* Return addresses are mangled.  */
510 	      ra_state_val = 1;
511 	    }
512 	  else if (insn == 0xd50323bf /* autiasp.  */
513 		   || insn == 0xd50323ff /* autibsp.  */)
514 	    {
515 	      /* Return addresses are not mangled.  */
516 	      ra_state_val = 0;
517 	    }
518 	  else
519 	    {
520 	      aarch64_debug_printf ("prologue analysis gave up addr=%s"
521 				    " opcode=0x%x (iclass)",
522 				    core_addr_to_string_nz (start), insn);
523 	      break;
524 	    }
525 
526 	  if (tdep->has_pauth () && cache != nullptr)
527 	    {
528 	      int regnum = tdep->pauth_ra_state_regnum;
529 	      cache->saved_regs[regnum].set_value (ra_state_val);
530 	    }
531 	}
532       else
533 	{
534 	  aarch64_debug_printf ("prologue analysis gave up addr=%s"
535 				" opcode=0x%x",
536 				core_addr_to_string_nz (start), insn);
537 
538 	  break;
539 	}
540     }
541 
542   if (cache == NULL)
543     return start;
544 
545   if (pv_is_register (regs[AARCH64_FP_REGNUM], AARCH64_SP_REGNUM))
546     {
547       /* Frame pointer is fp.  Frame size is constant.  */
548       cache->framereg = AARCH64_FP_REGNUM;
549       cache->framesize = -regs[AARCH64_FP_REGNUM].k;
550     }
551   else if (pv_is_register (regs[AARCH64_SP_REGNUM], AARCH64_SP_REGNUM))
552     {
553       /* Try the stack pointer.  */
554       cache->framesize = -regs[AARCH64_SP_REGNUM].k;
555       cache->framereg = AARCH64_SP_REGNUM;
556     }
557   else
558     {
559       /* We're just out of luck.  We don't know where the frame is.  */
560       cache->framereg = -1;
561       cache->framesize = 0;
562     }
563 
564   for (i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
565     {
566       CORE_ADDR offset;
567 
568       if (stack.find_reg (gdbarch, i, &offset))
569 	cache->saved_regs[i].set_addr (offset);
570     }
571 
572   for (i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
573     {
574       int regnum = gdbarch_num_regs (gdbarch);
575       CORE_ADDR offset;
576 
577       if (stack.find_reg (gdbarch, i + AARCH64_X_REGISTER_COUNT,
578 			  &offset))
579 	cache->saved_regs[i + regnum + AARCH64_D0_REGNUM].set_addr (offset);
580     }
581 
582   return start;
583 }
584 
585 static CORE_ADDR
aarch64_analyze_prologue(struct gdbarch * gdbarch,CORE_ADDR start,CORE_ADDR limit,struct aarch64_prologue_cache * cache)586 aarch64_analyze_prologue (struct gdbarch *gdbarch,
587 			  CORE_ADDR start, CORE_ADDR limit,
588 			  struct aarch64_prologue_cache *cache)
589 {
590   instruction_reader reader;
591 
592   return aarch64_analyze_prologue (gdbarch, start, limit, cache,
593 				   reader);
594 }
595 
596 #if GDB_SELF_TEST
597 
598 namespace selftests {
599 
600 /* Instruction reader from manually cooked instruction sequences.  */
601 
602 class instruction_reader_test : public abstract_instruction_reader
603 {
604 public:
605   template<size_t SIZE>
instruction_reader_test(const uint32_t (& insns)[SIZE])606   explicit instruction_reader_test (const uint32_t (&insns)[SIZE])
607   : m_insns (insns), m_insns_size (SIZE)
608   {}
609 
read(CORE_ADDR memaddr,int len,enum bfd_endian byte_order)610   ULONGEST read (CORE_ADDR memaddr, int len, enum bfd_endian byte_order)
611     override
612   {
613     SELF_CHECK (len == 4);
614     SELF_CHECK (memaddr % 4 == 0);
615     SELF_CHECK (memaddr / 4 < m_insns_size);
616 
617     return m_insns[memaddr / 4];
618   }
619 
620 private:
621   const uint32_t *m_insns;
622   size_t m_insns_size;
623 };
624 
625 static void
aarch64_analyze_prologue_test(void)626 aarch64_analyze_prologue_test (void)
627 {
628   struct gdbarch_info info;
629 
630   info.bfd_arch_info = bfd_scan_arch ("aarch64");
631 
632   struct gdbarch *gdbarch = gdbarch_find_by_info (info);
633   SELF_CHECK (gdbarch != NULL);
634 
635   struct aarch64_prologue_cache cache;
636   cache.saved_regs = trad_frame_alloc_saved_regs (gdbarch);
637 
638   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
639 
640   /* Test the simple prologue in which frame pointer is used.  */
641   {
642     static const uint32_t insns[] = {
643       0xa9af7bfd, /* stp     x29, x30, [sp,#-272]! */
644       0x910003fd, /* mov     x29, sp */
645       0x97ffffe6, /* bl      0x400580 */
646     };
647     instruction_reader_test reader (insns);
648 
649     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
650     SELF_CHECK (end == 4 * 2);
651 
652     SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
653     SELF_CHECK (cache.framesize == 272);
654 
655     for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
656       {
657 	if (i == AARCH64_FP_REGNUM)
658 	  SELF_CHECK (cache.saved_regs[i].addr () == -272);
659 	else if (i == AARCH64_LR_REGNUM)
660 	  SELF_CHECK (cache.saved_regs[i].addr () == -264);
661 	else
662 	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
663 		      && cache.saved_regs[i].realreg () == i);
664       }
665 
666     for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
667       {
668 	int num_regs = gdbarch_num_regs (gdbarch);
669 	int regnum = i + num_regs + AARCH64_D0_REGNUM;
670 
671 	SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
672 		    && cache.saved_regs[regnum].realreg () == regnum);
673       }
674   }
675 
676   /* Test a prologue in which STR is used and frame pointer is not
677      used.  */
678   {
679     static const uint32_t insns[] = {
680       0xf81d0ff3, /* str	x19, [sp, #-48]! */
681       0xb9002fe0, /* str	w0, [sp, #44] */
682       0xf90013e1, /* str	x1, [sp, #32]*/
683       0xfd000fe0, /* str	d0, [sp, #24] */
684       0xaa0203f3, /* mov	x19, x2 */
685       0xf94013e0, /* ldr	x0, [sp, #32] */
686     };
687     instruction_reader_test reader (insns);
688 
689     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
690     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
691 
692     SELF_CHECK (end == 4 * 5);
693 
694     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
695     SELF_CHECK (cache.framesize == 48);
696 
697     for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
698       {
699 	if (i == 1)
700 	  SELF_CHECK (cache.saved_regs[i].addr () == -16);
701 	else if (i == 19)
702 	  SELF_CHECK (cache.saved_regs[i].addr () == -48);
703 	else
704 	  SELF_CHECK (cache.saved_regs[i].is_realreg ()
705 		      && cache.saved_regs[i].realreg () == i);
706       }
707 
708     for (int i = 0; i < AARCH64_D_REGISTER_COUNT; i++)
709       {
710 	int num_regs = gdbarch_num_regs (gdbarch);
711 	int regnum = i + num_regs + AARCH64_D0_REGNUM;
712 
713 
714 	if (i == 0)
715 	  SELF_CHECK (cache.saved_regs[regnum].addr () == -24);
716 	else
717 	  SELF_CHECK (cache.saved_regs[regnum].is_realreg ()
718 		      && cache.saved_regs[regnum].realreg () == regnum);
719       }
720   }
721 
722   /* Test handling of movz before setting the frame pointer.  */
723   {
724     static const uint32_t insns[] = {
725       0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
726       0x52800020, /* mov     w0, #0x1 */
727       0x910003fd, /* mov     x29, sp */
728       0x528000a2, /* mov     w2, #0x5 */
729       0x97fffff8, /* bl      6e4 */
730     };
731 
732     instruction_reader_test reader (insns);
733 
734     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
735     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
736 
737     /* We should stop at the 4th instruction.  */
738     SELF_CHECK (end == (4 - 1) * 4);
739     SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
740     SELF_CHECK (cache.framesize == 16);
741   }
742 
743   /* Test handling of movz/stp when using the stack pointer as frame
744      pointer.  */
745   {
746     static const uint32_t insns[] = {
747       0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
748       0x52800020, /* mov     w0, #0x1 */
749       0x290207e0, /* stp     w0, w1, [sp, #16] */
750       0xa9018fe2, /* stp     x2, x3, [sp, #24] */
751       0x528000a2, /* mov     w2, #0x5 */
752       0x97fffff8, /* bl      6e4 */
753     };
754 
755     instruction_reader_test reader (insns);
756 
757     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
758     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
759 
760     /* We should stop at the 5th instruction.  */
761     SELF_CHECK (end == (5 - 1) * 4);
762     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
763     SELF_CHECK (cache.framesize == 64);
764   }
765 
766   /* Test handling of movz/str when using the stack pointer as frame
767      pointer  */
768   {
769     static const uint32_t insns[] = {
770       0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
771       0x52800020, /* mov     w0, #0x1 */
772       0xb9002be4, /* str     w4, [sp, #40] */
773       0xf9001be5, /* str     x5, [sp, #48] */
774       0x528000a2, /* mov     w2, #0x5 */
775       0x97fffff8, /* bl      6e4 */
776     };
777 
778     instruction_reader_test reader (insns);
779 
780     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
781     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
782 
783     /* We should stop at the 5th instruction.  */
784     SELF_CHECK (end == (5 - 1) * 4);
785     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
786     SELF_CHECK (cache.framesize == 64);
787   }
788 
789   /* Test handling of movz/stur when using the stack pointer as frame
790      pointer.  */
791   {
792     static const uint32_t insns[] = {
793       0xa9bc7bfd, /* stp     x29, x30, [sp, #-64]! */
794       0x52800020, /* mov     w0, #0x1 */
795       0xb80343e6, /* stur    w6, [sp, #52] */
796       0xf80383e7, /* stur    x7, [sp, #56] */
797       0x528000a2, /* mov     w2, #0x5 */
798       0x97fffff8, /* bl      6e4 */
799     };
800 
801     instruction_reader_test reader (insns);
802 
803     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
804     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
805 
806     /* We should stop at the 5th instruction.  */
807     SELF_CHECK (end == (5 - 1) * 4);
808     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
809     SELF_CHECK (cache.framesize == 64);
810   }
811 
812   /* Test handling of movz when there is no frame pointer set or no stack
813      pointer used.  */
814   {
815     static const uint32_t insns[] = {
816       0xa9bf7bfd, /* stp     x29, x30, [sp, #-16]! */
817       0x52800020, /* mov     w0, #0x1 */
818       0x528000a2, /* mov     w2, #0x5 */
819       0x97fffff8, /* bl      6e4 */
820     };
821 
822     instruction_reader_test reader (insns);
823 
824     trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
825     CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache, reader);
826 
827     /* We should stop at the 4th instruction.  */
828     SELF_CHECK (end == (4 - 1) * 4);
829     SELF_CHECK (cache.framereg == AARCH64_SP_REGNUM);
830     SELF_CHECK (cache.framesize == 16);
831   }
832 
833   /* Test a prologue in which there is a return address signing instruction.  */
834   if (tdep->has_pauth ())
835     {
836       static const uint32_t insns[] = {
837 	0xd503233f, /* paciasp */
838 	0xa9bd7bfd, /* stp	x29, x30, [sp, #-48]! */
839 	0x910003fd, /* mov	x29, sp */
840 	0xf801c3f3, /* str	x19, [sp, #28] */
841 	0xb9401fa0, /* ldr	x19, [x29, #28] */
842       };
843       instruction_reader_test reader (insns);
844 
845       trad_frame_reset_saved_regs (gdbarch, cache.saved_regs);
846       CORE_ADDR end = aarch64_analyze_prologue (gdbarch, 0, 128, &cache,
847 						reader);
848 
849       SELF_CHECK (end == 4 * 4);
850       SELF_CHECK (cache.framereg == AARCH64_FP_REGNUM);
851       SELF_CHECK (cache.framesize == 48);
852 
853       for (int i = 0; i < AARCH64_X_REGISTER_COUNT; i++)
854 	{
855 	  if (i == 19)
856 	    SELF_CHECK (cache.saved_regs[i].addr () == -20);
857 	  else if (i == AARCH64_FP_REGNUM)
858 	    SELF_CHECK (cache.saved_regs[i].addr () == -48);
859 	  else if (i == AARCH64_LR_REGNUM)
860 	    SELF_CHECK (cache.saved_regs[i].addr () == -40);
861 	  else
862 	    SELF_CHECK (cache.saved_regs[i].is_realreg ()
863 			&& cache.saved_regs[i].realreg () == i);
864 	}
865 
866       if (tdep->has_pauth ())
867 	{
868 	  int regnum = tdep->pauth_ra_state_regnum;
869 	  SELF_CHECK (cache.saved_regs[regnum].is_value ());
870 	}
871     }
872 }
873 } // namespace selftests
874 #endif /* GDB_SELF_TEST */
875 
876 /* Implement the "skip_prologue" gdbarch method.  */
877 
878 static CORE_ADDR
aarch64_skip_prologue(struct gdbarch * gdbarch,CORE_ADDR pc)879 aarch64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
880 {
881   CORE_ADDR func_addr, limit_pc;
882 
883   /* See if we can determine the end of the prologue via the symbol
884      table.  If so, then return either PC, or the PC after the
885      prologue, whichever is greater.  */
886   if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
887     {
888       CORE_ADDR post_prologue_pc
889 	= skip_prologue_using_sal (gdbarch, func_addr);
890 
891       if (post_prologue_pc != 0)
892 	return std::max (pc, post_prologue_pc);
893     }
894 
895   /* Can't determine prologue from the symbol table, need to examine
896      instructions.  */
897 
898   /* Find an upper limit on the function prologue using the debug
899      information.  If the debug information could not be used to
900      provide that bound, then use an arbitrary large number as the
901      upper bound.  */
902   limit_pc = skip_prologue_using_sal (gdbarch, pc);
903   if (limit_pc == 0)
904     limit_pc = pc + 128;	/* Magic.  */
905 
906   /* Try disassembling prologue.  */
907   return aarch64_analyze_prologue (gdbarch, pc, limit_pc, NULL);
908 }
909 
910 /* Scan the function prologue for THIS_FRAME and populate the prologue
911    cache CACHE.  */
912 
913 static void
aarch64_scan_prologue(struct frame_info * this_frame,struct aarch64_prologue_cache * cache)914 aarch64_scan_prologue (struct frame_info *this_frame,
915 		       struct aarch64_prologue_cache *cache)
916 {
917   CORE_ADDR block_addr = get_frame_address_in_block (this_frame);
918   CORE_ADDR prologue_start;
919   CORE_ADDR prologue_end;
920   CORE_ADDR prev_pc = get_frame_pc (this_frame);
921   struct gdbarch *gdbarch = get_frame_arch (this_frame);
922 
923   cache->prev_pc = prev_pc;
924 
925   /* Assume we do not find a frame.  */
926   cache->framereg = -1;
927   cache->framesize = 0;
928 
929   if (find_pc_partial_function (block_addr, NULL, &prologue_start,
930 				&prologue_end))
931     {
932       struct symtab_and_line sal = find_pc_line (prologue_start, 0);
933 
934       if (sal.line == 0)
935 	{
936 	  /* No line info so use the current PC.  */
937 	  prologue_end = prev_pc;
938 	}
939       else if (sal.end < prologue_end)
940 	{
941 	  /* The next line begins after the function end.  */
942 	  prologue_end = sal.end;
943 	}
944 
945       prologue_end = std::min (prologue_end, prev_pc);
946       aarch64_analyze_prologue (gdbarch, prologue_start, prologue_end, cache);
947     }
948   else
949     {
950       CORE_ADDR frame_loc;
951 
952       frame_loc = get_frame_register_unsigned (this_frame, AARCH64_FP_REGNUM);
953       if (frame_loc == 0)
954 	return;
955 
956       cache->framereg = AARCH64_FP_REGNUM;
957       cache->framesize = 16;
958       cache->saved_regs[29].set_addr (0);
959       cache->saved_regs[30].set_addr (8);
960     }
961 }
962 
963 /* Fill in *CACHE with information about the prologue of *THIS_FRAME.  This
964    function may throw an exception if the inferior's registers or memory is
965    not available.  */
966 
967 static void
aarch64_make_prologue_cache_1(struct frame_info * this_frame,struct aarch64_prologue_cache * cache)968 aarch64_make_prologue_cache_1 (struct frame_info *this_frame,
969 			       struct aarch64_prologue_cache *cache)
970 {
971   CORE_ADDR unwound_fp;
972   int reg;
973 
974   aarch64_scan_prologue (this_frame, cache);
975 
976   if (cache->framereg == -1)
977     return;
978 
979   unwound_fp = get_frame_register_unsigned (this_frame, cache->framereg);
980   if (unwound_fp == 0)
981     return;
982 
983   cache->prev_sp = unwound_fp + cache->framesize;
984 
985   /* Calculate actual addresses of saved registers using offsets
986      determined by aarch64_analyze_prologue.  */
987   for (reg = 0; reg < gdbarch_num_regs (get_frame_arch (this_frame)); reg++)
988     if (cache->saved_regs[reg].is_addr ())
989       cache->saved_regs[reg].set_addr (cache->saved_regs[reg].addr ()
990 				       + cache->prev_sp);
991 
992   cache->func = get_frame_func (this_frame);
993 
994   cache->available_p = 1;
995 }
996 
997 /* Allocate and fill in *THIS_CACHE with information about the prologue of
998    *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
999    Return a pointer to the current aarch64_prologue_cache in
1000    *THIS_CACHE.  */
1001 
1002 static struct aarch64_prologue_cache *
aarch64_make_prologue_cache(struct frame_info * this_frame,void ** this_cache)1003 aarch64_make_prologue_cache (struct frame_info *this_frame, void **this_cache)
1004 {
1005   struct aarch64_prologue_cache *cache;
1006 
1007   if (*this_cache != NULL)
1008     return (struct aarch64_prologue_cache *) *this_cache;
1009 
1010   cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1011   cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1012   *this_cache = cache;
1013 
1014   try
1015     {
1016       aarch64_make_prologue_cache_1 (this_frame, cache);
1017     }
1018   catch (const gdb_exception_error &ex)
1019     {
1020       if (ex.error != NOT_AVAILABLE_ERROR)
1021 	throw;
1022     }
1023 
1024   return cache;
1025 }
1026 
1027 /* Implement the "stop_reason" frame_unwind method.  */
1028 
1029 static enum unwind_stop_reason
aarch64_prologue_frame_unwind_stop_reason(struct frame_info * this_frame,void ** this_cache)1030 aarch64_prologue_frame_unwind_stop_reason (struct frame_info *this_frame,
1031 					   void **this_cache)
1032 {
1033   struct aarch64_prologue_cache *cache
1034     = aarch64_make_prologue_cache (this_frame, this_cache);
1035 
1036   if (!cache->available_p)
1037     return UNWIND_UNAVAILABLE;
1038 
1039   /* Halt the backtrace at "_start".  */
1040   if (cache->prev_pc <= gdbarch_tdep (get_frame_arch (this_frame))->lowest_pc)
1041     return UNWIND_OUTERMOST;
1042 
1043   /* We've hit a wall, stop.  */
1044   if (cache->prev_sp == 0)
1045     return UNWIND_OUTERMOST;
1046 
1047   return UNWIND_NO_REASON;
1048 }
1049 
1050 /* Our frame ID for a normal frame is the current function's starting
1051    PC and the caller's SP when we were called.  */
1052 
1053 static void
aarch64_prologue_this_id(struct frame_info * this_frame,void ** this_cache,struct frame_id * this_id)1054 aarch64_prologue_this_id (struct frame_info *this_frame,
1055 			  void **this_cache, struct frame_id *this_id)
1056 {
1057   struct aarch64_prologue_cache *cache
1058     = aarch64_make_prologue_cache (this_frame, this_cache);
1059 
1060   if (!cache->available_p)
1061     *this_id = frame_id_build_unavailable_stack (cache->func);
1062   else
1063     *this_id = frame_id_build (cache->prev_sp, cache->func);
1064 }
1065 
1066 /* Implement the "prev_register" frame_unwind method.  */
1067 
1068 static struct value *
aarch64_prologue_prev_register(struct frame_info * this_frame,void ** this_cache,int prev_regnum)1069 aarch64_prologue_prev_register (struct frame_info *this_frame,
1070 				void **this_cache, int prev_regnum)
1071 {
1072   struct aarch64_prologue_cache *cache
1073     = aarch64_make_prologue_cache (this_frame, this_cache);
1074 
1075   /* If we are asked to unwind the PC, then we need to return the LR
1076      instead.  The prologue may save PC, but it will point into this
1077      frame's prologue, not the next frame's resume location.  */
1078   if (prev_regnum == AARCH64_PC_REGNUM)
1079     {
1080       CORE_ADDR lr;
1081       struct gdbarch *gdbarch = get_frame_arch (this_frame);
1082       struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1083 
1084       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1085 
1086       if (tdep->has_pauth ()
1087 	  && cache->saved_regs[tdep->pauth_ra_state_regnum].is_value ())
1088 	lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1089 
1090       return frame_unwind_got_constant (this_frame, prev_regnum, lr);
1091     }
1092 
1093   /* SP is generally not saved to the stack, but this frame is
1094      identified by the next frame's stack pointer at the time of the
1095      call.  The value was already reconstructed into PREV_SP.  */
1096   /*
1097 	 +----------+  ^
1098 	 | saved lr |  |
1099       +->| saved fp |--+
1100       |  |          |
1101       |  |          |     <- Previous SP
1102       |  +----------+
1103       |  | saved lr |
1104       +--| saved fp |<- FP
1105 	 |          |
1106 	 |          |<- SP
1107 	 +----------+  */
1108   if (prev_regnum == AARCH64_SP_REGNUM)
1109     return frame_unwind_got_constant (this_frame, prev_regnum,
1110 				      cache->prev_sp);
1111 
1112   return trad_frame_get_prev_register (this_frame, cache->saved_regs,
1113 				       prev_regnum);
1114 }
1115 
1116 /* AArch64 prologue unwinder.  */
1117 static frame_unwind aarch64_prologue_unwind =
1118 {
1119   "aarch64 prologue",
1120   NORMAL_FRAME,
1121   aarch64_prologue_frame_unwind_stop_reason,
1122   aarch64_prologue_this_id,
1123   aarch64_prologue_prev_register,
1124   NULL,
1125   default_frame_sniffer
1126 };
1127 
1128 /* Allocate and fill in *THIS_CACHE with information about the prologue of
1129    *THIS_FRAME.  Do not do this is if *THIS_CACHE was already allocated.
1130    Return a pointer to the current aarch64_prologue_cache in
1131    *THIS_CACHE.  */
1132 
1133 static struct aarch64_prologue_cache *
aarch64_make_stub_cache(struct frame_info * this_frame,void ** this_cache)1134 aarch64_make_stub_cache (struct frame_info *this_frame, void **this_cache)
1135 {
1136   struct aarch64_prologue_cache *cache;
1137 
1138   if (*this_cache != NULL)
1139     return (struct aarch64_prologue_cache *) *this_cache;
1140 
1141   cache = FRAME_OBSTACK_ZALLOC (struct aarch64_prologue_cache);
1142   cache->saved_regs = trad_frame_alloc_saved_regs (this_frame);
1143   *this_cache = cache;
1144 
1145   try
1146     {
1147       cache->prev_sp = get_frame_register_unsigned (this_frame,
1148 						    AARCH64_SP_REGNUM);
1149       cache->prev_pc = get_frame_pc (this_frame);
1150       cache->available_p = 1;
1151     }
1152   catch (const gdb_exception_error &ex)
1153     {
1154       if (ex.error != NOT_AVAILABLE_ERROR)
1155 	throw;
1156     }
1157 
1158   return cache;
1159 }
1160 
1161 /* Implement the "stop_reason" frame_unwind method.  */
1162 
1163 static enum unwind_stop_reason
aarch64_stub_frame_unwind_stop_reason(struct frame_info * this_frame,void ** this_cache)1164 aarch64_stub_frame_unwind_stop_reason (struct frame_info *this_frame,
1165 				       void **this_cache)
1166 {
1167   struct aarch64_prologue_cache *cache
1168     = aarch64_make_stub_cache (this_frame, this_cache);
1169 
1170   if (!cache->available_p)
1171     return UNWIND_UNAVAILABLE;
1172 
1173   return UNWIND_NO_REASON;
1174 }
1175 
1176 /* Our frame ID for a stub frame is the current SP and LR.  */
1177 
1178 static void
aarch64_stub_this_id(struct frame_info * this_frame,void ** this_cache,struct frame_id * this_id)1179 aarch64_stub_this_id (struct frame_info *this_frame,
1180 		      void **this_cache, struct frame_id *this_id)
1181 {
1182   struct aarch64_prologue_cache *cache
1183     = aarch64_make_stub_cache (this_frame, this_cache);
1184 
1185   if (cache->available_p)
1186     *this_id = frame_id_build (cache->prev_sp, cache->prev_pc);
1187   else
1188     *this_id = frame_id_build_unavailable_stack (cache->prev_pc);
1189 }
1190 
1191 /* Implement the "sniffer" frame_unwind method.  */
1192 
1193 static int
aarch64_stub_unwind_sniffer(const struct frame_unwind * self,struct frame_info * this_frame,void ** this_prologue_cache)1194 aarch64_stub_unwind_sniffer (const struct frame_unwind *self,
1195 			     struct frame_info *this_frame,
1196 			     void **this_prologue_cache)
1197 {
1198   CORE_ADDR addr_in_block;
1199   gdb_byte dummy[4];
1200 
1201   addr_in_block = get_frame_address_in_block (this_frame);
1202   if (in_plt_section (addr_in_block)
1203       /* We also use the stub winder if the target memory is unreadable
1204 	 to avoid having the prologue unwinder trying to read it.  */
1205       || target_read_memory (get_frame_pc (this_frame), dummy, 4) != 0)
1206     return 1;
1207 
1208   return 0;
1209 }
1210 
1211 /* AArch64 stub unwinder.  */
1212 static frame_unwind aarch64_stub_unwind =
1213 {
1214   "aarch64 stub",
1215   NORMAL_FRAME,
1216   aarch64_stub_frame_unwind_stop_reason,
1217   aarch64_stub_this_id,
1218   aarch64_prologue_prev_register,
1219   NULL,
1220   aarch64_stub_unwind_sniffer
1221 };
1222 
1223 /* Return the frame base address of *THIS_FRAME.  */
1224 
1225 static CORE_ADDR
aarch64_normal_frame_base(struct frame_info * this_frame,void ** this_cache)1226 aarch64_normal_frame_base (struct frame_info *this_frame, void **this_cache)
1227 {
1228   struct aarch64_prologue_cache *cache
1229     = aarch64_make_prologue_cache (this_frame, this_cache);
1230 
1231   return cache->prev_sp - cache->framesize;
1232 }
1233 
1234 /* AArch64 default frame base information.  */
1235 static frame_base aarch64_normal_base =
1236 {
1237   &aarch64_prologue_unwind,
1238   aarch64_normal_frame_base,
1239   aarch64_normal_frame_base,
1240   aarch64_normal_frame_base
1241 };
1242 
1243 /* Return the value of the REGNUM register in the previous frame of
1244    *THIS_FRAME.  */
1245 
1246 static struct value *
aarch64_dwarf2_prev_register(struct frame_info * this_frame,void ** this_cache,int regnum)1247 aarch64_dwarf2_prev_register (struct frame_info *this_frame,
1248 			      void **this_cache, int regnum)
1249 {
1250   struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1251   CORE_ADDR lr;
1252 
1253   switch (regnum)
1254     {
1255     case AARCH64_PC_REGNUM:
1256       lr = frame_unwind_register_unsigned (this_frame, AARCH64_LR_REGNUM);
1257       lr = aarch64_frame_unmask_lr (tdep, this_frame, lr);
1258       return frame_unwind_got_constant (this_frame, regnum, lr);
1259 
1260     default:
1261       internal_error (__FILE__, __LINE__,
1262 		      _("Unexpected register %d"), regnum);
1263     }
1264 }
1265 
1266 static const unsigned char op_lit0 = DW_OP_lit0;
1267 static const unsigned char op_lit1 = DW_OP_lit1;
1268 
1269 /* Implement the "init_reg" dwarf2_frame_ops method.  */
1270 
1271 static void
aarch64_dwarf2_frame_init_reg(struct gdbarch * gdbarch,int regnum,struct dwarf2_frame_state_reg * reg,struct frame_info * this_frame)1272 aarch64_dwarf2_frame_init_reg (struct gdbarch *gdbarch, int regnum,
1273 			       struct dwarf2_frame_state_reg *reg,
1274 			       struct frame_info *this_frame)
1275 {
1276   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1277 
1278   switch (regnum)
1279     {
1280     case AARCH64_PC_REGNUM:
1281       reg->how = DWARF2_FRAME_REG_FN;
1282       reg->loc.fn = aarch64_dwarf2_prev_register;
1283       return;
1284 
1285     case AARCH64_SP_REGNUM:
1286       reg->how = DWARF2_FRAME_REG_CFA;
1287       return;
1288     }
1289 
1290   /* Init pauth registers.  */
1291   if (tdep->has_pauth ())
1292     {
1293       if (regnum == tdep->pauth_ra_state_regnum)
1294 	{
1295 	  /* Initialize RA_STATE to zero.  */
1296 	  reg->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1297 	  reg->loc.exp.start = &op_lit0;
1298 	  reg->loc.exp.len = 1;
1299 	  return;
1300 	}
1301       else if (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
1302 	       || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base))
1303 	{
1304 	  reg->how = DWARF2_FRAME_REG_SAME_VALUE;
1305 	  return;
1306 	}
1307     }
1308 }
1309 
1310 /* Implement the execute_dwarf_cfa_vendor_op method.  */
1311 
1312 static bool
aarch64_execute_dwarf_cfa_vendor_op(struct gdbarch * gdbarch,gdb_byte op,struct dwarf2_frame_state * fs)1313 aarch64_execute_dwarf_cfa_vendor_op (struct gdbarch *gdbarch, gdb_byte op,
1314 				     struct dwarf2_frame_state *fs)
1315 {
1316   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1317   struct dwarf2_frame_state_reg *ra_state;
1318 
1319   if (op == DW_CFA_AARCH64_negate_ra_state)
1320     {
1321       /* On systems without pauth, treat as a nop.  */
1322       if (!tdep->has_pauth ())
1323 	return true;
1324 
1325       /* Allocate RA_STATE column if it's not allocated yet.  */
1326       fs->regs.alloc_regs (AARCH64_DWARF_PAUTH_RA_STATE + 1);
1327 
1328       /* Toggle the status of RA_STATE between 0 and 1.  */
1329       ra_state = &(fs->regs.reg[AARCH64_DWARF_PAUTH_RA_STATE]);
1330       ra_state->how = DWARF2_FRAME_REG_SAVED_VAL_EXP;
1331 
1332       if (ra_state->loc.exp.start == nullptr
1333 	  || ra_state->loc.exp.start == &op_lit0)
1334 	ra_state->loc.exp.start = &op_lit1;
1335       else
1336 	ra_state->loc.exp.start = &op_lit0;
1337 
1338       ra_state->loc.exp.len = 1;
1339 
1340       return true;
1341     }
1342 
1343   return false;
1344 }
1345 
1346 /* Used for matching BRK instructions for AArch64.  */
1347 static constexpr uint32_t BRK_INSN_MASK = 0xffe0001f;
1348 static constexpr uint32_t BRK_INSN_BASE = 0xd4200000;
1349 
1350 /* Implementation of gdbarch_program_breakpoint_here_p for aarch64.  */
1351 
1352 static bool
aarch64_program_breakpoint_here_p(gdbarch * gdbarch,CORE_ADDR address)1353 aarch64_program_breakpoint_here_p (gdbarch *gdbarch, CORE_ADDR address)
1354 {
1355   const uint32_t insn_len = 4;
1356   gdb_byte target_mem[4];
1357 
1358   /* Enable the automatic memory restoration from breakpoints while
1359      we read the memory.  Otherwise we may find temporary breakpoints, ones
1360      inserted by GDB, and flag them as permanent breakpoints.  */
1361   scoped_restore restore_memory
1362     = make_scoped_restore_show_memory_breakpoints (0);
1363 
1364   if (target_read_memory (address, target_mem, insn_len) == 0)
1365     {
1366       uint32_t insn =
1367 	(uint32_t) extract_unsigned_integer (target_mem, insn_len,
1368 					     gdbarch_byte_order_for_code (gdbarch));
1369 
1370       /* Check if INSN is a BRK instruction pattern.  There are multiple choices
1371 	 of such instructions with different immediate values.  Different OS'
1372 	 may use a different variation, but they have the same outcome.  */
1373 	return ((insn & BRK_INSN_MASK) == BRK_INSN_BASE);
1374     }
1375 
1376   return false;
1377 }
1378 
1379 /* When arguments must be pushed onto the stack, they go on in reverse
1380    order.  The code below implements a FILO (stack) to do this.  */
1381 
1382 struct stack_item_t
1383 {
1384   /* Value to pass on stack.  It can be NULL if this item is for stack
1385      padding.  */
1386   const gdb_byte *data;
1387 
1388   /* Size in bytes of value to pass on stack.  */
1389   int len;
1390 };
1391 
1392 /* Implement the gdbarch type alignment method, overrides the generic
1393    alignment algorithm for anything that is aarch64 specific.  */
1394 
1395 static ULONGEST
aarch64_type_align(gdbarch * gdbarch,struct type * t)1396 aarch64_type_align (gdbarch *gdbarch, struct type *t)
1397 {
1398   t = check_typedef (t);
1399   if (t->code () == TYPE_CODE_ARRAY && t->is_vector ())
1400     {
1401       /* Use the natural alignment for vector types (the same for
1402 	 scalar type), but the maximum alignment is 128-bit.  */
1403       if (TYPE_LENGTH (t) > 16)
1404 	return 16;
1405       else
1406 	return TYPE_LENGTH (t);
1407     }
1408 
1409   /* Allow the common code to calculate the alignment.  */
1410   return 0;
1411 }
1412 
1413 /* Worker function for aapcs_is_vfp_call_or_return_candidate.
1414 
1415    Return the number of register required, or -1 on failure.
1416 
1417    When encountering a base element, if FUNDAMENTAL_TYPE is not set then set it
1418    to the element, else fail if the type of this element does not match the
1419    existing value.  */
1420 
1421 static int
aapcs_is_vfp_call_or_return_candidate_1(struct type * type,struct type ** fundamental_type)1422 aapcs_is_vfp_call_or_return_candidate_1 (struct type *type,
1423 					 struct type **fundamental_type)
1424 {
1425   if (type == nullptr)
1426     return -1;
1427 
1428   switch (type->code ())
1429     {
1430     case TYPE_CODE_FLT:
1431       if (TYPE_LENGTH (type) > 16)
1432 	return -1;
1433 
1434       if (*fundamental_type == nullptr)
1435 	*fundamental_type = type;
1436       else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1437 	       || type->code () != (*fundamental_type)->code ())
1438 	return -1;
1439 
1440       return 1;
1441 
1442     case TYPE_CODE_COMPLEX:
1443       {
1444 	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (type));
1445 	if (TYPE_LENGTH (target_type) > 16)
1446 	  return -1;
1447 
1448 	if (*fundamental_type == nullptr)
1449 	  *fundamental_type = target_type;
1450 	else if (TYPE_LENGTH (target_type) != TYPE_LENGTH (*fundamental_type)
1451 		 || target_type->code () != (*fundamental_type)->code ())
1452 	  return -1;
1453 
1454 	return 2;
1455       }
1456 
1457     case TYPE_CODE_ARRAY:
1458       {
1459 	if (type->is_vector ())
1460 	  {
1461 	    if (TYPE_LENGTH (type) != 8 && TYPE_LENGTH (type) != 16)
1462 	      return -1;
1463 
1464 	    if (*fundamental_type == nullptr)
1465 	      *fundamental_type = type;
1466 	    else if (TYPE_LENGTH (type) != TYPE_LENGTH (*fundamental_type)
1467 		     || type->code () != (*fundamental_type)->code ())
1468 	      return -1;
1469 
1470 	    return 1;
1471 	  }
1472 	else
1473 	  {
1474 	    struct type *target_type = TYPE_TARGET_TYPE (type);
1475 	    int count = aapcs_is_vfp_call_or_return_candidate_1
1476 			  (target_type, fundamental_type);
1477 
1478 	    if (count == -1)
1479 	      return count;
1480 
1481 	    count *= (TYPE_LENGTH (type) / TYPE_LENGTH (target_type));
1482 	      return count;
1483 	  }
1484       }
1485 
1486     case TYPE_CODE_STRUCT:
1487     case TYPE_CODE_UNION:
1488       {
1489 	int count = 0;
1490 
1491 	for (int i = 0; i < type->num_fields (); i++)
1492 	  {
1493 	    /* Ignore any static fields.  */
1494 	    if (field_is_static (&type->field (i)))
1495 	      continue;
1496 
1497 	    struct type *member = check_typedef (type->field (i).type ());
1498 
1499 	    int sub_count = aapcs_is_vfp_call_or_return_candidate_1
1500 			      (member, fundamental_type);
1501 	    if (sub_count == -1)
1502 	      return -1;
1503 	    count += sub_count;
1504 	  }
1505 
1506 	/* Ensure there is no padding between the fields (allowing for empty
1507 	   zero length structs)  */
1508 	int ftype_length = (*fundamental_type == nullptr)
1509 			   ? 0 : TYPE_LENGTH (*fundamental_type);
1510 	if (count * ftype_length != TYPE_LENGTH (type))
1511 	  return -1;
1512 
1513 	return count;
1514       }
1515 
1516     default:
1517       break;
1518     }
1519 
1520   return -1;
1521 }
1522 
1523 /* Return true if an argument, whose type is described by TYPE, can be passed or
1524    returned in simd/fp registers, providing enough parameter passing registers
1525    are available.  This is as described in the AAPCS64.
1526 
1527    Upon successful return, *COUNT returns the number of needed registers,
1528    *FUNDAMENTAL_TYPE contains the type of those registers.
1529 
1530    Candidate as per the AAPCS64 5.4.2.C is either a:
1531    - float.
1532    - short-vector.
1533    - HFA (Homogeneous Floating-point Aggregate, 4.3.5.1). A Composite type where
1534      all the members are floats and has at most 4 members.
1535    - HVA (Homogeneous Short-vector Aggregate, 4.3.5.2). A Composite type where
1536      all the members are short vectors and has at most 4 members.
1537    - Complex (7.1.1)
1538 
1539    Note that HFAs and HVAs can include nested structures and arrays.  */
1540 
1541 static bool
aapcs_is_vfp_call_or_return_candidate(struct type * type,int * count,struct type ** fundamental_type)1542 aapcs_is_vfp_call_or_return_candidate (struct type *type, int *count,
1543 				       struct type **fundamental_type)
1544 {
1545   if (type == nullptr)
1546     return false;
1547 
1548   *fundamental_type = nullptr;
1549 
1550   int ag_count = aapcs_is_vfp_call_or_return_candidate_1 (type,
1551 							  fundamental_type);
1552 
1553   if (ag_count > 0 && ag_count <= HA_MAX_NUM_FLDS)
1554     {
1555       *count = ag_count;
1556       return true;
1557     }
1558   else
1559     return false;
1560 }
1561 
1562 /* AArch64 function call information structure.  */
1563 struct aarch64_call_info
1564 {
1565   /* the current argument number.  */
1566   unsigned argnum = 0;
1567 
1568   /* The next general purpose register number, equivalent to NGRN as
1569      described in the AArch64 Procedure Call Standard.  */
1570   unsigned ngrn = 0;
1571 
1572   /* The next SIMD and floating point register number, equivalent to
1573      NSRN as described in the AArch64 Procedure Call Standard.  */
1574   unsigned nsrn = 0;
1575 
1576   /* The next stacked argument address, equivalent to NSAA as
1577      described in the AArch64 Procedure Call Standard.  */
1578   unsigned nsaa = 0;
1579 
1580   /* Stack item vector.  */
1581   std::vector<stack_item_t> si;
1582 };
1583 
1584 /* Pass a value in a sequence of consecutive X registers.  The caller
1585    is responsible for ensuring sufficient registers are available.  */
1586 
1587 static void
pass_in_x(struct gdbarch * gdbarch,struct regcache * regcache,struct aarch64_call_info * info,struct type * type,struct value * arg)1588 pass_in_x (struct gdbarch *gdbarch, struct regcache *regcache,
1589 	   struct aarch64_call_info *info, struct type *type,
1590 	   struct value *arg)
1591 {
1592   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1593   int len = TYPE_LENGTH (type);
1594   enum type_code typecode = type->code ();
1595   int regnum = AARCH64_X0_REGNUM + info->ngrn;
1596   const bfd_byte *buf = value_contents (arg);
1597 
1598   info->argnum++;
1599 
1600   while (len > 0)
1601     {
1602       int partial_len = len < X_REGISTER_SIZE ? len : X_REGISTER_SIZE;
1603       CORE_ADDR regval = extract_unsigned_integer (buf, partial_len,
1604 						   byte_order);
1605 
1606 
1607       /* Adjust sub-word struct/union args when big-endian.  */
1608       if (byte_order == BFD_ENDIAN_BIG
1609 	  && partial_len < X_REGISTER_SIZE
1610 	  && (typecode == TYPE_CODE_STRUCT || typecode == TYPE_CODE_UNION))
1611 	regval <<= ((X_REGISTER_SIZE - partial_len) * TARGET_CHAR_BIT);
1612 
1613       aarch64_debug_printf ("arg %d in %s = 0x%s", info->argnum,
1614 			    gdbarch_register_name (gdbarch, regnum),
1615 			    phex (regval, X_REGISTER_SIZE));
1616 
1617       regcache_cooked_write_unsigned (regcache, regnum, regval);
1618       len -= partial_len;
1619       buf += partial_len;
1620       regnum++;
1621     }
1622 }
1623 
1624 /* Attempt to marshall a value in a V register.  Return 1 if
1625    successful, or 0 if insufficient registers are available.  This
1626    function, unlike the equivalent pass_in_x() function does not
1627    handle arguments spread across multiple registers.  */
1628 
1629 static int
pass_in_v(struct gdbarch * gdbarch,struct regcache * regcache,struct aarch64_call_info * info,int len,const bfd_byte * buf)1630 pass_in_v (struct gdbarch *gdbarch,
1631 	   struct regcache *regcache,
1632 	   struct aarch64_call_info *info,
1633 	   int len, const bfd_byte *buf)
1634 {
1635   if (info->nsrn < 8)
1636     {
1637       int regnum = AARCH64_V0_REGNUM + info->nsrn;
1638       /* Enough space for a full vector register.  */
1639       gdb_byte reg[register_size (gdbarch, regnum)];
1640       gdb_assert (len <= sizeof (reg));
1641 
1642       info->argnum++;
1643       info->nsrn++;
1644 
1645       memset (reg, 0, sizeof (reg));
1646       /* PCS C.1, the argument is allocated to the least significant
1647 	 bits of V register.  */
1648       memcpy (reg, buf, len);
1649       regcache->cooked_write (regnum, reg);
1650 
1651       aarch64_debug_printf ("arg %d in %s", info->argnum,
1652 			    gdbarch_register_name (gdbarch, regnum));
1653 
1654       return 1;
1655     }
1656   info->nsrn = 8;
1657   return 0;
1658 }
1659 
1660 /* Marshall an argument onto the stack.  */
1661 
1662 static void
pass_on_stack(struct aarch64_call_info * info,struct type * type,struct value * arg)1663 pass_on_stack (struct aarch64_call_info *info, struct type *type,
1664 	       struct value *arg)
1665 {
1666   const bfd_byte *buf = value_contents (arg);
1667   int len = TYPE_LENGTH (type);
1668   int align;
1669   stack_item_t item;
1670 
1671   info->argnum++;
1672 
1673   align = type_align (type);
1674 
1675   /* PCS C.17 Stack should be aligned to the larger of 8 bytes or the
1676      Natural alignment of the argument's type.  */
1677   align = align_up (align, 8);
1678 
1679   /* The AArch64 PCS requires at most doubleword alignment.  */
1680   if (align > 16)
1681     align = 16;
1682 
1683   aarch64_debug_printf ("arg %d len=%d @ sp + %d\n", info->argnum, len,
1684 			info->nsaa);
1685 
1686   item.len = len;
1687   item.data = buf;
1688   info->si.push_back (item);
1689 
1690   info->nsaa += len;
1691   if (info->nsaa & (align - 1))
1692     {
1693       /* Push stack alignment padding.  */
1694       int pad = align - (info->nsaa & (align - 1));
1695 
1696       item.len = pad;
1697       item.data = NULL;
1698 
1699       info->si.push_back (item);
1700       info->nsaa += pad;
1701     }
1702 }
1703 
1704 /* Marshall an argument into a sequence of one or more consecutive X
1705    registers or, if insufficient X registers are available then onto
1706    the stack.  */
1707 
1708 static void
pass_in_x_or_stack(struct gdbarch * gdbarch,struct regcache * regcache,struct aarch64_call_info * info,struct type * type,struct value * arg)1709 pass_in_x_or_stack (struct gdbarch *gdbarch, struct regcache *regcache,
1710 		    struct aarch64_call_info *info, struct type *type,
1711 		    struct value *arg)
1712 {
1713   int len = TYPE_LENGTH (type);
1714   int nregs = (len + X_REGISTER_SIZE - 1) / X_REGISTER_SIZE;
1715 
1716   /* PCS C.13 - Pass in registers if we have enough spare */
1717   if (info->ngrn + nregs <= 8)
1718     {
1719       pass_in_x (gdbarch, regcache, info, type, arg);
1720       info->ngrn += nregs;
1721     }
1722   else
1723     {
1724       info->ngrn = 8;
1725       pass_on_stack (info, type, arg);
1726     }
1727 }
1728 
1729 /* Pass a value, which is of type arg_type, in a V register.  Assumes value is a
1730    aapcs_is_vfp_call_or_return_candidate and there are enough spare V
1731    registers.  A return value of false is an error state as the value will have
1732    been partially passed to the stack.  */
1733 static bool
pass_in_v_vfp_candidate(struct gdbarch * gdbarch,struct regcache * regcache,struct aarch64_call_info * info,struct type * arg_type,struct value * arg)1734 pass_in_v_vfp_candidate (struct gdbarch *gdbarch, struct regcache *regcache,
1735 			 struct aarch64_call_info *info, struct type *arg_type,
1736 			 struct value *arg)
1737 {
1738   switch (arg_type->code ())
1739     {
1740     case TYPE_CODE_FLT:
1741       return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1742 			value_contents (arg));
1743       break;
1744 
1745     case TYPE_CODE_COMPLEX:
1746       {
1747 	const bfd_byte *buf = value_contents (arg);
1748 	struct type *target_type = check_typedef (TYPE_TARGET_TYPE (arg_type));
1749 
1750 	if (!pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1751 			buf))
1752 	  return false;
1753 
1754 	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (target_type),
1755 			  buf + TYPE_LENGTH (target_type));
1756       }
1757 
1758     case TYPE_CODE_ARRAY:
1759       if (arg_type->is_vector ())
1760 	return pass_in_v (gdbarch, regcache, info, TYPE_LENGTH (arg_type),
1761 			  value_contents (arg));
1762       /* fall through.  */
1763 
1764     case TYPE_CODE_STRUCT:
1765     case TYPE_CODE_UNION:
1766       for (int i = 0; i < arg_type->num_fields (); i++)
1767 	{
1768 	  /* Don't include static fields.  */
1769 	  if (field_is_static (&arg_type->field (i)))
1770 	    continue;
1771 
1772 	  struct value *field = value_primitive_field (arg, 0, i, arg_type);
1773 	  struct type *field_type = check_typedef (value_type (field));
1774 
1775 	  if (!pass_in_v_vfp_candidate (gdbarch, regcache, info, field_type,
1776 					field))
1777 	    return false;
1778 	}
1779       return true;
1780 
1781     default:
1782       return false;
1783     }
1784 }
1785 
1786 /* Implement the "push_dummy_call" gdbarch method.  */
1787 
1788 static CORE_ADDR
aarch64_push_dummy_call(struct gdbarch * gdbarch,struct value * function,struct regcache * regcache,CORE_ADDR bp_addr,int nargs,struct value ** args,CORE_ADDR sp,function_call_return_method return_method,CORE_ADDR struct_addr)1789 aarch64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1790 			 struct regcache *regcache, CORE_ADDR bp_addr,
1791 			 int nargs,
1792 			 struct value **args, CORE_ADDR sp,
1793 			 function_call_return_method return_method,
1794 			 CORE_ADDR struct_addr)
1795 {
1796   int argnum;
1797   struct aarch64_call_info info;
1798 
1799   /* We need to know what the type of the called function is in order
1800      to determine the number of named/anonymous arguments for the
1801      actual argument placement, and the return type in order to handle
1802      return value correctly.
1803 
1804      The generic code above us views the decision of return in memory
1805      or return in registers as a two stage processes.  The language
1806      handler is consulted first and may decide to return in memory (eg
1807      class with copy constructor returned by value), this will cause
1808      the generic code to allocate space AND insert an initial leading
1809      argument.
1810 
1811      If the language code does not decide to pass in memory then the
1812      target code is consulted.
1813 
1814      If the language code decides to pass in memory we want to move
1815      the pointer inserted as the initial argument from the argument
1816      list and into X8, the conventional AArch64 struct return pointer
1817      register.  */
1818 
1819   /* Set the return address.  For the AArch64, the return breakpoint
1820      is always at BP_ADDR.  */
1821   regcache_cooked_write_unsigned (regcache, AARCH64_LR_REGNUM, bp_addr);
1822 
1823   /* If we were given an initial argument for the return slot, lose it.  */
1824   if (return_method == return_method_hidden_param)
1825     {
1826       args++;
1827       nargs--;
1828     }
1829 
1830   /* The struct_return pointer occupies X8.  */
1831   if (return_method != return_method_normal)
1832     {
1833       aarch64_debug_printf ("struct return in %s = 0x%s",
1834 			    gdbarch_register_name
1835 			      (gdbarch, AARCH64_STRUCT_RETURN_REGNUM),
1836 			    paddress (gdbarch, struct_addr));
1837 
1838       regcache_cooked_write_unsigned (regcache, AARCH64_STRUCT_RETURN_REGNUM,
1839 				      struct_addr);
1840     }
1841 
1842   for (argnum = 0; argnum < nargs; argnum++)
1843     {
1844       struct value *arg = args[argnum];
1845       struct type *arg_type, *fundamental_type;
1846       int len, elements;
1847 
1848       arg_type = check_typedef (value_type (arg));
1849       len = TYPE_LENGTH (arg_type);
1850 
1851       /* If arg can be passed in v registers as per the AAPCS64, then do so if
1852 	 if there are enough spare registers.  */
1853       if (aapcs_is_vfp_call_or_return_candidate (arg_type, &elements,
1854 						 &fundamental_type))
1855 	{
1856 	  if (info.nsrn + elements <= 8)
1857 	    {
1858 	      /* We know that we have sufficient registers available therefore
1859 		 this will never need to fallback to the stack.  */
1860 	      if (!pass_in_v_vfp_candidate (gdbarch, regcache, &info, arg_type,
1861 					    arg))
1862 		gdb_assert_not_reached ("Failed to push args");
1863 	    }
1864 	  else
1865 	    {
1866 	      info.nsrn = 8;
1867 	      pass_on_stack (&info, arg_type, arg);
1868 	    }
1869 	  continue;
1870 	}
1871 
1872       switch (arg_type->code ())
1873 	{
1874 	case TYPE_CODE_INT:
1875 	case TYPE_CODE_BOOL:
1876 	case TYPE_CODE_CHAR:
1877 	case TYPE_CODE_RANGE:
1878 	case TYPE_CODE_ENUM:
1879 	  if (len < 4)
1880 	    {
1881 	      /* Promote to 32 bit integer.  */
1882 	      if (arg_type->is_unsigned ())
1883 		arg_type = builtin_type (gdbarch)->builtin_uint32;
1884 	      else
1885 		arg_type = builtin_type (gdbarch)->builtin_int32;
1886 	      arg = value_cast (arg_type, arg);
1887 	    }
1888 	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1889 	  break;
1890 
1891 	case TYPE_CODE_STRUCT:
1892 	case TYPE_CODE_ARRAY:
1893 	case TYPE_CODE_UNION:
1894 	  if (len > 16)
1895 	    {
1896 	      /* PCS B.7 Aggregates larger than 16 bytes are passed by
1897 		 invisible reference.  */
1898 
1899 	      /* Allocate aligned storage.  */
1900 	      sp = align_down (sp - len, 16);
1901 
1902 	      /* Write the real data into the stack.  */
1903 	      write_memory (sp, value_contents (arg), len);
1904 
1905 	      /* Construct the indirection.  */
1906 	      arg_type = lookup_pointer_type (arg_type);
1907 	      arg = value_from_pointer (arg_type, sp);
1908 	      pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1909 	    }
1910 	  else
1911 	    /* PCS C.15 / C.18 multiple values pass.  */
1912 	    pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1913 	  break;
1914 
1915 	default:
1916 	  pass_in_x_or_stack (gdbarch, regcache, &info, arg_type, arg);
1917 	  break;
1918 	}
1919     }
1920 
1921   /* Make sure stack retains 16 byte alignment.  */
1922   if (info.nsaa & 15)
1923     sp -= 16 - (info.nsaa & 15);
1924 
1925   while (!info.si.empty ())
1926     {
1927       const stack_item_t &si = info.si.back ();
1928 
1929       sp -= si.len;
1930       if (si.data != NULL)
1931 	write_memory (sp, si.data, si.len);
1932       info.si.pop_back ();
1933     }
1934 
1935   /* Finally, update the SP register.  */
1936   regcache_cooked_write_unsigned (regcache, AARCH64_SP_REGNUM, sp);
1937 
1938   return sp;
1939 }
1940 
1941 /* Implement the "frame_align" gdbarch method.  */
1942 
1943 static CORE_ADDR
aarch64_frame_align(struct gdbarch * gdbarch,CORE_ADDR sp)1944 aarch64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1945 {
1946   /* Align the stack to sixteen bytes.  */
1947   return sp & ~(CORE_ADDR) 15;
1948 }
1949 
1950 /* Return the type for an AdvSISD Q register.  */
1951 
1952 static struct type *
aarch64_vnq_type(struct gdbarch * gdbarch)1953 aarch64_vnq_type (struct gdbarch *gdbarch)
1954 {
1955   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1956 
1957   if (tdep->vnq_type == NULL)
1958     {
1959       struct type *t;
1960       struct type *elem;
1961 
1962       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
1963 			       TYPE_CODE_UNION);
1964 
1965       elem = builtin_type (gdbarch)->builtin_uint128;
1966       append_composite_type_field (t, "u", elem);
1967 
1968       elem = builtin_type (gdbarch)->builtin_int128;
1969       append_composite_type_field (t, "s", elem);
1970 
1971       tdep->vnq_type = t;
1972     }
1973 
1974   return tdep->vnq_type;
1975 }
1976 
1977 /* Return the type for an AdvSISD D register.  */
1978 
1979 static struct type *
aarch64_vnd_type(struct gdbarch * gdbarch)1980 aarch64_vnd_type (struct gdbarch *gdbarch)
1981 {
1982   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1983 
1984   if (tdep->vnd_type == NULL)
1985     {
1986       struct type *t;
1987       struct type *elem;
1988 
1989       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
1990 			       TYPE_CODE_UNION);
1991 
1992       elem = builtin_type (gdbarch)->builtin_double;
1993       append_composite_type_field (t, "f", elem);
1994 
1995       elem = builtin_type (gdbarch)->builtin_uint64;
1996       append_composite_type_field (t, "u", elem);
1997 
1998       elem = builtin_type (gdbarch)->builtin_int64;
1999       append_composite_type_field (t, "s", elem);
2000 
2001       tdep->vnd_type = t;
2002     }
2003 
2004   return tdep->vnd_type;
2005 }
2006 
2007 /* Return the type for an AdvSISD S register.  */
2008 
2009 static struct type *
aarch64_vns_type(struct gdbarch * gdbarch)2010 aarch64_vns_type (struct gdbarch *gdbarch)
2011 {
2012   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2013 
2014   if (tdep->vns_type == NULL)
2015     {
2016       struct type *t;
2017       struct type *elem;
2018 
2019       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2020 			       TYPE_CODE_UNION);
2021 
2022       elem = builtin_type (gdbarch)->builtin_float;
2023       append_composite_type_field (t, "f", elem);
2024 
2025       elem = builtin_type (gdbarch)->builtin_uint32;
2026       append_composite_type_field (t, "u", elem);
2027 
2028       elem = builtin_type (gdbarch)->builtin_int32;
2029       append_composite_type_field (t, "s", elem);
2030 
2031       tdep->vns_type = t;
2032     }
2033 
2034   return tdep->vns_type;
2035 }
2036 
2037 /* Return the type for an AdvSISD H register.  */
2038 
2039 static struct type *
aarch64_vnh_type(struct gdbarch * gdbarch)2040 aarch64_vnh_type (struct gdbarch *gdbarch)
2041 {
2042   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2043 
2044   if (tdep->vnh_type == NULL)
2045     {
2046       struct type *t;
2047       struct type *elem;
2048 
2049       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2050 			       TYPE_CODE_UNION);
2051 
2052       elem = builtin_type (gdbarch)->builtin_bfloat16;
2053       append_composite_type_field (t, "bf", elem);
2054 
2055       elem = builtin_type (gdbarch)->builtin_half;
2056       append_composite_type_field (t, "f", elem);
2057 
2058       elem = builtin_type (gdbarch)->builtin_uint16;
2059       append_composite_type_field (t, "u", elem);
2060 
2061       elem = builtin_type (gdbarch)->builtin_int16;
2062       append_composite_type_field (t, "s", elem);
2063 
2064       tdep->vnh_type = t;
2065     }
2066 
2067   return tdep->vnh_type;
2068 }
2069 
2070 /* Return the type for an AdvSISD B register.  */
2071 
2072 static struct type *
aarch64_vnb_type(struct gdbarch * gdbarch)2073 aarch64_vnb_type (struct gdbarch *gdbarch)
2074 {
2075   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2076 
2077   if (tdep->vnb_type == NULL)
2078     {
2079       struct type *t;
2080       struct type *elem;
2081 
2082       t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2083 			       TYPE_CODE_UNION);
2084 
2085       elem = builtin_type (gdbarch)->builtin_uint8;
2086       append_composite_type_field (t, "u", elem);
2087 
2088       elem = builtin_type (gdbarch)->builtin_int8;
2089       append_composite_type_field (t, "s", elem);
2090 
2091       tdep->vnb_type = t;
2092     }
2093 
2094   return tdep->vnb_type;
2095 }
2096 
2097 /* Return the type for an AdvSISD V register.  */
2098 
2099 static struct type *
aarch64_vnv_type(struct gdbarch * gdbarch)2100 aarch64_vnv_type (struct gdbarch *gdbarch)
2101 {
2102   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2103 
2104   if (tdep->vnv_type == NULL)
2105     {
2106       /* The other AArch64 pseudo registers (Q,D,H,S,B) refer to a single value
2107 	 slice from the non-pseudo vector registers.  However NEON V registers
2108 	 are always vector registers, and need constructing as such.  */
2109       const struct builtin_type *bt = builtin_type (gdbarch);
2110 
2111       struct type *t = arch_composite_type (gdbarch, "__gdb_builtin_type_vnv",
2112 					    TYPE_CODE_UNION);
2113 
2114       struct type *sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnd",
2115 				 TYPE_CODE_UNION);
2116       append_composite_type_field (sub, "f",
2117 				   init_vector_type (bt->builtin_double, 2));
2118       append_composite_type_field (sub, "u",
2119 				   init_vector_type (bt->builtin_uint64, 2));
2120       append_composite_type_field (sub, "s",
2121 				   init_vector_type (bt->builtin_int64, 2));
2122       append_composite_type_field (t, "d", sub);
2123 
2124       sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vns",
2125 				 TYPE_CODE_UNION);
2126       append_composite_type_field (sub, "f",
2127 				   init_vector_type (bt->builtin_float, 4));
2128       append_composite_type_field (sub, "u",
2129 				   init_vector_type (bt->builtin_uint32, 4));
2130       append_composite_type_field (sub, "s",
2131 				   init_vector_type (bt->builtin_int32, 4));
2132       append_composite_type_field (t, "s", sub);
2133 
2134       sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnh",
2135 				 TYPE_CODE_UNION);
2136       append_composite_type_field (sub, "bf",
2137 				   init_vector_type (bt->builtin_bfloat16, 8));
2138       append_composite_type_field (sub, "f",
2139 				   init_vector_type (bt->builtin_half, 8));
2140       append_composite_type_field (sub, "u",
2141 				   init_vector_type (bt->builtin_uint16, 8));
2142       append_composite_type_field (sub, "s",
2143 				   init_vector_type (bt->builtin_int16, 8));
2144       append_composite_type_field (t, "h", sub);
2145 
2146       sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnb",
2147 				 TYPE_CODE_UNION);
2148       append_composite_type_field (sub, "u",
2149 				   init_vector_type (bt->builtin_uint8, 16));
2150       append_composite_type_field (sub, "s",
2151 				   init_vector_type (bt->builtin_int8, 16));
2152       append_composite_type_field (t, "b", sub);
2153 
2154       sub = arch_composite_type (gdbarch, "__gdb_builtin_type_vnq",
2155 				 TYPE_CODE_UNION);
2156       append_composite_type_field (sub, "u",
2157 				   init_vector_type (bt->builtin_uint128, 1));
2158       append_composite_type_field (sub, "s",
2159 				   init_vector_type (bt->builtin_int128, 1));
2160       append_composite_type_field (t, "q", sub);
2161 
2162       tdep->vnv_type = t;
2163     }
2164 
2165   return tdep->vnv_type;
2166 }
2167 
2168 /* Implement the "dwarf2_reg_to_regnum" gdbarch method.  */
2169 
2170 static int
aarch64_dwarf_reg_to_regnum(struct gdbarch * gdbarch,int reg)2171 aarch64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
2172 {
2173   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2174 
2175   if (reg >= AARCH64_DWARF_X0 && reg <= AARCH64_DWARF_X0 + 30)
2176     return AARCH64_X0_REGNUM + reg - AARCH64_DWARF_X0;
2177 
2178   if (reg == AARCH64_DWARF_SP)
2179     return AARCH64_SP_REGNUM;
2180 
2181   if (reg >= AARCH64_DWARF_V0 && reg <= AARCH64_DWARF_V0 + 31)
2182     return AARCH64_V0_REGNUM + reg - AARCH64_DWARF_V0;
2183 
2184   if (reg == AARCH64_DWARF_SVE_VG)
2185     return AARCH64_SVE_VG_REGNUM;
2186 
2187   if (reg == AARCH64_DWARF_SVE_FFR)
2188     return AARCH64_SVE_FFR_REGNUM;
2189 
2190   if (reg >= AARCH64_DWARF_SVE_P0 && reg <= AARCH64_DWARF_SVE_P0 + 15)
2191     return AARCH64_SVE_P0_REGNUM + reg - AARCH64_DWARF_SVE_P0;
2192 
2193   if (reg >= AARCH64_DWARF_SVE_Z0 && reg <= AARCH64_DWARF_SVE_Z0 + 15)
2194     return AARCH64_SVE_Z0_REGNUM + reg - AARCH64_DWARF_SVE_Z0;
2195 
2196   if (tdep->has_pauth ())
2197     {
2198       if (reg >= AARCH64_DWARF_PAUTH_DMASK && reg <= AARCH64_DWARF_PAUTH_CMASK)
2199 	return tdep->pauth_reg_base + reg - AARCH64_DWARF_PAUTH_DMASK;
2200 
2201       if (reg == AARCH64_DWARF_PAUTH_RA_STATE)
2202 	return tdep->pauth_ra_state_regnum;
2203     }
2204 
2205   return -1;
2206 }
2207 
2208 /* Implement the "print_insn" gdbarch method.  */
2209 
2210 static int
aarch64_gdb_print_insn(bfd_vma memaddr,disassemble_info * info)2211 aarch64_gdb_print_insn (bfd_vma memaddr, disassemble_info *info)
2212 {
2213   info->symbols = NULL;
2214   return default_print_insn (memaddr, info);
2215 }
2216 
2217 /* AArch64 BRK software debug mode instruction.
2218    Note that AArch64 code is always little-endian.
2219    1101.0100.0010.0000.0000.0000.0000.0000 = 0xd4200000.  */
2220 constexpr gdb_byte aarch64_default_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
2221 
2222 typedef BP_MANIPULATION (aarch64_default_breakpoint) aarch64_breakpoint;
2223 
2224 /* Extract from an array REGS containing the (raw) register state a
2225    function return value of type TYPE, and copy that, in virtual
2226    format, into VALBUF.  */
2227 
2228 static void
aarch64_extract_return_value(struct type * type,struct regcache * regs,gdb_byte * valbuf)2229 aarch64_extract_return_value (struct type *type, struct regcache *regs,
2230 			      gdb_byte *valbuf)
2231 {
2232   struct gdbarch *gdbarch = regs->arch ();
2233   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2234   int elements;
2235   struct type *fundamental_type;
2236 
2237   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2238 					     &fundamental_type))
2239     {
2240       int len = TYPE_LENGTH (fundamental_type);
2241 
2242       for (int i = 0; i < elements; i++)
2243 	{
2244 	  int regno = AARCH64_V0_REGNUM + i;
2245 	  /* Enough space for a full vector register.  */
2246 	  gdb_byte buf[register_size (gdbarch, regno)];
2247 	  gdb_assert (len <= sizeof (buf));
2248 
2249 	  aarch64_debug_printf
2250 	    ("read HFA or HVA return value element %d from %s",
2251 	     i + 1, gdbarch_register_name (gdbarch, regno));
2252 
2253 	  regs->cooked_read (regno, buf);
2254 
2255 	  memcpy (valbuf, buf, len);
2256 	  valbuf += len;
2257 	}
2258     }
2259   else if (type->code () == TYPE_CODE_INT
2260 	   || type->code () == TYPE_CODE_CHAR
2261 	   || type->code () == TYPE_CODE_BOOL
2262 	   || type->code () == TYPE_CODE_PTR
2263 	   || TYPE_IS_REFERENCE (type)
2264 	   || type->code () == TYPE_CODE_ENUM)
2265     {
2266       /* If the type is a plain integer, then the access is
2267 	 straight-forward.  Otherwise we have to play around a bit
2268 	 more.  */
2269       int len = TYPE_LENGTH (type);
2270       int regno = AARCH64_X0_REGNUM;
2271       ULONGEST tmp;
2272 
2273       while (len > 0)
2274 	{
2275 	  /* By using store_unsigned_integer we avoid having to do
2276 	     anything special for small big-endian values.  */
2277 	  regcache_cooked_read_unsigned (regs, regno++, &tmp);
2278 	  store_unsigned_integer (valbuf,
2279 				  (len > X_REGISTER_SIZE
2280 				   ? X_REGISTER_SIZE : len), byte_order, tmp);
2281 	  len -= X_REGISTER_SIZE;
2282 	  valbuf += X_REGISTER_SIZE;
2283 	}
2284     }
2285   else
2286     {
2287       /* For a structure or union the behaviour is as if the value had
2288 	 been stored to word-aligned memory and then loaded into
2289 	 registers with 64-bit load instruction(s).  */
2290       int len = TYPE_LENGTH (type);
2291       int regno = AARCH64_X0_REGNUM;
2292       bfd_byte buf[X_REGISTER_SIZE];
2293 
2294       while (len > 0)
2295 	{
2296 	  regs->cooked_read (regno++, buf);
2297 	  memcpy (valbuf, buf, len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2298 	  len -= X_REGISTER_SIZE;
2299 	  valbuf += X_REGISTER_SIZE;
2300 	}
2301     }
2302 }
2303 
2304 
2305 /* Will a function return an aggregate type in memory or in a
2306    register?  Return 0 if an aggregate type can be returned in a
2307    register, 1 if it must be returned in memory.  */
2308 
2309 static int
aarch64_return_in_memory(struct gdbarch * gdbarch,struct type * type)2310 aarch64_return_in_memory (struct gdbarch *gdbarch, struct type *type)
2311 {
2312   type = check_typedef (type);
2313   int elements;
2314   struct type *fundamental_type;
2315 
2316   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2317 					     &fundamental_type))
2318     {
2319       /* v0-v7 are used to return values and one register is allocated
2320 	 for one member.  However, HFA or HVA has at most four members.  */
2321       return 0;
2322     }
2323 
2324   if (TYPE_LENGTH (type) > 16)
2325     {
2326       /* PCS B.6 Aggregates larger than 16 bytes are passed by
2327 	 invisible reference.  */
2328 
2329       return 1;
2330     }
2331 
2332   return 0;
2333 }
2334 
2335 /* Write into appropriate registers a function return value of type
2336    TYPE, given in virtual format.  */
2337 
2338 static void
aarch64_store_return_value(struct type * type,struct regcache * regs,const gdb_byte * valbuf)2339 aarch64_store_return_value (struct type *type, struct regcache *regs,
2340 			    const gdb_byte *valbuf)
2341 {
2342   struct gdbarch *gdbarch = regs->arch ();
2343   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2344   int elements;
2345   struct type *fundamental_type;
2346 
2347   if (aapcs_is_vfp_call_or_return_candidate (type, &elements,
2348 					     &fundamental_type))
2349     {
2350       int len = TYPE_LENGTH (fundamental_type);
2351 
2352       for (int i = 0; i < elements; i++)
2353 	{
2354 	  int regno = AARCH64_V0_REGNUM + i;
2355 	  /* Enough space for a full vector register.  */
2356 	  gdb_byte tmpbuf[register_size (gdbarch, regno)];
2357 	  gdb_assert (len <= sizeof (tmpbuf));
2358 
2359 	  aarch64_debug_printf
2360 	    ("write HFA or HVA return value element %d to %s",
2361 	     i + 1, gdbarch_register_name (gdbarch, regno));
2362 
2363 	  memcpy (tmpbuf, valbuf,
2364 		  len > V_REGISTER_SIZE ? V_REGISTER_SIZE : len);
2365 	  regs->cooked_write (regno, tmpbuf);
2366 	  valbuf += len;
2367 	}
2368     }
2369   else if (type->code () == TYPE_CODE_INT
2370 	   || type->code () == TYPE_CODE_CHAR
2371 	   || type->code () == TYPE_CODE_BOOL
2372 	   || type->code () == TYPE_CODE_PTR
2373 	   || TYPE_IS_REFERENCE (type)
2374 	   || type->code () == TYPE_CODE_ENUM)
2375     {
2376       if (TYPE_LENGTH (type) <= X_REGISTER_SIZE)
2377 	{
2378 	  /* Values of one word or less are zero/sign-extended and
2379 	     returned in r0.  */
2380 	  bfd_byte tmpbuf[X_REGISTER_SIZE];
2381 	  LONGEST val = unpack_long (type, valbuf);
2382 
2383 	  store_signed_integer (tmpbuf, X_REGISTER_SIZE, byte_order, val);
2384 	  regs->cooked_write (AARCH64_X0_REGNUM, tmpbuf);
2385 	}
2386       else
2387 	{
2388 	  /* Integral values greater than one word are stored in
2389 	     consecutive registers starting with r0.  This will always
2390 	     be a multiple of the regiser size.  */
2391 	  int len = TYPE_LENGTH (type);
2392 	  int regno = AARCH64_X0_REGNUM;
2393 
2394 	  while (len > 0)
2395 	    {
2396 	      regs->cooked_write (regno++, valbuf);
2397 	      len -= X_REGISTER_SIZE;
2398 	      valbuf += X_REGISTER_SIZE;
2399 	    }
2400 	}
2401     }
2402   else
2403     {
2404       /* For a structure or union the behaviour is as if the value had
2405 	 been stored to word-aligned memory and then loaded into
2406 	 registers with 64-bit load instruction(s).  */
2407       int len = TYPE_LENGTH (type);
2408       int regno = AARCH64_X0_REGNUM;
2409       bfd_byte tmpbuf[X_REGISTER_SIZE];
2410 
2411       while (len > 0)
2412 	{
2413 	  memcpy (tmpbuf, valbuf,
2414 		  len > X_REGISTER_SIZE ? X_REGISTER_SIZE : len);
2415 	  regs->cooked_write (regno++, tmpbuf);
2416 	  len -= X_REGISTER_SIZE;
2417 	  valbuf += X_REGISTER_SIZE;
2418 	}
2419     }
2420 }
2421 
2422 /* Implement the "return_value" gdbarch method.  */
2423 
2424 static enum return_value_convention
aarch64_return_value(struct gdbarch * gdbarch,struct value * func_value,struct type * valtype,struct regcache * regcache,gdb_byte * readbuf,const gdb_byte * writebuf)2425 aarch64_return_value (struct gdbarch *gdbarch, struct value *func_value,
2426 		      struct type *valtype, struct regcache *regcache,
2427 		      gdb_byte *readbuf, const gdb_byte *writebuf)
2428 {
2429 
2430   if (valtype->code () == TYPE_CODE_STRUCT
2431       || valtype->code () == TYPE_CODE_UNION
2432       || valtype->code () == TYPE_CODE_ARRAY)
2433     {
2434       if (aarch64_return_in_memory (gdbarch, valtype))
2435 	{
2436 	  aarch64_debug_printf ("return value in memory");
2437 	  return RETURN_VALUE_STRUCT_CONVENTION;
2438 	}
2439     }
2440 
2441   if (writebuf)
2442     aarch64_store_return_value (valtype, regcache, writebuf);
2443 
2444   if (readbuf)
2445     aarch64_extract_return_value (valtype, regcache, readbuf);
2446 
2447   aarch64_debug_printf ("return value in registers");
2448 
2449   return RETURN_VALUE_REGISTER_CONVENTION;
2450 }
2451 
2452 /* Implement the "get_longjmp_target" gdbarch method.  */
2453 
2454 static int
aarch64_get_longjmp_target(struct frame_info * frame,CORE_ADDR * pc)2455 aarch64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2456 {
2457   CORE_ADDR jb_addr;
2458   gdb_byte buf[X_REGISTER_SIZE];
2459   struct gdbarch *gdbarch = get_frame_arch (frame);
2460   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2461   enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2462 
2463   jb_addr = get_frame_register_unsigned (frame, AARCH64_X0_REGNUM);
2464 
2465   if (target_read_memory (jb_addr + tdep->jb_pc * tdep->jb_elt_size, buf,
2466 			  X_REGISTER_SIZE))
2467     return 0;
2468 
2469   *pc = extract_unsigned_integer (buf, X_REGISTER_SIZE, byte_order);
2470   return 1;
2471 }
2472 
2473 /* Implement the "gen_return_address" gdbarch method.  */
2474 
2475 static void
aarch64_gen_return_address(struct gdbarch * gdbarch,struct agent_expr * ax,struct axs_value * value,CORE_ADDR scope)2476 aarch64_gen_return_address (struct gdbarch *gdbarch,
2477 			    struct agent_expr *ax, struct axs_value *value,
2478 			    CORE_ADDR scope)
2479 {
2480   value->type = register_type (gdbarch, AARCH64_LR_REGNUM);
2481   value->kind = axs_lvalue_register;
2482   value->u.reg = AARCH64_LR_REGNUM;
2483 }
2484 
2485 
2486 /* Return the pseudo register name corresponding to register regnum.  */
2487 
2488 static const char *
aarch64_pseudo_register_name(struct gdbarch * gdbarch,int regnum)2489 aarch64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
2490 {
2491   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2492 
2493   static const char *const q_name[] =
2494     {
2495       "q0", "q1", "q2", "q3",
2496       "q4", "q5", "q6", "q7",
2497       "q8", "q9", "q10", "q11",
2498       "q12", "q13", "q14", "q15",
2499       "q16", "q17", "q18", "q19",
2500       "q20", "q21", "q22", "q23",
2501       "q24", "q25", "q26", "q27",
2502       "q28", "q29", "q30", "q31",
2503     };
2504 
2505   static const char *const d_name[] =
2506     {
2507       "d0", "d1", "d2", "d3",
2508       "d4", "d5", "d6", "d7",
2509       "d8", "d9", "d10", "d11",
2510       "d12", "d13", "d14", "d15",
2511       "d16", "d17", "d18", "d19",
2512       "d20", "d21", "d22", "d23",
2513       "d24", "d25", "d26", "d27",
2514       "d28", "d29", "d30", "d31",
2515     };
2516 
2517   static const char *const s_name[] =
2518     {
2519       "s0", "s1", "s2", "s3",
2520       "s4", "s5", "s6", "s7",
2521       "s8", "s9", "s10", "s11",
2522       "s12", "s13", "s14", "s15",
2523       "s16", "s17", "s18", "s19",
2524       "s20", "s21", "s22", "s23",
2525       "s24", "s25", "s26", "s27",
2526       "s28", "s29", "s30", "s31",
2527     };
2528 
2529   static const char *const h_name[] =
2530     {
2531       "h0", "h1", "h2", "h3",
2532       "h4", "h5", "h6", "h7",
2533       "h8", "h9", "h10", "h11",
2534       "h12", "h13", "h14", "h15",
2535       "h16", "h17", "h18", "h19",
2536       "h20", "h21", "h22", "h23",
2537       "h24", "h25", "h26", "h27",
2538       "h28", "h29", "h30", "h31",
2539     };
2540 
2541   static const char *const b_name[] =
2542     {
2543       "b0", "b1", "b2", "b3",
2544       "b4", "b5", "b6", "b7",
2545       "b8", "b9", "b10", "b11",
2546       "b12", "b13", "b14", "b15",
2547       "b16", "b17", "b18", "b19",
2548       "b20", "b21", "b22", "b23",
2549       "b24", "b25", "b26", "b27",
2550       "b28", "b29", "b30", "b31",
2551     };
2552 
2553   int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2554 
2555   if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2556     return q_name[p_regnum - AARCH64_Q0_REGNUM];
2557 
2558   if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2559     return d_name[p_regnum - AARCH64_D0_REGNUM];
2560 
2561   if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2562     return s_name[p_regnum - AARCH64_S0_REGNUM];
2563 
2564   if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2565     return h_name[p_regnum - AARCH64_H0_REGNUM];
2566 
2567   if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2568     return b_name[p_regnum - AARCH64_B0_REGNUM];
2569 
2570   if (tdep->has_sve ())
2571     {
2572       static const char *const sve_v_name[] =
2573 	{
2574 	  "v0", "v1", "v2", "v3",
2575 	  "v4", "v5", "v6", "v7",
2576 	  "v8", "v9", "v10", "v11",
2577 	  "v12", "v13", "v14", "v15",
2578 	  "v16", "v17", "v18", "v19",
2579 	  "v20", "v21", "v22", "v23",
2580 	  "v24", "v25", "v26", "v27",
2581 	  "v28", "v29", "v30", "v31",
2582 	};
2583 
2584       if (p_regnum >= AARCH64_SVE_V0_REGNUM
2585 	  && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2586 	return sve_v_name[p_regnum - AARCH64_SVE_V0_REGNUM];
2587     }
2588 
2589   /* RA_STATE is used for unwinding only.  Do not assign it a name - this
2590      prevents it from being read by methods such as
2591      mi_cmd_trace_frame_collected.  */
2592   if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2593     return "";
2594 
2595   internal_error (__FILE__, __LINE__,
2596 		  _("aarch64_pseudo_register_name: bad register number %d"),
2597 		  p_regnum);
2598 }
2599 
2600 /* Implement the "pseudo_register_type" tdesc_arch_data method.  */
2601 
2602 static struct type *
aarch64_pseudo_register_type(struct gdbarch * gdbarch,int regnum)2603 aarch64_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
2604 {
2605   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2606 
2607   int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2608 
2609   if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2610     return aarch64_vnq_type (gdbarch);
2611 
2612   if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2613     return aarch64_vnd_type (gdbarch);
2614 
2615   if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2616     return aarch64_vns_type (gdbarch);
2617 
2618   if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2619     return aarch64_vnh_type (gdbarch);
2620 
2621   if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2622     return aarch64_vnb_type (gdbarch);
2623 
2624   if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2625       && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2626     return aarch64_vnv_type (gdbarch);
2627 
2628   if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2629     return builtin_type (gdbarch)->builtin_uint64;
2630 
2631   internal_error (__FILE__, __LINE__,
2632 		  _("aarch64_pseudo_register_type: bad register number %d"),
2633 		  p_regnum);
2634 }
2635 
2636 /* Implement the "pseudo_register_reggroup_p" tdesc_arch_data method.  */
2637 
2638 static int
aarch64_pseudo_register_reggroup_p(struct gdbarch * gdbarch,int regnum,struct reggroup * group)2639 aarch64_pseudo_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
2640 				    struct reggroup *group)
2641 {
2642   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2643 
2644   int p_regnum = regnum - gdbarch_num_regs (gdbarch);
2645 
2646   if (p_regnum >= AARCH64_Q0_REGNUM && p_regnum < AARCH64_Q0_REGNUM + 32)
2647     return group == all_reggroup || group == vector_reggroup;
2648   else if (p_regnum >= AARCH64_D0_REGNUM && p_regnum < AARCH64_D0_REGNUM + 32)
2649     return (group == all_reggroup || group == vector_reggroup
2650 	    || group == float_reggroup);
2651   else if (p_regnum >= AARCH64_S0_REGNUM && p_regnum < AARCH64_S0_REGNUM + 32)
2652     return (group == all_reggroup || group == vector_reggroup
2653 	    || group == float_reggroup);
2654   else if (p_regnum >= AARCH64_H0_REGNUM && p_regnum < AARCH64_H0_REGNUM + 32)
2655     return group == all_reggroup || group == vector_reggroup;
2656   else if (p_regnum >= AARCH64_B0_REGNUM && p_regnum < AARCH64_B0_REGNUM + 32)
2657     return group == all_reggroup || group == vector_reggroup;
2658   else if (tdep->has_sve () && p_regnum >= AARCH64_SVE_V0_REGNUM
2659 	   && p_regnum < AARCH64_SVE_V0_REGNUM + AARCH64_V_REGS_NUM)
2660     return group == all_reggroup || group == vector_reggroup;
2661   /* RA_STATE is used for unwinding only.  Do not assign it to any groups.  */
2662   if (tdep->has_pauth () && regnum == tdep->pauth_ra_state_regnum)
2663     return 0;
2664 
2665   return group == all_reggroup;
2666 }
2667 
2668 /* Helper for aarch64_pseudo_read_value.  */
2669 
2670 static struct value *
aarch64_pseudo_read_value_1(struct gdbarch * gdbarch,readable_regcache * regcache,int regnum_offset,int regsize,struct value * result_value)2671 aarch64_pseudo_read_value_1 (struct gdbarch *gdbarch,
2672 			     readable_regcache *regcache, int regnum_offset,
2673 			     int regsize, struct value *result_value)
2674 {
2675   unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2676 
2677   /* Enough space for a full vector register.  */
2678   gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2679   gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2680 
2681   if (regcache->raw_read (v_regnum, reg_buf) != REG_VALID)
2682     mark_value_bytes_unavailable (result_value, 0,
2683 				  TYPE_LENGTH (value_type (result_value)));
2684   else
2685     memcpy (value_contents_raw (result_value), reg_buf, regsize);
2686 
2687   return result_value;
2688  }
2689 
2690 /* Implement the "pseudo_register_read_value" gdbarch method.  */
2691 
2692 static struct value *
aarch64_pseudo_read_value(struct gdbarch * gdbarch,readable_regcache * regcache,int regnum)2693 aarch64_pseudo_read_value (struct gdbarch *gdbarch, readable_regcache *regcache,
2694 			   int regnum)
2695 {
2696   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2697   struct value *result_value = allocate_value (register_type (gdbarch, regnum));
2698 
2699   VALUE_LVAL (result_value) = lval_register;
2700   VALUE_REGNUM (result_value) = regnum;
2701 
2702   regnum -= gdbarch_num_regs (gdbarch);
2703 
2704   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2705     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2706 					regnum - AARCH64_Q0_REGNUM,
2707 					Q_REGISTER_SIZE, result_value);
2708 
2709   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2710     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2711 					regnum - AARCH64_D0_REGNUM,
2712 					D_REGISTER_SIZE, result_value);
2713 
2714   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2715     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2716 					regnum - AARCH64_S0_REGNUM,
2717 					S_REGISTER_SIZE, result_value);
2718 
2719   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2720     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2721 					regnum - AARCH64_H0_REGNUM,
2722 					H_REGISTER_SIZE, result_value);
2723 
2724   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2725     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2726 					regnum - AARCH64_B0_REGNUM,
2727 					B_REGISTER_SIZE, result_value);
2728 
2729   if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2730       && regnum < AARCH64_SVE_V0_REGNUM + 32)
2731     return aarch64_pseudo_read_value_1 (gdbarch, regcache,
2732 					regnum - AARCH64_SVE_V0_REGNUM,
2733 					V_REGISTER_SIZE, result_value);
2734 
2735   gdb_assert_not_reached ("regnum out of bound");
2736 }
2737 
2738 /* Helper for aarch64_pseudo_write.  */
2739 
2740 static void
aarch64_pseudo_write_1(struct gdbarch * gdbarch,struct regcache * regcache,int regnum_offset,int regsize,const gdb_byte * buf)2741 aarch64_pseudo_write_1 (struct gdbarch *gdbarch, struct regcache *regcache,
2742 			int regnum_offset, int regsize, const gdb_byte *buf)
2743 {
2744   unsigned v_regnum = AARCH64_V0_REGNUM + regnum_offset;
2745 
2746   /* Enough space for a full vector register.  */
2747   gdb_byte reg_buf[register_size (gdbarch, AARCH64_V0_REGNUM)];
2748   gdb_static_assert (AARCH64_V0_REGNUM == AARCH64_SVE_Z0_REGNUM);
2749 
2750   /* Ensure the register buffer is zero, we want gdb writes of the
2751      various 'scalar' pseudo registers to behavior like architectural
2752      writes, register width bytes are written the remainder are set to
2753      zero.  */
2754   memset (reg_buf, 0, register_size (gdbarch, AARCH64_V0_REGNUM));
2755 
2756   memcpy (reg_buf, buf, regsize);
2757   regcache->raw_write (v_regnum, reg_buf);
2758 }
2759 
2760 /* Implement the "pseudo_register_write" gdbarch method.  */
2761 
2762 static void
aarch64_pseudo_write(struct gdbarch * gdbarch,struct regcache * regcache,int regnum,const gdb_byte * buf)2763 aarch64_pseudo_write (struct gdbarch *gdbarch, struct regcache *regcache,
2764 		      int regnum, const gdb_byte *buf)
2765 {
2766   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2767   regnum -= gdbarch_num_regs (gdbarch);
2768 
2769   if (regnum >= AARCH64_Q0_REGNUM && regnum < AARCH64_Q0_REGNUM + 32)
2770     return aarch64_pseudo_write_1 (gdbarch, regcache,
2771 				   regnum - AARCH64_Q0_REGNUM, Q_REGISTER_SIZE,
2772 				   buf);
2773 
2774   if (regnum >= AARCH64_D0_REGNUM && regnum < AARCH64_D0_REGNUM + 32)
2775     return aarch64_pseudo_write_1 (gdbarch, regcache,
2776 				   regnum - AARCH64_D0_REGNUM, D_REGISTER_SIZE,
2777 				   buf);
2778 
2779   if (regnum >= AARCH64_S0_REGNUM && regnum < AARCH64_S0_REGNUM + 32)
2780     return aarch64_pseudo_write_1 (gdbarch, regcache,
2781 				   regnum - AARCH64_S0_REGNUM, S_REGISTER_SIZE,
2782 				   buf);
2783 
2784   if (regnum >= AARCH64_H0_REGNUM && regnum < AARCH64_H0_REGNUM + 32)
2785     return aarch64_pseudo_write_1 (gdbarch, regcache,
2786 				   regnum - AARCH64_H0_REGNUM, H_REGISTER_SIZE,
2787 				   buf);
2788 
2789   if (regnum >= AARCH64_B0_REGNUM && regnum < AARCH64_B0_REGNUM + 32)
2790     return aarch64_pseudo_write_1 (gdbarch, regcache,
2791 				   regnum - AARCH64_B0_REGNUM, B_REGISTER_SIZE,
2792 				   buf);
2793 
2794   if (tdep->has_sve () && regnum >= AARCH64_SVE_V0_REGNUM
2795       && regnum < AARCH64_SVE_V0_REGNUM + 32)
2796     return aarch64_pseudo_write_1 (gdbarch, regcache,
2797 				   regnum - AARCH64_SVE_V0_REGNUM,
2798 				   V_REGISTER_SIZE, buf);
2799 
2800   gdb_assert_not_reached ("regnum out of bound");
2801 }
2802 
2803 /* Callback function for user_reg_add.  */
2804 
2805 static struct value *
value_of_aarch64_user_reg(struct frame_info * frame,const void * baton)2806 value_of_aarch64_user_reg (struct frame_info *frame, const void *baton)
2807 {
2808   const int *reg_p = (const int *) baton;
2809 
2810   return value_of_register (*reg_p, frame);
2811 }
2812 
2813 
2814 /* Implement the "software_single_step" gdbarch method, needed to
2815    single step through atomic sequences on AArch64.  */
2816 
2817 static std::vector<CORE_ADDR>
aarch64_software_single_step(struct regcache * regcache)2818 aarch64_software_single_step (struct regcache *regcache)
2819 {
2820   struct gdbarch *gdbarch = regcache->arch ();
2821   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
2822   const int insn_size = 4;
2823   const int atomic_sequence_length = 16; /* Instruction sequence length.  */
2824   CORE_ADDR pc = regcache_read_pc (regcache);
2825   CORE_ADDR breaks[2] = { CORE_ADDR_MAX, CORE_ADDR_MAX };
2826   CORE_ADDR loc = pc;
2827   CORE_ADDR closing_insn = 0;
2828   uint32_t insn = read_memory_unsigned_integer (loc, insn_size,
2829 						byte_order_for_code);
2830   int index;
2831   int insn_count;
2832   int bc_insn_count = 0; /* Conditional branch instruction count.  */
2833   int last_breakpoint = 0; /* Defaults to 0 (no breakpoints placed).  */
2834   aarch64_inst inst;
2835 
2836   if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2837     return {};
2838 
2839   /* Look for a Load Exclusive instruction which begins the sequence.  */
2840   if (inst.opcode->iclass != ldstexcl || bit (insn, 22) == 0)
2841     return {};
2842 
2843   for (insn_count = 0; insn_count < atomic_sequence_length; ++insn_count)
2844     {
2845       loc += insn_size;
2846       insn = read_memory_unsigned_integer (loc, insn_size,
2847 					   byte_order_for_code);
2848 
2849       if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
2850 	return {};
2851       /* Check if the instruction is a conditional branch.  */
2852       if (inst.opcode->iclass == condbranch)
2853 	{
2854 	  gdb_assert (inst.operands[0].type == AARCH64_OPND_ADDR_PCREL19);
2855 
2856 	  if (bc_insn_count >= 1)
2857 	    return {};
2858 
2859 	  /* It is, so we'll try to set a breakpoint at the destination.  */
2860 	  breaks[1] = loc + inst.operands[0].imm.value;
2861 
2862 	  bc_insn_count++;
2863 	  last_breakpoint++;
2864 	}
2865 
2866       /* Look for the Store Exclusive which closes the atomic sequence.  */
2867       if (inst.opcode->iclass == ldstexcl && bit (insn, 22) == 0)
2868 	{
2869 	  closing_insn = loc;
2870 	  break;
2871 	}
2872     }
2873 
2874   /* We didn't find a closing Store Exclusive instruction, fall back.  */
2875   if (!closing_insn)
2876     return {};
2877 
2878   /* Insert breakpoint after the end of the atomic sequence.  */
2879   breaks[0] = loc + insn_size;
2880 
2881   /* Check for duplicated breakpoints, and also check that the second
2882      breakpoint is not within the atomic sequence.  */
2883   if (last_breakpoint
2884       && (breaks[1] == breaks[0]
2885 	  || (breaks[1] >= pc && breaks[1] <= closing_insn)))
2886     last_breakpoint = 0;
2887 
2888   std::vector<CORE_ADDR> next_pcs;
2889 
2890   /* Insert the breakpoint at the end of the sequence, and one at the
2891      destination of the conditional branch, if it exists.  */
2892   for (index = 0; index <= last_breakpoint; index++)
2893     next_pcs.push_back (breaks[index]);
2894 
2895   return next_pcs;
2896 }
2897 
2898 struct aarch64_displaced_step_copy_insn_closure
2899   : public displaced_step_copy_insn_closure
2900 {
2901   /* It is true when condition instruction, such as B.CON, TBZ, etc,
2902      is being displaced stepping.  */
2903   bool cond = false;
2904 
2905   /* PC adjustment offset after displaced stepping.  If 0, then we don't
2906      write the PC back, assuming the PC is already the right address.  */
2907   int32_t pc_adjust = 0;
2908 };
2909 
2910 /* Data when visiting instructions for displaced stepping.  */
2911 
2912 struct aarch64_displaced_step_data
2913 {
2914   struct aarch64_insn_data base;
2915 
2916   /* The address where the instruction will be executed at.  */
2917   CORE_ADDR new_addr;
2918   /* Buffer of instructions to be copied to NEW_ADDR to execute.  */
2919   uint32_t insn_buf[AARCH64_DISPLACED_MODIFIED_INSNS];
2920   /* Number of instructions in INSN_BUF.  */
2921   unsigned insn_count;
2922   /* Registers when doing displaced stepping.  */
2923   struct regcache *regs;
2924 
2925   aarch64_displaced_step_copy_insn_closure *dsc;
2926 };
2927 
2928 /* Implementation of aarch64_insn_visitor method "b".  */
2929 
2930 static void
aarch64_displaced_step_b(const int is_bl,const int32_t offset,struct aarch64_insn_data * data)2931 aarch64_displaced_step_b (const int is_bl, const int32_t offset,
2932 			  struct aarch64_insn_data *data)
2933 {
2934   struct aarch64_displaced_step_data *dsd
2935     = (struct aarch64_displaced_step_data *) data;
2936   int64_t new_offset = data->insn_addr - dsd->new_addr + offset;
2937 
2938   if (can_encode_int32 (new_offset, 28))
2939     {
2940       /* Emit B rather than BL, because executing BL on a new address
2941 	 will get the wrong address into LR.  In order to avoid this,
2942 	 we emit B, and update LR if the instruction is BL.  */
2943       emit_b (dsd->insn_buf, 0, new_offset);
2944       dsd->insn_count++;
2945     }
2946   else
2947     {
2948       /* Write NOP.  */
2949       emit_nop (dsd->insn_buf);
2950       dsd->insn_count++;
2951       dsd->dsc->pc_adjust = offset;
2952     }
2953 
2954   if (is_bl)
2955     {
2956       /* Update LR.  */
2957       regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
2958 				      data->insn_addr + 4);
2959     }
2960 }
2961 
2962 /* Implementation of aarch64_insn_visitor method "b_cond".  */
2963 
2964 static void
aarch64_displaced_step_b_cond(const unsigned cond,const int32_t offset,struct aarch64_insn_data * data)2965 aarch64_displaced_step_b_cond (const unsigned cond, const int32_t offset,
2966 			       struct aarch64_insn_data *data)
2967 {
2968   struct aarch64_displaced_step_data *dsd
2969     = (struct aarch64_displaced_step_data *) data;
2970 
2971   /* GDB has to fix up PC after displaced step this instruction
2972      differently according to the condition is true or false.  Instead
2973      of checking COND against conditional flags, we can use
2974      the following instructions, and GDB can tell how to fix up PC
2975      according to the PC value.
2976 
2977      B.COND TAKEN    ; If cond is true, then jump to TAKEN.
2978      INSN1     ;
2979      TAKEN:
2980      INSN2
2981   */
2982 
2983   emit_bcond (dsd->insn_buf, cond, 8);
2984   dsd->dsc->cond = true;
2985   dsd->dsc->pc_adjust = offset;
2986   dsd->insn_count = 1;
2987 }
2988 
2989 /* Dynamically allocate a new register.  If we know the register
2990    statically, we should make it a global as above instead of using this
2991    helper function.  */
2992 
2993 static struct aarch64_register
aarch64_register(unsigned num,int is64)2994 aarch64_register (unsigned num, int is64)
2995 {
2996   return (struct aarch64_register) { num, is64 };
2997 }
2998 
2999 /* Implementation of aarch64_insn_visitor method "cb".  */
3000 
3001 static void
aarch64_displaced_step_cb(const int32_t offset,const int is_cbnz,const unsigned rn,int is64,struct aarch64_insn_data * data)3002 aarch64_displaced_step_cb (const int32_t offset, const int is_cbnz,
3003 			   const unsigned rn, int is64,
3004 			   struct aarch64_insn_data *data)
3005 {
3006   struct aarch64_displaced_step_data *dsd
3007     = (struct aarch64_displaced_step_data *) data;
3008 
3009   /* The offset is out of range for a compare and branch
3010      instruction.  We can use the following instructions instead:
3011 
3012 	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
3013 	 INSN1     ;
3014 	 TAKEN:
3015 	 INSN2
3016   */
3017   emit_cb (dsd->insn_buf, is_cbnz, aarch64_register (rn, is64), 8);
3018   dsd->insn_count = 1;
3019   dsd->dsc->cond = true;
3020   dsd->dsc->pc_adjust = offset;
3021 }
3022 
3023 /* Implementation of aarch64_insn_visitor method "tb".  */
3024 
3025 static void
aarch64_displaced_step_tb(const int32_t offset,int is_tbnz,const unsigned rt,unsigned bit,struct aarch64_insn_data * data)3026 aarch64_displaced_step_tb (const int32_t offset, int is_tbnz,
3027 			   const unsigned rt, unsigned bit,
3028 			   struct aarch64_insn_data *data)
3029 {
3030   struct aarch64_displaced_step_data *dsd
3031     = (struct aarch64_displaced_step_data *) data;
3032 
3033   /* The offset is out of range for a test bit and branch
3034      instruction We can use the following instructions instead:
3035 
3036      TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
3037      INSN1         ;
3038      TAKEN:
3039      INSN2
3040 
3041   */
3042   emit_tb (dsd->insn_buf, is_tbnz, bit, aarch64_register (rt, 1), 8);
3043   dsd->insn_count = 1;
3044   dsd->dsc->cond = true;
3045   dsd->dsc->pc_adjust = offset;
3046 }
3047 
3048 /* Implementation of aarch64_insn_visitor method "adr".  */
3049 
3050 static void
aarch64_displaced_step_adr(const int32_t offset,const unsigned rd,const int is_adrp,struct aarch64_insn_data * data)3051 aarch64_displaced_step_adr (const int32_t offset, const unsigned rd,
3052 			    const int is_adrp, struct aarch64_insn_data *data)
3053 {
3054   struct aarch64_displaced_step_data *dsd
3055     = (struct aarch64_displaced_step_data *) data;
3056   /* We know exactly the address the ADR{P,} instruction will compute.
3057      We can just write it to the destination register.  */
3058   CORE_ADDR address = data->insn_addr + offset;
3059 
3060   if (is_adrp)
3061     {
3062       /* Clear the lower 12 bits of the offset to get the 4K page.  */
3063       regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3064 				      address & ~0xfff);
3065     }
3066   else
3067       regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rd,
3068 				      address);
3069 
3070   dsd->dsc->pc_adjust = 4;
3071   emit_nop (dsd->insn_buf);
3072   dsd->insn_count = 1;
3073 }
3074 
3075 /* Implementation of aarch64_insn_visitor method "ldr_literal".  */
3076 
3077 static void
aarch64_displaced_step_ldr_literal(const int32_t offset,const int is_sw,const unsigned rt,const int is64,struct aarch64_insn_data * data)3078 aarch64_displaced_step_ldr_literal (const int32_t offset, const int is_sw,
3079 				    const unsigned rt, const int is64,
3080 				    struct aarch64_insn_data *data)
3081 {
3082   struct aarch64_displaced_step_data *dsd
3083     = (struct aarch64_displaced_step_data *) data;
3084   CORE_ADDR address = data->insn_addr + offset;
3085   struct aarch64_memory_operand zero = { MEMORY_OPERAND_OFFSET, 0 };
3086 
3087   regcache_cooked_write_unsigned (dsd->regs, AARCH64_X0_REGNUM + rt,
3088 				  address);
3089 
3090   if (is_sw)
3091     dsd->insn_count = emit_ldrsw (dsd->insn_buf, aarch64_register (rt, 1),
3092 				  aarch64_register (rt, 1), zero);
3093   else
3094     dsd->insn_count = emit_ldr (dsd->insn_buf, aarch64_register (rt, is64),
3095 				aarch64_register (rt, 1), zero);
3096 
3097   dsd->dsc->pc_adjust = 4;
3098 }
3099 
3100 /* Implementation of aarch64_insn_visitor method "others".  */
3101 
3102 static void
aarch64_displaced_step_others(const uint32_t insn,struct aarch64_insn_data * data)3103 aarch64_displaced_step_others (const uint32_t insn,
3104 			       struct aarch64_insn_data *data)
3105 {
3106   struct aarch64_displaced_step_data *dsd
3107     = (struct aarch64_displaced_step_data *) data;
3108 
3109   uint32_t masked_insn = (insn & CLEAR_Rn_MASK);
3110   if (masked_insn == BLR)
3111     {
3112       /* Emit a BR to the same register and then update LR to the original
3113 	 address (similar to aarch64_displaced_step_b).  */
3114       aarch64_emit_insn (dsd->insn_buf, insn & 0xffdfffff);
3115       regcache_cooked_write_unsigned (dsd->regs, AARCH64_LR_REGNUM,
3116 				      data->insn_addr + 4);
3117     }
3118   else
3119     aarch64_emit_insn (dsd->insn_buf, insn);
3120   dsd->insn_count = 1;
3121 
3122   if (masked_insn == RET || masked_insn == BR || masked_insn == BLR)
3123     dsd->dsc->pc_adjust = 0;
3124   else
3125     dsd->dsc->pc_adjust = 4;
3126 }
3127 
3128 static const struct aarch64_insn_visitor visitor =
3129 {
3130   aarch64_displaced_step_b,
3131   aarch64_displaced_step_b_cond,
3132   aarch64_displaced_step_cb,
3133   aarch64_displaced_step_tb,
3134   aarch64_displaced_step_adr,
3135   aarch64_displaced_step_ldr_literal,
3136   aarch64_displaced_step_others,
3137 };
3138 
3139 /* Implement the "displaced_step_copy_insn" gdbarch method.  */
3140 
3141 displaced_step_copy_insn_closure_up
aarch64_displaced_step_copy_insn(struct gdbarch * gdbarch,CORE_ADDR from,CORE_ADDR to,struct regcache * regs)3142 aarch64_displaced_step_copy_insn (struct gdbarch *gdbarch,
3143 				  CORE_ADDR from, CORE_ADDR to,
3144 				  struct regcache *regs)
3145 {
3146   enum bfd_endian byte_order_for_code = gdbarch_byte_order_for_code (gdbarch);
3147   uint32_t insn = read_memory_unsigned_integer (from, 4, byte_order_for_code);
3148   struct aarch64_displaced_step_data dsd;
3149   aarch64_inst inst;
3150 
3151   if (aarch64_decode_insn (insn, &inst, 1, NULL) != 0)
3152     return NULL;
3153 
3154   /* Look for a Load Exclusive instruction which begins the sequence.  */
3155   if (inst.opcode->iclass == ldstexcl && bit (insn, 22))
3156     {
3157       /* We can't displaced step atomic sequences.  */
3158       return NULL;
3159     }
3160 
3161   std::unique_ptr<aarch64_displaced_step_copy_insn_closure> dsc
3162     (new aarch64_displaced_step_copy_insn_closure);
3163   dsd.base.insn_addr = from;
3164   dsd.new_addr = to;
3165   dsd.regs = regs;
3166   dsd.dsc = dsc.get ();
3167   dsd.insn_count = 0;
3168   aarch64_relocate_instruction (insn, &visitor,
3169 				(struct aarch64_insn_data *) &dsd);
3170   gdb_assert (dsd.insn_count <= AARCH64_DISPLACED_MODIFIED_INSNS);
3171 
3172   if (dsd.insn_count != 0)
3173     {
3174       int i;
3175 
3176       /* Instruction can be relocated to scratch pad.  Copy
3177 	 relocated instruction(s) there.  */
3178       for (i = 0; i < dsd.insn_count; i++)
3179 	{
3180 	  displaced_debug_printf ("writing insn %.8x at %s",
3181 				  dsd.insn_buf[i],
3182 				  paddress (gdbarch, to + i * 4));
3183 
3184 	  write_memory_unsigned_integer (to + i * 4, 4, byte_order_for_code,
3185 					 (ULONGEST) dsd.insn_buf[i]);
3186 	}
3187     }
3188   else
3189     {
3190       dsc = NULL;
3191     }
3192 
3193   /* This is a work around for a problem with g++ 4.8.  */
3194   return displaced_step_copy_insn_closure_up (dsc.release ());
3195 }
3196 
3197 /* Implement the "displaced_step_fixup" gdbarch method.  */
3198 
3199 void
aarch64_displaced_step_fixup(struct gdbarch * gdbarch,struct displaced_step_copy_insn_closure * dsc_,CORE_ADDR from,CORE_ADDR to,struct regcache * regs)3200 aarch64_displaced_step_fixup (struct gdbarch *gdbarch,
3201 			      struct displaced_step_copy_insn_closure *dsc_,
3202 			      CORE_ADDR from, CORE_ADDR to,
3203 			      struct regcache *regs)
3204 {
3205   aarch64_displaced_step_copy_insn_closure *dsc
3206     = (aarch64_displaced_step_copy_insn_closure *) dsc_;
3207 
3208   ULONGEST pc;
3209 
3210   regcache_cooked_read_unsigned (regs, AARCH64_PC_REGNUM, &pc);
3211 
3212   displaced_debug_printf ("PC after stepping: %s (was %s).",
3213 			  paddress (gdbarch, pc), paddress (gdbarch, to));
3214 
3215   if (dsc->cond)
3216     {
3217       displaced_debug_printf ("[Conditional] pc_adjust before: %d",
3218 			      dsc->pc_adjust);
3219 
3220       if (pc - to == 8)
3221 	{
3222 	  /* Condition is true.  */
3223 	}
3224       else if (pc - to == 4)
3225 	{
3226 	  /* Condition is false.  */
3227 	  dsc->pc_adjust = 4;
3228 	}
3229       else
3230 	gdb_assert_not_reached ("Unexpected PC value after displaced stepping");
3231 
3232       displaced_debug_printf ("[Conditional] pc_adjust after: %d",
3233 			      dsc->pc_adjust);
3234     }
3235 
3236   displaced_debug_printf ("%s PC by %d",
3237 			  dsc->pc_adjust ? "adjusting" : "not adjusting",
3238 			  dsc->pc_adjust);
3239 
3240   if (dsc->pc_adjust != 0)
3241     {
3242       /* Make sure the previous instruction was executed (that is, the PC
3243 	 has changed).  If the PC didn't change, then discard the adjustment
3244 	 offset.  Otherwise we may skip an instruction before its execution
3245 	 took place.  */
3246       if ((pc - to) == 0)
3247 	{
3248 	  displaced_debug_printf ("PC did not move. Discarding PC adjustment.");
3249 	  dsc->pc_adjust = 0;
3250 	}
3251 
3252       displaced_debug_printf ("fixup: set PC to %s:%d",
3253 			      paddress (gdbarch, from), dsc->pc_adjust);
3254 
3255       regcache_cooked_write_unsigned (regs, AARCH64_PC_REGNUM,
3256 				      from + dsc->pc_adjust);
3257     }
3258 }
3259 
3260 /* Implement the "displaced_step_hw_singlestep" gdbarch method.  */
3261 
3262 bool
aarch64_displaced_step_hw_singlestep(struct gdbarch * gdbarch)3263 aarch64_displaced_step_hw_singlestep (struct gdbarch *gdbarch)
3264 {
3265   return true;
3266 }
3267 
3268 /* Get the correct target description for the given VQ value.
3269    If VQ is zero then it is assumed SVE is not supported.
3270    (It is not possible to set VQ to zero on an SVE system).
3271 
3272    MTE_P indicates the presence of the Memory Tagging Extension feature. */
3273 
3274 const target_desc *
aarch64_read_description(uint64_t vq,bool pauth_p,bool mte_p)3275 aarch64_read_description (uint64_t vq, bool pauth_p, bool mte_p)
3276 {
3277   if (vq > AARCH64_MAX_SVE_VQ)
3278     error (_("VQ is %" PRIu64 ", maximum supported value is %d"), vq,
3279 	   AARCH64_MAX_SVE_VQ);
3280 
3281   struct target_desc *tdesc = tdesc_aarch64_list[vq][pauth_p][mte_p];
3282 
3283   if (tdesc == NULL)
3284     {
3285       tdesc = aarch64_create_target_description (vq, pauth_p, mte_p);
3286       tdesc_aarch64_list[vq][pauth_p][mte_p] = tdesc;
3287     }
3288 
3289   return tdesc;
3290 }
3291 
3292 /* Return the VQ used when creating the target description TDESC.  */
3293 
3294 static uint64_t
aarch64_get_tdesc_vq(const struct target_desc * tdesc)3295 aarch64_get_tdesc_vq (const struct target_desc *tdesc)
3296 {
3297   const struct tdesc_feature *feature_sve;
3298 
3299   if (!tdesc_has_registers (tdesc))
3300     return 0;
3301 
3302   feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3303 
3304   if (feature_sve == nullptr)
3305     return 0;
3306 
3307   uint64_t vl = tdesc_register_bitsize (feature_sve,
3308 					aarch64_sve_register_names[0]) / 8;
3309   return sve_vq_from_vl (vl);
3310 }
3311 
3312 /* Add all the expected register sets into GDBARCH.  */
3313 
3314 static void
aarch64_add_reggroups(struct gdbarch * gdbarch)3315 aarch64_add_reggroups (struct gdbarch *gdbarch)
3316 {
3317   reggroup_add (gdbarch, general_reggroup);
3318   reggroup_add (gdbarch, float_reggroup);
3319   reggroup_add (gdbarch, system_reggroup);
3320   reggroup_add (gdbarch, vector_reggroup);
3321   reggroup_add (gdbarch, all_reggroup);
3322   reggroup_add (gdbarch, save_reggroup);
3323   reggroup_add (gdbarch, restore_reggroup);
3324 }
3325 
3326 /* Implement the "cannot_store_register" gdbarch method.  */
3327 
3328 static int
aarch64_cannot_store_register(struct gdbarch * gdbarch,int regnum)3329 aarch64_cannot_store_register (struct gdbarch *gdbarch, int regnum)
3330 {
3331   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3332 
3333   if (!tdep->has_pauth ())
3334     return 0;
3335 
3336   /* Pointer authentication registers are read-only.  */
3337   return (regnum == AARCH64_PAUTH_DMASK_REGNUM (tdep->pauth_reg_base)
3338 	  || regnum == AARCH64_PAUTH_CMASK_REGNUM (tdep->pauth_reg_base));
3339 }
3340 
3341 /* Initialize the current architecture based on INFO.  If possible,
3342    re-use an architecture from ARCHES, which is a list of
3343    architectures already created during this debugging session.
3344 
3345    Called e.g. at program startup, when reading a core file, and when
3346    reading a binary file.  */
3347 
3348 static struct gdbarch *
aarch64_gdbarch_init(struct gdbarch_info info,struct gdbarch_list * arches)3349 aarch64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3350 {
3351   const struct tdesc_feature *feature_core, *feature_fpu, *feature_sve;
3352   const struct tdesc_feature *feature_pauth;
3353   bool valid_p = true;
3354   int i, num_regs = 0, num_pseudo_regs = 0;
3355   int first_pauth_regnum = -1, pauth_ra_state_offset = -1;
3356   int first_mte_regnum = -1;
3357 
3358   /* Use the vector length passed via the target info.  Here -1 is used for no
3359      SVE, and 0 is unset.  If unset then use the vector length from the existing
3360      tdesc.  */
3361   uint64_t vq = 0;
3362   if (info.id == (int *) -1)
3363     vq = 0;
3364   else if (info.id != 0)
3365     vq = (uint64_t) info.id;
3366   else
3367     vq = aarch64_get_tdesc_vq (info.target_desc);
3368 
3369   if (vq > AARCH64_MAX_SVE_VQ)
3370     internal_error (__FILE__, __LINE__, _("VQ out of bounds: %s (max %d)"),
3371 		    pulongest (vq), AARCH64_MAX_SVE_VQ);
3372 
3373   /* If there is already a candidate, use it.  */
3374   for (gdbarch_list *best_arch = gdbarch_list_lookup_by_info (arches, &info);
3375        best_arch != nullptr;
3376        best_arch = gdbarch_list_lookup_by_info (best_arch->next, &info))
3377     {
3378       struct gdbarch_tdep *tdep = gdbarch_tdep (best_arch->gdbarch);
3379       if (tdep && tdep->vq == vq)
3380 	return best_arch->gdbarch;
3381     }
3382 
3383   /* Ensure we always have a target descriptor, and that it is for the given VQ
3384      value.  */
3385   const struct target_desc *tdesc = info.target_desc;
3386   if (!tdesc_has_registers (tdesc) || vq != aarch64_get_tdesc_vq (tdesc))
3387     tdesc = aarch64_read_description (vq, false, false);
3388   gdb_assert (tdesc);
3389 
3390   feature_core = tdesc_find_feature (tdesc,"org.gnu.gdb.aarch64.core");
3391   feature_fpu = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.fpu");
3392   feature_sve = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.sve");
3393   feature_pauth = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.pauth");
3394   const struct tdesc_feature *feature_mte
3395     = tdesc_find_feature (tdesc, "org.gnu.gdb.aarch64.mte");
3396 
3397   if (feature_core == nullptr)
3398     return nullptr;
3399 
3400   tdesc_arch_data_up tdesc_data = tdesc_data_alloc ();
3401 
3402   /* Validate the description provides the mandatory core R registers
3403      and allocate their numbers.  */
3404   for (i = 0; i < ARRAY_SIZE (aarch64_r_register_names); i++)
3405     valid_p &= tdesc_numbered_register (feature_core, tdesc_data.get (),
3406 					AARCH64_X0_REGNUM + i,
3407 					aarch64_r_register_names[i]);
3408 
3409   num_regs = AARCH64_X0_REGNUM + i;
3410 
3411   /* Add the V registers.  */
3412   if (feature_fpu != nullptr)
3413     {
3414       if (feature_sve != nullptr)
3415 	error (_("Program contains both fpu and SVE features."));
3416 
3417       /* Validate the description provides the mandatory V registers
3418 	 and allocate their numbers.  */
3419       for (i = 0; i < ARRAY_SIZE (aarch64_v_register_names); i++)
3420 	valid_p &= tdesc_numbered_register (feature_fpu, tdesc_data.get (),
3421 					    AARCH64_V0_REGNUM + i,
3422 					    aarch64_v_register_names[i]);
3423 
3424       num_regs = AARCH64_V0_REGNUM + i;
3425     }
3426 
3427   /* Add the SVE registers.  */
3428   if (feature_sve != nullptr)
3429     {
3430       /* Validate the description provides the mandatory SVE registers
3431 	 and allocate their numbers.  */
3432       for (i = 0; i < ARRAY_SIZE (aarch64_sve_register_names); i++)
3433 	valid_p &= tdesc_numbered_register (feature_sve, tdesc_data.get (),
3434 					    AARCH64_SVE_Z0_REGNUM + i,
3435 					    aarch64_sve_register_names[i]);
3436 
3437       num_regs = AARCH64_SVE_Z0_REGNUM + i;
3438       num_pseudo_regs += 32;	/* add the Vn register pseudos.  */
3439     }
3440 
3441   if (feature_fpu != nullptr || feature_sve != nullptr)
3442     {
3443       num_pseudo_regs += 32;	/* add the Qn scalar register pseudos */
3444       num_pseudo_regs += 32;	/* add the Dn scalar register pseudos */
3445       num_pseudo_regs += 32;	/* add the Sn scalar register pseudos */
3446       num_pseudo_regs += 32;	/* add the Hn scalar register pseudos */
3447       num_pseudo_regs += 32;	/* add the Bn scalar register pseudos */
3448     }
3449 
3450   /* Add the pauth registers.  */
3451   if (feature_pauth != NULL)
3452     {
3453       first_pauth_regnum = num_regs;
3454       pauth_ra_state_offset = num_pseudo_regs;
3455       /* Validate the descriptor provides the mandatory PAUTH registers and
3456 	 allocate their numbers.  */
3457       for (i = 0; i < ARRAY_SIZE (aarch64_pauth_register_names); i++)
3458 	valid_p &= tdesc_numbered_register (feature_pauth, tdesc_data.get (),
3459 					    first_pauth_regnum + i,
3460 					    aarch64_pauth_register_names[i]);
3461 
3462       num_regs += i;
3463       num_pseudo_regs += 1;	/* Count RA_STATE pseudo register.  */
3464     }
3465 
3466   /* Add the MTE registers.  */
3467   if (feature_mte != NULL)
3468     {
3469       first_mte_regnum = num_regs;
3470       /* Validate the descriptor provides the mandatory MTE registers and
3471 	 allocate their numbers.  */
3472       for (i = 0; i < ARRAY_SIZE (aarch64_mte_register_names); i++)
3473 	valid_p &= tdesc_numbered_register (feature_mte, tdesc_data.get (),
3474 					    first_mte_regnum + i,
3475 					    aarch64_mte_register_names[i]);
3476 
3477       num_regs += i;
3478     }
3479 
3480   if (!valid_p)
3481     return nullptr;
3482 
3483   /* AArch64 code is always little-endian.  */
3484   info.byte_order_for_code = BFD_ENDIAN_LITTLE;
3485 
3486   struct gdbarch_tdep *tdep = XCNEW (struct gdbarch_tdep);
3487   struct gdbarch *gdbarch = gdbarch_alloc (&info, tdep);
3488 
3489   /* This should be low enough for everything.  */
3490   tdep->lowest_pc = 0x20;
3491   tdep->jb_pc = -1;		/* Longjump support not enabled by default.  */
3492   tdep->jb_elt_size = 8;
3493   tdep->vq = vq;
3494   tdep->pauth_reg_base = first_pauth_regnum;
3495   tdep->pauth_ra_state_regnum = (feature_pauth == NULL) ? -1
3496 				: pauth_ra_state_offset + num_regs;
3497   tdep->mte_reg_base = first_mte_regnum;
3498 
3499   set_gdbarch_push_dummy_call (gdbarch, aarch64_push_dummy_call);
3500   set_gdbarch_frame_align (gdbarch, aarch64_frame_align);
3501 
3502   /* Advance PC across function entry code.  */
3503   set_gdbarch_skip_prologue (gdbarch, aarch64_skip_prologue);
3504 
3505   /* The stack grows downward.  */
3506   set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
3507 
3508   /* Breakpoint manipulation.  */
3509   set_gdbarch_breakpoint_kind_from_pc (gdbarch,
3510 				       aarch64_breakpoint::kind_from_pc);
3511   set_gdbarch_sw_breakpoint_from_kind (gdbarch,
3512 				       aarch64_breakpoint::bp_from_kind);
3513   set_gdbarch_have_nonsteppable_watchpoint (gdbarch, 1);
3514   set_gdbarch_software_single_step (gdbarch, aarch64_software_single_step);
3515 
3516   /* Information about registers, etc.  */
3517   set_gdbarch_sp_regnum (gdbarch, AARCH64_SP_REGNUM);
3518   set_gdbarch_pc_regnum (gdbarch, AARCH64_PC_REGNUM);
3519   set_gdbarch_num_regs (gdbarch, num_regs);
3520 
3521   set_gdbarch_num_pseudo_regs (gdbarch, num_pseudo_regs);
3522   set_gdbarch_pseudo_register_read_value (gdbarch, aarch64_pseudo_read_value);
3523   set_gdbarch_pseudo_register_write (gdbarch, aarch64_pseudo_write);
3524   set_tdesc_pseudo_register_name (gdbarch, aarch64_pseudo_register_name);
3525   set_tdesc_pseudo_register_type (gdbarch, aarch64_pseudo_register_type);
3526   set_tdesc_pseudo_register_reggroup_p (gdbarch,
3527 					aarch64_pseudo_register_reggroup_p);
3528   set_gdbarch_cannot_store_register (gdbarch, aarch64_cannot_store_register);
3529 
3530   /* ABI */
3531   set_gdbarch_short_bit (gdbarch, 16);
3532   set_gdbarch_int_bit (gdbarch, 32);
3533   set_gdbarch_float_bit (gdbarch, 32);
3534   set_gdbarch_double_bit (gdbarch, 64);
3535   set_gdbarch_long_double_bit (gdbarch, 128);
3536   set_gdbarch_long_bit (gdbarch, 64);
3537   set_gdbarch_long_long_bit (gdbarch, 64);
3538   set_gdbarch_ptr_bit (gdbarch, 64);
3539   set_gdbarch_char_signed (gdbarch, 0);
3540   set_gdbarch_wchar_signed (gdbarch, 0);
3541   set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
3542   set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
3543   set_gdbarch_long_double_format (gdbarch, floatformats_ia64_quad);
3544   set_gdbarch_type_align (gdbarch, aarch64_type_align);
3545 
3546   /* Internal <-> external register number maps.  */
3547   set_gdbarch_dwarf2_reg_to_regnum (gdbarch, aarch64_dwarf_reg_to_regnum);
3548 
3549   /* Returning results.  */
3550   set_gdbarch_return_value (gdbarch, aarch64_return_value);
3551 
3552   /* Disassembly.  */
3553   set_gdbarch_print_insn (gdbarch, aarch64_gdb_print_insn);
3554 
3555   /* Virtual tables.  */
3556   set_gdbarch_vbit_in_delta (gdbarch, 1);
3557 
3558   /* Register architecture.  */
3559   aarch64_add_reggroups (gdbarch);
3560 
3561   /* Hook in the ABI-specific overrides, if they have been registered.  */
3562   info.target_desc = tdesc;
3563   info.tdesc_data = tdesc_data.get ();
3564   gdbarch_init_osabi (info, gdbarch);
3565 
3566   dwarf2_frame_set_init_reg (gdbarch, aarch64_dwarf2_frame_init_reg);
3567   /* Register DWARF CFA vendor handler.  */
3568   set_gdbarch_execute_dwarf_cfa_vendor_op (gdbarch,
3569 					   aarch64_execute_dwarf_cfa_vendor_op);
3570 
3571   /* Permanent/Program breakpoint handling.  */
3572   set_gdbarch_program_breakpoint_here_p (gdbarch,
3573 					 aarch64_program_breakpoint_here_p);
3574 
3575   /* Add some default predicates.  */
3576   frame_unwind_append_unwinder (gdbarch, &aarch64_stub_unwind);
3577   dwarf2_append_unwinders (gdbarch);
3578   frame_unwind_append_unwinder (gdbarch, &aarch64_prologue_unwind);
3579 
3580   frame_base_set_default (gdbarch, &aarch64_normal_base);
3581 
3582   /* Now we have tuned the configuration, set a few final things,
3583      based on what the OS ABI has told us.  */
3584 
3585   if (tdep->jb_pc >= 0)
3586     set_gdbarch_get_longjmp_target (gdbarch, aarch64_get_longjmp_target);
3587 
3588   set_gdbarch_gen_return_address (gdbarch, aarch64_gen_return_address);
3589 
3590   set_gdbarch_get_pc_address_flags (gdbarch, aarch64_get_pc_address_flags);
3591 
3592   tdesc_use_registers (gdbarch, tdesc, std::move (tdesc_data));
3593 
3594   /* Add standard register aliases.  */
3595   for (i = 0; i < ARRAY_SIZE (aarch64_register_aliases); i++)
3596     user_reg_add (gdbarch, aarch64_register_aliases[i].name,
3597 		  value_of_aarch64_user_reg,
3598 		  &aarch64_register_aliases[i].regnum);
3599 
3600   register_aarch64_ravenscar_ops (gdbarch);
3601 
3602   return gdbarch;
3603 }
3604 
3605 static void
aarch64_dump_tdep(struct gdbarch * gdbarch,struct ui_file * file)3606 aarch64_dump_tdep (struct gdbarch *gdbarch, struct ui_file *file)
3607 {
3608   struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3609 
3610   if (tdep == NULL)
3611     return;
3612 
3613   fprintf_unfiltered (file, _("aarch64_dump_tdep: Lowest pc = 0x%s"),
3614 		      paddress (gdbarch, tdep->lowest_pc));
3615 }
3616 
3617 #if GDB_SELF_TEST
3618 namespace selftests
3619 {
3620 static void aarch64_process_record_test (void);
3621 }
3622 #endif
3623 
3624 void _initialize_aarch64_tdep ();
3625 void
_initialize_aarch64_tdep()3626 _initialize_aarch64_tdep ()
3627 {
3628   gdbarch_register (bfd_arch_aarch64, aarch64_gdbarch_init,
3629 		    aarch64_dump_tdep);
3630 
3631   /* Debug this file's internals.  */
3632   add_setshow_boolean_cmd ("aarch64", class_maintenance, &aarch64_debug, _("\
3633 Set AArch64 debugging."), _("\
3634 Show AArch64 debugging."), _("\
3635 When on, AArch64 specific debugging is enabled."),
3636 			    NULL,
3637 			    show_aarch64_debug,
3638 			    &setdebuglist, &showdebuglist);
3639 
3640 #if GDB_SELF_TEST
3641   selftests::register_test ("aarch64-analyze-prologue",
3642 			    selftests::aarch64_analyze_prologue_test);
3643   selftests::register_test ("aarch64-process-record",
3644 			    selftests::aarch64_process_record_test);
3645 #endif
3646 }
3647 
3648 /* AArch64 process record-replay related structures, defines etc.  */
3649 
3650 #define REG_ALLOC(REGS, LENGTH, RECORD_BUF) \
3651 	do  \
3652 	  { \
3653 	    unsigned int reg_len = LENGTH; \
3654 	    if (reg_len) \
3655 	      { \
3656 		REGS = XNEWVEC (uint32_t, reg_len); \
3657 		memcpy(&REGS[0], &RECORD_BUF[0], sizeof(uint32_t)*LENGTH); \
3658 	      } \
3659 	  } \
3660 	while (0)
3661 
3662 #define MEM_ALLOC(MEMS, LENGTH, RECORD_BUF) \
3663 	do  \
3664 	  { \
3665 	    unsigned int mem_len = LENGTH; \
3666 	    if (mem_len) \
3667 	      { \
3668 		MEMS =  XNEWVEC (struct aarch64_mem_r, mem_len);  \
3669 		memcpy(&MEMS->len, &RECORD_BUF[0], \
3670 		       sizeof(struct aarch64_mem_r) * LENGTH); \
3671 	      } \
3672 	  } \
3673 	  while (0)
3674 
3675 /* AArch64 record/replay structures and enumerations.  */
3676 
3677 struct aarch64_mem_r
3678 {
3679   uint64_t len;    /* Record length.  */
3680   uint64_t addr;   /* Memory address.  */
3681 };
3682 
3683 enum aarch64_record_result
3684 {
3685   AARCH64_RECORD_SUCCESS,
3686   AARCH64_RECORD_UNSUPPORTED,
3687   AARCH64_RECORD_UNKNOWN
3688 };
3689 
3690 typedef struct insn_decode_record_t
3691 {
3692   struct gdbarch *gdbarch;
3693   struct regcache *regcache;
3694   CORE_ADDR this_addr;                 /* Address of insn to be recorded.  */
3695   uint32_t aarch64_insn;               /* Insn to be recorded.  */
3696   uint32_t mem_rec_count;              /* Count of memory records.  */
3697   uint32_t reg_rec_count;              /* Count of register records.  */
3698   uint32_t *aarch64_regs;              /* Registers to be recorded.  */
3699   struct aarch64_mem_r *aarch64_mems;  /* Memory locations to be recorded.  */
3700 } insn_decode_record;
3701 
3702 /* Record handler for data processing - register instructions.  */
3703 
3704 static unsigned int
aarch64_record_data_proc_reg(insn_decode_record * aarch64_insn_r)3705 aarch64_record_data_proc_reg (insn_decode_record *aarch64_insn_r)
3706 {
3707   uint8_t reg_rd, insn_bits24_27, insn_bits21_23;
3708   uint32_t record_buf[4];
3709 
3710   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3711   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3712   insn_bits21_23 = bits (aarch64_insn_r->aarch64_insn, 21, 23);
3713 
3714   if (!bit (aarch64_insn_r->aarch64_insn, 28))
3715     {
3716       uint8_t setflags;
3717 
3718       /* Logical (shifted register).  */
3719       if (insn_bits24_27 == 0x0a)
3720 	setflags = (bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03);
3721       /* Add/subtract.  */
3722       else if (insn_bits24_27 == 0x0b)
3723 	setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3724       else
3725 	return AARCH64_RECORD_UNKNOWN;
3726 
3727       record_buf[0] = reg_rd;
3728       aarch64_insn_r->reg_rec_count = 1;
3729       if (setflags)
3730 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3731     }
3732   else
3733     {
3734       if (insn_bits24_27 == 0x0b)
3735 	{
3736 	  /* Data-processing (3 source).  */
3737 	  record_buf[0] = reg_rd;
3738 	  aarch64_insn_r->reg_rec_count = 1;
3739 	}
3740       else if (insn_bits24_27 == 0x0a)
3741 	{
3742 	  if (insn_bits21_23 == 0x00)
3743 	    {
3744 	      /* Add/subtract (with carry).  */
3745 	      record_buf[0] = reg_rd;
3746 	      aarch64_insn_r->reg_rec_count = 1;
3747 	      if (bit (aarch64_insn_r->aarch64_insn, 29))
3748 		{
3749 		  record_buf[1] = AARCH64_CPSR_REGNUM;
3750 		  aarch64_insn_r->reg_rec_count = 2;
3751 		}
3752 	    }
3753 	  else if (insn_bits21_23 == 0x02)
3754 	    {
3755 	      /* Conditional compare (register) and conditional compare
3756 		 (immediate) instructions.  */
3757 	      record_buf[0] = AARCH64_CPSR_REGNUM;
3758 	      aarch64_insn_r->reg_rec_count = 1;
3759 	    }
3760 	  else if (insn_bits21_23 == 0x04 || insn_bits21_23 == 0x06)
3761 	    {
3762 	      /* Conditional select.  */
3763 	      /* Data-processing (2 source).  */
3764 	      /* Data-processing (1 source).  */
3765 	      record_buf[0] = reg_rd;
3766 	      aarch64_insn_r->reg_rec_count = 1;
3767 	    }
3768 	  else
3769 	    return AARCH64_RECORD_UNKNOWN;
3770 	}
3771     }
3772 
3773   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3774 	     record_buf);
3775   return AARCH64_RECORD_SUCCESS;
3776 }
3777 
3778 /* Record handler for data processing - immediate instructions.  */
3779 
3780 static unsigned int
aarch64_record_data_proc_imm(insn_decode_record * aarch64_insn_r)3781 aarch64_record_data_proc_imm (insn_decode_record *aarch64_insn_r)
3782 {
3783   uint8_t reg_rd, insn_bit23, insn_bits24_27, setflags;
3784   uint32_t record_buf[4];
3785 
3786   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3787   insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
3788   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3789 
3790   if (insn_bits24_27 == 0x00                     /* PC rel addressing.  */
3791      || insn_bits24_27 == 0x03                   /* Bitfield and Extract.  */
3792      || (insn_bits24_27 == 0x02 && insn_bit23))  /* Move wide (immediate).  */
3793     {
3794       record_buf[0] = reg_rd;
3795       aarch64_insn_r->reg_rec_count = 1;
3796     }
3797   else if (insn_bits24_27 == 0x01)
3798     {
3799       /* Add/Subtract (immediate).  */
3800       setflags = bit (aarch64_insn_r->aarch64_insn, 29);
3801       record_buf[0] = reg_rd;
3802       aarch64_insn_r->reg_rec_count = 1;
3803       if (setflags)
3804 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3805     }
3806   else if (insn_bits24_27 == 0x02 && !insn_bit23)
3807     {
3808       /* Logical (immediate).  */
3809       setflags = bits (aarch64_insn_r->aarch64_insn, 29, 30) == 0x03;
3810       record_buf[0] = reg_rd;
3811       aarch64_insn_r->reg_rec_count = 1;
3812       if (setflags)
3813 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_CPSR_REGNUM;
3814     }
3815   else
3816     return AARCH64_RECORD_UNKNOWN;
3817 
3818   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3819 	     record_buf);
3820   return AARCH64_RECORD_SUCCESS;
3821 }
3822 
3823 /* Record handler for branch, exception generation and system instructions.  */
3824 
3825 static unsigned int
aarch64_record_branch_except_sys(insn_decode_record * aarch64_insn_r)3826 aarch64_record_branch_except_sys (insn_decode_record *aarch64_insn_r)
3827 {
3828   struct gdbarch_tdep *tdep = gdbarch_tdep (aarch64_insn_r->gdbarch);
3829   uint8_t insn_bits24_27, insn_bits28_31, insn_bits22_23;
3830   uint32_t record_buf[4];
3831 
3832   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
3833   insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
3834   insn_bits22_23 = bits (aarch64_insn_r->aarch64_insn, 22, 23);
3835 
3836   if (insn_bits28_31 == 0x0d)
3837     {
3838       /* Exception generation instructions. */
3839       if (insn_bits24_27 == 0x04)
3840 	{
3841 	  if (!bits (aarch64_insn_r->aarch64_insn, 2, 4)
3842 	      && !bits (aarch64_insn_r->aarch64_insn, 21, 23)
3843 	      && bits (aarch64_insn_r->aarch64_insn, 0, 1) == 0x01)
3844 	    {
3845 	      ULONGEST svc_number;
3846 
3847 	      regcache_raw_read_unsigned (aarch64_insn_r->regcache, 8,
3848 					  &svc_number);
3849 	      return tdep->aarch64_syscall_record (aarch64_insn_r->regcache,
3850 						   svc_number);
3851 	    }
3852 	  else
3853 	    return AARCH64_RECORD_UNSUPPORTED;
3854 	}
3855       /* System instructions. */
3856       else if (insn_bits24_27 == 0x05 && insn_bits22_23 == 0x00)
3857 	{
3858 	  uint32_t reg_rt, reg_crn;
3859 
3860 	  reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3861 	  reg_crn = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3862 
3863 	  /* Record rt in case of sysl and mrs instructions.  */
3864 	  if (bit (aarch64_insn_r->aarch64_insn, 21))
3865 	    {
3866 	      record_buf[0] = reg_rt;
3867 	      aarch64_insn_r->reg_rec_count = 1;
3868 	    }
3869 	  /* Record cpsr for hint and msr(immediate) instructions.  */
3870 	  else if (reg_crn == 0x02 || reg_crn == 0x04)
3871 	    {
3872 	      record_buf[0] = AARCH64_CPSR_REGNUM;
3873 	      aarch64_insn_r->reg_rec_count = 1;
3874 	    }
3875 	}
3876       /* Unconditional branch (register).  */
3877       else if((insn_bits24_27 & 0x0e) == 0x06)
3878 	{
3879 	  record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3880 	  if (bits (aarch64_insn_r->aarch64_insn, 21, 22) == 0x01)
3881 	    record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3882 	}
3883       else
3884 	return AARCH64_RECORD_UNKNOWN;
3885     }
3886   /* Unconditional branch (immediate).  */
3887   else if ((insn_bits28_31 & 0x07) == 0x01 && (insn_bits24_27 & 0x0c) == 0x04)
3888     {
3889       record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3890       if (bit (aarch64_insn_r->aarch64_insn, 31))
3891 	record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_LR_REGNUM;
3892     }
3893   else
3894     /* Compare & branch (immediate), Test & branch (immediate) and
3895        Conditional branch (immediate).  */
3896     record_buf[aarch64_insn_r->reg_rec_count++] = AARCH64_PC_REGNUM;
3897 
3898   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
3899 	     record_buf);
3900   return AARCH64_RECORD_SUCCESS;
3901 }
3902 
3903 /* Record handler for advanced SIMD load and store instructions.  */
3904 
3905 static unsigned int
aarch64_record_asimd_load_store(insn_decode_record * aarch64_insn_r)3906 aarch64_record_asimd_load_store (insn_decode_record *aarch64_insn_r)
3907 {
3908   CORE_ADDR address;
3909   uint64_t addr_offset = 0;
3910   uint32_t record_buf[24];
3911   uint64_t record_buf_mem[24];
3912   uint32_t reg_rn, reg_rt;
3913   uint32_t reg_index = 0, mem_index = 0;
3914   uint8_t opcode_bits, size_bits;
3915 
3916   reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
3917   reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
3918   size_bits = bits (aarch64_insn_r->aarch64_insn, 10, 11);
3919   opcode_bits = bits (aarch64_insn_r->aarch64_insn, 12, 15);
3920   regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn, &address);
3921 
3922   if (record_debug)
3923     debug_printf ("Process record: Advanced SIMD load/store\n");
3924 
3925   /* Load/store single structure.  */
3926   if (bit (aarch64_insn_r->aarch64_insn, 24))
3927     {
3928       uint8_t sindex, scale, selem, esize, replicate = 0;
3929       scale = opcode_bits >> 2;
3930       selem = ((opcode_bits & 0x02) |
3931 	      bit (aarch64_insn_r->aarch64_insn, 21)) + 1;
3932       switch (scale)
3933 	{
3934 	case 1:
3935 	  if (size_bits & 0x01)
3936 	    return AARCH64_RECORD_UNKNOWN;
3937 	  break;
3938 	case 2:
3939 	  if ((size_bits >> 1) & 0x01)
3940 	    return AARCH64_RECORD_UNKNOWN;
3941 	  if (size_bits & 0x01)
3942 	    {
3943 	      if (!((opcode_bits >> 1) & 0x01))
3944 		scale = 3;
3945 	      else
3946 		return AARCH64_RECORD_UNKNOWN;
3947 	    }
3948 	  break;
3949 	case 3:
3950 	  if (bit (aarch64_insn_r->aarch64_insn, 22) && !(opcode_bits & 0x01))
3951 	    {
3952 	      scale = size_bits;
3953 	      replicate = 1;
3954 	      break;
3955 	    }
3956 	  else
3957 	    return AARCH64_RECORD_UNKNOWN;
3958 	default:
3959 	  break;
3960 	}
3961       esize = 8 << scale;
3962       if (replicate)
3963 	for (sindex = 0; sindex < selem; sindex++)
3964 	  {
3965 	    record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3966 	    reg_rt = (reg_rt + 1) % 32;
3967 	  }
3968       else
3969 	{
3970 	  for (sindex = 0; sindex < selem; sindex++)
3971 	    {
3972 	      if (bit (aarch64_insn_r->aarch64_insn, 22))
3973 		record_buf[reg_index++] = reg_rt + AARCH64_V0_REGNUM;
3974 	      else
3975 		{
3976 		  record_buf_mem[mem_index++] = esize / 8;
3977 		  record_buf_mem[mem_index++] = address + addr_offset;
3978 		}
3979 	      addr_offset = addr_offset + (esize / 8);
3980 	      reg_rt = (reg_rt + 1) % 32;
3981 	    }
3982 	}
3983     }
3984   /* Load/store multiple structure.  */
3985   else
3986     {
3987       uint8_t selem, esize, rpt, elements;
3988       uint8_t eindex, rindex;
3989 
3990       esize = 8 << size_bits;
3991       if (bit (aarch64_insn_r->aarch64_insn, 30))
3992 	elements = 128 / esize;
3993       else
3994 	elements = 64 / esize;
3995 
3996       switch (opcode_bits)
3997 	{
3998 	/*LD/ST4 (4 Registers).  */
3999 	case 0:
4000 	  rpt = 1;
4001 	  selem = 4;
4002 	  break;
4003 	/*LD/ST1 (4 Registers).  */
4004 	case 2:
4005 	  rpt = 4;
4006 	  selem = 1;
4007 	  break;
4008 	/*LD/ST3 (3 Registers).  */
4009 	case 4:
4010 	  rpt = 1;
4011 	  selem = 3;
4012 	  break;
4013 	/*LD/ST1 (3 Registers).  */
4014 	case 6:
4015 	  rpt = 3;
4016 	  selem = 1;
4017 	  break;
4018 	/*LD/ST1 (1 Register).  */
4019 	case 7:
4020 	  rpt = 1;
4021 	  selem = 1;
4022 	  break;
4023 	/*LD/ST2 (2 Registers).  */
4024 	case 8:
4025 	  rpt = 1;
4026 	  selem = 2;
4027 	  break;
4028 	/*LD/ST1 (2 Registers).  */
4029 	case 10:
4030 	  rpt = 2;
4031 	  selem = 1;
4032 	  break;
4033 	default:
4034 	  return AARCH64_RECORD_UNSUPPORTED;
4035 	  break;
4036 	}
4037       for (rindex = 0; rindex < rpt; rindex++)
4038 	for (eindex = 0; eindex < elements; eindex++)
4039 	  {
4040 	    uint8_t reg_tt, sindex;
4041 	    reg_tt = (reg_rt + rindex) % 32;
4042 	    for (sindex = 0; sindex < selem; sindex++)
4043 	      {
4044 		if (bit (aarch64_insn_r->aarch64_insn, 22))
4045 		  record_buf[reg_index++] = reg_tt + AARCH64_V0_REGNUM;
4046 		else
4047 		  {
4048 		    record_buf_mem[mem_index++] = esize / 8;
4049 		    record_buf_mem[mem_index++] = address + addr_offset;
4050 		  }
4051 		addr_offset = addr_offset + (esize / 8);
4052 		reg_tt = (reg_tt + 1) % 32;
4053 	      }
4054 	  }
4055     }
4056 
4057   if (bit (aarch64_insn_r->aarch64_insn, 23))
4058     record_buf[reg_index++] = reg_rn;
4059 
4060   aarch64_insn_r->reg_rec_count = reg_index;
4061   aarch64_insn_r->mem_rec_count = mem_index / 2;
4062   MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4063 	     record_buf_mem);
4064   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4065 	     record_buf);
4066   return AARCH64_RECORD_SUCCESS;
4067 }
4068 
4069 /* Record handler for load and store instructions.  */
4070 
4071 static unsigned int
aarch64_record_load_store(insn_decode_record * aarch64_insn_r)4072 aarch64_record_load_store (insn_decode_record *aarch64_insn_r)
4073 {
4074   uint8_t insn_bits24_27, insn_bits28_29, insn_bits10_11;
4075   uint8_t insn_bit23, insn_bit21;
4076   uint8_t opc, size_bits, ld_flag, vector_flag;
4077   uint32_t reg_rn, reg_rt, reg_rt2;
4078   uint64_t datasize, offset;
4079   uint32_t record_buf[8];
4080   uint64_t record_buf_mem[8];
4081   CORE_ADDR address;
4082 
4083   insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4084   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4085   insn_bits28_29 = bits (aarch64_insn_r->aarch64_insn, 28, 29);
4086   insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4087   insn_bit23 = bit (aarch64_insn_r->aarch64_insn, 23);
4088   ld_flag = bit (aarch64_insn_r->aarch64_insn, 22);
4089   vector_flag = bit (aarch64_insn_r->aarch64_insn, 26);
4090   reg_rt = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4091   reg_rn = bits (aarch64_insn_r->aarch64_insn, 5, 9);
4092   reg_rt2 = bits (aarch64_insn_r->aarch64_insn, 10, 14);
4093   size_bits = bits (aarch64_insn_r->aarch64_insn, 30, 31);
4094 
4095   /* Load/store exclusive.  */
4096   if (insn_bits24_27 == 0x08 && insn_bits28_29 == 0x00)
4097     {
4098       if (record_debug)
4099 	debug_printf ("Process record: load/store exclusive\n");
4100 
4101       if (ld_flag)
4102 	{
4103 	  record_buf[0] = reg_rt;
4104 	  aarch64_insn_r->reg_rec_count = 1;
4105 	  if (insn_bit21)
4106 	    {
4107 	      record_buf[1] = reg_rt2;
4108 	      aarch64_insn_r->reg_rec_count = 2;
4109 	    }
4110 	}
4111       else
4112 	{
4113 	  if (insn_bit21)
4114 	    datasize = (8 << size_bits) * 2;
4115 	  else
4116 	    datasize = (8 << size_bits);
4117 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4118 				      &address);
4119 	  record_buf_mem[0] = datasize / 8;
4120 	  record_buf_mem[1] = address;
4121 	  aarch64_insn_r->mem_rec_count = 1;
4122 	  if (!insn_bit23)
4123 	    {
4124 	      /* Save register rs.  */
4125 	      record_buf[0] = bits (aarch64_insn_r->aarch64_insn, 16, 20);
4126 	      aarch64_insn_r->reg_rec_count = 1;
4127 	    }
4128 	}
4129     }
4130   /* Load register (literal) instructions decoding.  */
4131   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x01)
4132     {
4133       if (record_debug)
4134 	debug_printf ("Process record: load register (literal)\n");
4135       if (vector_flag)
4136 	record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4137       else
4138 	record_buf[0] = reg_rt;
4139       aarch64_insn_r->reg_rec_count = 1;
4140     }
4141   /* All types of load/store pair instructions decoding.  */
4142   else if ((insn_bits24_27 & 0x0a) == 0x08 && insn_bits28_29 == 0x02)
4143     {
4144       if (record_debug)
4145 	debug_printf ("Process record: load/store pair\n");
4146 
4147       if (ld_flag)
4148 	{
4149 	  if (vector_flag)
4150 	    {
4151 	      record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4152 	      record_buf[1] = reg_rt2 + AARCH64_V0_REGNUM;
4153 	    }
4154 	  else
4155 	    {
4156 	      record_buf[0] = reg_rt;
4157 	      record_buf[1] = reg_rt2;
4158 	    }
4159 	  aarch64_insn_r->reg_rec_count = 2;
4160 	}
4161       else
4162 	{
4163 	  uint16_t imm7_off;
4164 	  imm7_off = bits (aarch64_insn_r->aarch64_insn, 15, 21);
4165 	  if (!vector_flag)
4166 	    size_bits = size_bits >> 1;
4167 	  datasize = 8 << (2 + size_bits);
4168 	  offset = (imm7_off & 0x40) ? (~imm7_off & 0x007f) + 1 : imm7_off;
4169 	  offset = offset << (2 + size_bits);
4170 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4171 				      &address);
4172 	  if (!((insn_bits24_27 & 0x0b) == 0x08 && insn_bit23))
4173 	    {
4174 	      if (imm7_off & 0x40)
4175 		address = address - offset;
4176 	      else
4177 		address = address + offset;
4178 	    }
4179 
4180 	  record_buf_mem[0] = datasize / 8;
4181 	  record_buf_mem[1] = address;
4182 	  record_buf_mem[2] = datasize / 8;
4183 	  record_buf_mem[3] = address + (datasize / 8);
4184 	  aarch64_insn_r->mem_rec_count = 2;
4185 	}
4186       if (bit (aarch64_insn_r->aarch64_insn, 23))
4187 	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4188     }
4189   /* Load/store register (unsigned immediate) instructions.  */
4190   else if ((insn_bits24_27 & 0x0b) == 0x09 && insn_bits28_29 == 0x03)
4191     {
4192       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4193       if (!(opc >> 1))
4194 	{
4195 	  if (opc & 0x01)
4196 	    ld_flag = 0x01;
4197 	  else
4198 	    ld_flag = 0x0;
4199 	}
4200       else
4201 	{
4202 	  if (size_bits == 0x3 && vector_flag == 0x0 && opc == 0x2)
4203 	    {
4204 	      /* PRFM (immediate) */
4205 	      return AARCH64_RECORD_SUCCESS;
4206 	    }
4207 	  else if (size_bits == 0x2 && vector_flag == 0x0 && opc == 0x2)
4208 	    {
4209 	      /* LDRSW (immediate) */
4210 	      ld_flag = 0x1;
4211 	    }
4212 	  else
4213 	    {
4214 	      if (opc & 0x01)
4215 		ld_flag = 0x01;
4216 	      else
4217 		ld_flag = 0x0;
4218 	    }
4219 	}
4220 
4221       if (record_debug)
4222 	{
4223 	  debug_printf ("Process record: load/store (unsigned immediate):"
4224 			" size %x V %d opc %x\n", size_bits, vector_flag,
4225 			opc);
4226 	}
4227 
4228       if (!ld_flag)
4229 	{
4230 	  offset = bits (aarch64_insn_r->aarch64_insn, 10, 21);
4231 	  datasize = 8 << size_bits;
4232 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4233 				      &address);
4234 	  offset = offset << size_bits;
4235 	  address = address + offset;
4236 
4237 	  record_buf_mem[0] = datasize >> 3;
4238 	  record_buf_mem[1] = address;
4239 	  aarch64_insn_r->mem_rec_count = 1;
4240 	}
4241       else
4242 	{
4243 	  if (vector_flag)
4244 	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4245 	  else
4246 	    record_buf[0] = reg_rt;
4247 	  aarch64_insn_r->reg_rec_count = 1;
4248 	}
4249     }
4250   /* Load/store register (register offset) instructions.  */
4251   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4252 	   && insn_bits10_11 == 0x02 && insn_bit21)
4253     {
4254       if (record_debug)
4255 	debug_printf ("Process record: load/store (register offset)\n");
4256       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4257       if (!(opc >> 1))
4258 	if (opc & 0x01)
4259 	  ld_flag = 0x01;
4260 	else
4261 	  ld_flag = 0x0;
4262       else
4263 	if (size_bits != 0x03)
4264 	  ld_flag = 0x01;
4265 	else
4266 	  return AARCH64_RECORD_UNKNOWN;
4267 
4268       if (!ld_flag)
4269 	{
4270 	  ULONGEST reg_rm_val;
4271 
4272 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache,
4273 		     bits (aarch64_insn_r->aarch64_insn, 16, 20), &reg_rm_val);
4274 	  if (bit (aarch64_insn_r->aarch64_insn, 12))
4275 	    offset = reg_rm_val << size_bits;
4276 	  else
4277 	    offset = reg_rm_val;
4278 	  datasize = 8 << size_bits;
4279 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4280 				      &address);
4281 	  address = address + offset;
4282 	  record_buf_mem[0] = datasize >> 3;
4283 	  record_buf_mem[1] = address;
4284 	  aarch64_insn_r->mem_rec_count = 1;
4285 	}
4286       else
4287 	{
4288 	  if (vector_flag)
4289 	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4290 	  else
4291 	    record_buf[0] = reg_rt;
4292 	  aarch64_insn_r->reg_rec_count = 1;
4293 	}
4294     }
4295   /* Load/store register (immediate and unprivileged) instructions.  */
4296   else if ((insn_bits24_27 & 0x0b) == 0x08 && insn_bits28_29 == 0x03
4297 	   && !insn_bit21)
4298     {
4299       if (record_debug)
4300 	{
4301 	  debug_printf ("Process record: load/store "
4302 			"(immediate and unprivileged)\n");
4303 	}
4304       opc = bits (aarch64_insn_r->aarch64_insn, 22, 23);
4305       if (!(opc >> 1))
4306 	if (opc & 0x01)
4307 	  ld_flag = 0x01;
4308 	else
4309 	  ld_flag = 0x0;
4310       else
4311 	if (size_bits != 0x03)
4312 	  ld_flag = 0x01;
4313 	else
4314 	  return AARCH64_RECORD_UNKNOWN;
4315 
4316       if (!ld_flag)
4317 	{
4318 	  uint16_t imm9_off;
4319 	  imm9_off = bits (aarch64_insn_r->aarch64_insn, 12, 20);
4320 	  offset = (imm9_off & 0x0100) ? (((~imm9_off) & 0x01ff) + 1) : imm9_off;
4321 	  datasize = 8 << size_bits;
4322 	  regcache_raw_read_unsigned (aarch64_insn_r->regcache, reg_rn,
4323 				      &address);
4324 	  if (insn_bits10_11 != 0x01)
4325 	    {
4326 	      if (imm9_off & 0x0100)
4327 		address = address - offset;
4328 	      else
4329 		address = address + offset;
4330 	    }
4331 	  record_buf_mem[0] = datasize >> 3;
4332 	  record_buf_mem[1] = address;
4333 	  aarch64_insn_r->mem_rec_count = 1;
4334 	}
4335       else
4336 	{
4337 	  if (vector_flag)
4338 	    record_buf[0] = reg_rt + AARCH64_V0_REGNUM;
4339 	  else
4340 	    record_buf[0] = reg_rt;
4341 	  aarch64_insn_r->reg_rec_count = 1;
4342 	}
4343       if (insn_bits10_11 == 0x01 || insn_bits10_11 == 0x03)
4344 	record_buf[aarch64_insn_r->reg_rec_count++] = reg_rn;
4345     }
4346   /* Advanced SIMD load/store instructions.  */
4347   else
4348     return aarch64_record_asimd_load_store (aarch64_insn_r);
4349 
4350   MEM_ALLOC (aarch64_insn_r->aarch64_mems, aarch64_insn_r->mem_rec_count,
4351 	     record_buf_mem);
4352   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4353 	     record_buf);
4354   return AARCH64_RECORD_SUCCESS;
4355 }
4356 
4357 /* Record handler for data processing SIMD and floating point instructions.  */
4358 
4359 static unsigned int
aarch64_record_data_proc_simd_fp(insn_decode_record * aarch64_insn_r)4360 aarch64_record_data_proc_simd_fp (insn_decode_record *aarch64_insn_r)
4361 {
4362   uint8_t insn_bit21, opcode, rmode, reg_rd;
4363   uint8_t insn_bits24_27, insn_bits28_31, insn_bits10_11, insn_bits12_15;
4364   uint8_t insn_bits11_14;
4365   uint32_t record_buf[2];
4366 
4367   insn_bits24_27 = bits (aarch64_insn_r->aarch64_insn, 24, 27);
4368   insn_bits28_31 = bits (aarch64_insn_r->aarch64_insn, 28, 31);
4369   insn_bits10_11 = bits (aarch64_insn_r->aarch64_insn, 10, 11);
4370   insn_bits12_15 = bits (aarch64_insn_r->aarch64_insn, 12, 15);
4371   insn_bits11_14 = bits (aarch64_insn_r->aarch64_insn, 11, 14);
4372   opcode = bits (aarch64_insn_r->aarch64_insn, 16, 18);
4373   rmode = bits (aarch64_insn_r->aarch64_insn, 19, 20);
4374   reg_rd = bits (aarch64_insn_r->aarch64_insn, 0, 4);
4375   insn_bit21 = bit (aarch64_insn_r->aarch64_insn, 21);
4376 
4377   if (record_debug)
4378     debug_printf ("Process record: data processing SIMD/FP: ");
4379 
4380   if ((insn_bits28_31 & 0x05) == 0x01 && insn_bits24_27 == 0x0e)
4381     {
4382       /* Floating point - fixed point conversion instructions.  */
4383       if (!insn_bit21)
4384 	{
4385 	  if (record_debug)
4386 	    debug_printf ("FP - fixed point conversion");
4387 
4388 	  if ((opcode >> 1) == 0x0 && rmode == 0x03)
4389 	    record_buf[0] = reg_rd;
4390 	  else
4391 	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4392 	}
4393       /* Floating point - conditional compare instructions.  */
4394       else if (insn_bits10_11 == 0x01)
4395 	{
4396 	  if (record_debug)
4397 	    debug_printf ("FP - conditional compare");
4398 
4399 	  record_buf[0] = AARCH64_CPSR_REGNUM;
4400 	}
4401       /* Floating point - data processing (2-source) and
4402 	 conditional select instructions.  */
4403       else if (insn_bits10_11 == 0x02 || insn_bits10_11 == 0x03)
4404 	{
4405 	  if (record_debug)
4406 	    debug_printf ("FP - DP (2-source)");
4407 
4408 	  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4409 	}
4410       else if (insn_bits10_11 == 0x00)
4411 	{
4412 	  /* Floating point - immediate instructions.  */
4413 	  if ((insn_bits12_15 & 0x01) == 0x01
4414 	      || (insn_bits12_15 & 0x07) == 0x04)
4415 	    {
4416 	      if (record_debug)
4417 		debug_printf ("FP - immediate");
4418 	      record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4419 	    }
4420 	  /* Floating point - compare instructions.  */
4421 	  else if ((insn_bits12_15 & 0x03) == 0x02)
4422 	    {
4423 	      if (record_debug)
4424 		debug_printf ("FP - immediate");
4425 	      record_buf[0] = AARCH64_CPSR_REGNUM;
4426 	    }
4427 	  /* Floating point - integer conversions instructions.  */
4428 	  else if (insn_bits12_15 == 0x00)
4429 	    {
4430 	      /* Convert float to integer instruction.  */
4431 	      if (!(opcode >> 1) || ((opcode >> 1) == 0x02 && !rmode))
4432 		{
4433 		  if (record_debug)
4434 		    debug_printf ("float to int conversion");
4435 
4436 		  record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4437 		}
4438 	      /* Convert integer to float instruction.  */
4439 	      else if ((opcode >> 1) == 0x01 && !rmode)
4440 		{
4441 		  if (record_debug)
4442 		    debug_printf ("int to float conversion");
4443 
4444 		  record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4445 		}
4446 	      /* Move float to integer instruction.  */
4447 	      else if ((opcode >> 1) == 0x03)
4448 		{
4449 		  if (record_debug)
4450 		    debug_printf ("move float to int");
4451 
4452 		  if (!(opcode & 0x01))
4453 		    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4454 		  else
4455 		    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4456 		}
4457 	      else
4458 		return AARCH64_RECORD_UNKNOWN;
4459 	    }
4460 	  else
4461 	    return AARCH64_RECORD_UNKNOWN;
4462 	}
4463       else
4464 	return AARCH64_RECORD_UNKNOWN;
4465     }
4466   else if ((insn_bits28_31 & 0x09) == 0x00 && insn_bits24_27 == 0x0e)
4467     {
4468       if (record_debug)
4469 	debug_printf ("SIMD copy");
4470 
4471       /* Advanced SIMD copy instructions.  */
4472       if (!bits (aarch64_insn_r->aarch64_insn, 21, 23)
4473 	  && !bit (aarch64_insn_r->aarch64_insn, 15)
4474 	  && bit (aarch64_insn_r->aarch64_insn, 10))
4475 	{
4476 	  if (insn_bits11_14 == 0x05 || insn_bits11_14 == 0x07)
4477 	    record_buf[0] = reg_rd + AARCH64_X0_REGNUM;
4478 	  else
4479 	    record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4480 	}
4481       else
4482 	record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4483     }
4484   /* All remaining floating point or advanced SIMD instructions.  */
4485   else
4486     {
4487       if (record_debug)
4488 	debug_printf ("all remain");
4489 
4490       record_buf[0] = reg_rd + AARCH64_V0_REGNUM;
4491     }
4492 
4493   if (record_debug)
4494     debug_printf ("\n");
4495 
4496   /* Record the V/X register.  */
4497   aarch64_insn_r->reg_rec_count++;
4498 
4499   /* Some of these instructions may set bits in the FPSR, so record it
4500      too.  */
4501   record_buf[1] = AARCH64_FPSR_REGNUM;
4502   aarch64_insn_r->reg_rec_count++;
4503 
4504   gdb_assert (aarch64_insn_r->reg_rec_count == 2);
4505   REG_ALLOC (aarch64_insn_r->aarch64_regs, aarch64_insn_r->reg_rec_count,
4506 	     record_buf);
4507   return AARCH64_RECORD_SUCCESS;
4508 }
4509 
4510 /* Decodes insns type and invokes its record handler.  */
4511 
4512 static unsigned int
aarch64_record_decode_insn_handler(insn_decode_record * aarch64_insn_r)4513 aarch64_record_decode_insn_handler (insn_decode_record *aarch64_insn_r)
4514 {
4515   uint32_t ins_bit25, ins_bit26, ins_bit27, ins_bit28;
4516 
4517   ins_bit25 = bit (aarch64_insn_r->aarch64_insn, 25);
4518   ins_bit26 = bit (aarch64_insn_r->aarch64_insn, 26);
4519   ins_bit27 = bit (aarch64_insn_r->aarch64_insn, 27);
4520   ins_bit28 = bit (aarch64_insn_r->aarch64_insn, 28);
4521 
4522   /* Data processing - immediate instructions.  */
4523   if (!ins_bit26 && !ins_bit27 && ins_bit28)
4524     return aarch64_record_data_proc_imm (aarch64_insn_r);
4525 
4526   /* Branch, exception generation and system instructions.  */
4527   if (ins_bit26 && !ins_bit27 && ins_bit28)
4528     return aarch64_record_branch_except_sys (aarch64_insn_r);
4529 
4530   /* Load and store instructions.  */
4531   if (!ins_bit25 && ins_bit27)
4532     return aarch64_record_load_store (aarch64_insn_r);
4533 
4534   /* Data processing - register instructions.  */
4535   if (ins_bit25 && !ins_bit26 && ins_bit27)
4536     return aarch64_record_data_proc_reg (aarch64_insn_r);
4537 
4538   /* Data processing - SIMD and floating point instructions.  */
4539   if (ins_bit25 && ins_bit26 && ins_bit27)
4540     return aarch64_record_data_proc_simd_fp (aarch64_insn_r);
4541 
4542   return AARCH64_RECORD_UNSUPPORTED;
4543 }
4544 
4545 /* Cleans up local record registers and memory allocations.  */
4546 
4547 static void
deallocate_reg_mem(insn_decode_record * record)4548 deallocate_reg_mem (insn_decode_record *record)
4549 {
4550   xfree (record->aarch64_regs);
4551   xfree (record->aarch64_mems);
4552 }
4553 
4554 #if GDB_SELF_TEST
4555 namespace selftests {
4556 
4557 static void
aarch64_process_record_test(void)4558 aarch64_process_record_test (void)
4559 {
4560   struct gdbarch_info info;
4561   uint32_t ret;
4562 
4563   info.bfd_arch_info = bfd_scan_arch ("aarch64");
4564 
4565   struct gdbarch *gdbarch = gdbarch_find_by_info (info);
4566   SELF_CHECK (gdbarch != NULL);
4567 
4568   insn_decode_record aarch64_record;
4569 
4570   memset (&aarch64_record, 0, sizeof (insn_decode_record));
4571   aarch64_record.regcache = NULL;
4572   aarch64_record.this_addr = 0;
4573   aarch64_record.gdbarch = gdbarch;
4574 
4575   /* 20 00 80 f9	prfm	pldl1keep, [x1] */
4576   aarch64_record.aarch64_insn = 0xf9800020;
4577   ret = aarch64_record_decode_insn_handler (&aarch64_record);
4578   SELF_CHECK (ret == AARCH64_RECORD_SUCCESS);
4579   SELF_CHECK (aarch64_record.reg_rec_count == 0);
4580   SELF_CHECK (aarch64_record.mem_rec_count == 0);
4581 
4582   deallocate_reg_mem (&aarch64_record);
4583 }
4584 
4585 } // namespace selftests
4586 #endif /* GDB_SELF_TEST */
4587 
4588 /* Parse the current instruction and record the values of the registers and
4589    memory that will be changed in current instruction to record_arch_list
4590    return -1 if something is wrong.  */
4591 
4592 int
aarch64_process_record(struct gdbarch * gdbarch,struct regcache * regcache,CORE_ADDR insn_addr)4593 aarch64_process_record (struct gdbarch *gdbarch, struct regcache *regcache,
4594 			CORE_ADDR insn_addr)
4595 {
4596   uint32_t rec_no = 0;
4597   uint8_t insn_size = 4;
4598   uint32_t ret = 0;
4599   gdb_byte buf[insn_size];
4600   insn_decode_record aarch64_record;
4601 
4602   memset (&buf[0], 0, insn_size);
4603   memset (&aarch64_record, 0, sizeof (insn_decode_record));
4604   target_read_memory (insn_addr, &buf[0], insn_size);
4605   aarch64_record.aarch64_insn
4606     = (uint32_t) extract_unsigned_integer (&buf[0],
4607 					   insn_size,
4608 					   gdbarch_byte_order (gdbarch));
4609   aarch64_record.regcache = regcache;
4610   aarch64_record.this_addr = insn_addr;
4611   aarch64_record.gdbarch = gdbarch;
4612 
4613   ret = aarch64_record_decode_insn_handler (&aarch64_record);
4614   if (ret == AARCH64_RECORD_UNSUPPORTED)
4615     {
4616       printf_unfiltered (_("Process record does not support instruction "
4617 			   "0x%0x at address %s.\n"),
4618 			 aarch64_record.aarch64_insn,
4619 			 paddress (gdbarch, insn_addr));
4620       ret = -1;
4621     }
4622 
4623   if (0 == ret)
4624     {
4625       /* Record registers.  */
4626       record_full_arch_list_add_reg (aarch64_record.regcache,
4627 				     AARCH64_PC_REGNUM);
4628       /* Always record register CPSR.  */
4629       record_full_arch_list_add_reg (aarch64_record.regcache,
4630 				     AARCH64_CPSR_REGNUM);
4631       if (aarch64_record.aarch64_regs)
4632 	for (rec_no = 0; rec_no < aarch64_record.reg_rec_count; rec_no++)
4633 	  if (record_full_arch_list_add_reg (aarch64_record.regcache,
4634 					     aarch64_record.aarch64_regs[rec_no]))
4635 	    ret = -1;
4636 
4637       /* Record memories.  */
4638       if (aarch64_record.aarch64_mems)
4639 	for (rec_no = 0; rec_no < aarch64_record.mem_rec_count; rec_no++)
4640 	  if (record_full_arch_list_add_mem
4641 	      ((CORE_ADDR)aarch64_record.aarch64_mems[rec_no].addr,
4642 	       aarch64_record.aarch64_mems[rec_no].len))
4643 	    ret = -1;
4644 
4645       if (record_full_arch_list_add_end ())
4646 	ret = -1;
4647     }
4648 
4649   deallocate_reg_mem (&aarch64_record);
4650   return ret;
4651 }
4652