1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2    GDB.
3 
4    Copyright (C) 2009-2021 Free Software Foundation, Inc.
5    Contributed by ARM Ltd.
6 
7    This file is part of GDB.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
21 
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31 #include "debug.h"
32 
33 #include <signal.h>
34 #include <sys/user.h>
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
37 #include <inttypes.h>
38 #include <endian.h>
39 #include <sys/uio.h>
40 
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "arch/aarch64-mte-linux.h"
44 #include "linux-aarch32-tdesc.h"
45 #include "linux-aarch64-tdesc.h"
46 #include "nat/aarch64-mte-linux-ptrace.h"
47 #include "nat/aarch64-sve-linux-ptrace.h"
48 #include "tdesc.h"
49 
50 #ifdef HAVE_SYS_REG_H
51 #include <sys/reg.h>
52 #endif
53 
54 #ifdef HAVE_GETAUXVAL
55 #include <sys/auxv.h>
56 #endif
57 
58 /* Linux target op definitions for the AArch64 architecture.  */
59 
60 class aarch64_target : public linux_process_target
61 {
62 public:
63 
64   const regs_info *get_regs_info () override;
65 
66   int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
67 
68   int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
69 
70   const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
71 
72   bool supports_z_point_type (char z_type) override;
73 
74   bool supports_tracepoints () override;
75 
76   bool supports_fast_tracepoints () override;
77 
78   int install_fast_tracepoint_jump_pad
79     (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
80      CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
81      CORE_ADDR *trampoline, ULONGEST *trampoline_size,
82      unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
83      CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
84      char *err) override;
85 
86   int get_min_fast_tracepoint_insn_len () override;
87 
88   struct emit_ops *emit_ops () override;
89 
90   bool supports_memory_tagging () override;
91 
92   bool fetch_memtags (CORE_ADDR address, size_t len,
93 		      gdb::byte_vector &tags, int type) override;
94 
95   bool store_memtags (CORE_ADDR address, size_t len,
96 		      const gdb::byte_vector &tags, int type) override;
97 
98 protected:
99 
100   void low_arch_setup () override;
101 
102   bool low_cannot_fetch_register (int regno) override;
103 
104   bool low_cannot_store_register (int regno) override;
105 
106   bool low_supports_breakpoints () override;
107 
108   CORE_ADDR low_get_pc (regcache *regcache) override;
109 
110   void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
111 
112   bool low_breakpoint_at (CORE_ADDR pc) override;
113 
114   int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
115 			int size, raw_breakpoint *bp) override;
116 
117   int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
118 			int size, raw_breakpoint *bp) override;
119 
120   bool low_stopped_by_watchpoint () override;
121 
122   CORE_ADDR low_stopped_data_address () override;
123 
124   bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
125 			  int direction) override;
126 
127   arch_process_info *low_new_process () override;
128 
129   void low_delete_process (arch_process_info *info) override;
130 
131   void low_new_thread (lwp_info *) override;
132 
133   void low_delete_thread (arch_lwp_info *) override;
134 
135   void low_new_fork (process_info *parent, process_info *child) override;
136 
137   void low_prepare_to_resume (lwp_info *lwp) override;
138 
139   int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
140 
141   bool low_supports_range_stepping () override;
142 
143   bool low_supports_catch_syscall () override;
144 
145   void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
146 };
147 
148 /* The singleton target ops object.  */
149 
150 static aarch64_target the_aarch64_target;
151 
152 bool
aarch64_target::low_cannot_fetch_register (int regno)
154 {
155   gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
156 			  "is not implemented by the target");
157 }
158 
159 bool
aarch64_target::low_cannot_store_register (int regno)
161 {
162   gdb_assert_not_reached ("linux target op low_cannot_store_register "
163 			  "is not implemented by the target");
164 }
165 
166 void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
168 {
169   aarch64_linux_prepare_to_resume (lwp);
170 }
171 
172 /* Per-process arch-specific data we want to keep.  */
173 
174 struct arch_process_info
175 {
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
186   struct aarch64_debug_reg_state debug_reg_state;
187 };
188 
/* Return true if the size of register 0 is 8 bytes.  */
190 
191 static int
is_64bit_tdesc (void)
193 {
194   struct regcache *regcache = get_thread_regcache (current_thread, 0);
195 
196   return register_size (regcache->tdesc, 0) == 8;
197 }
198 
/* Return true if the regcache's target description has SVE registers.  */
200 
201 static bool
is_sve_tdesc (void)
203 {
204   struct regcache *regcache = get_thread_regcache (current_thread, 0);
205 
206   return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
207 }
208 
209 static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
211 {
212   struct user_pt_regs *regset = (struct user_pt_regs *) buf;
213   int i;
214 
215   for (i = 0; i < AARCH64_X_REGS_NUM; i++)
216     collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
217   collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
218   collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
219   collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
220 }
221 
222 static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
224 {
225   const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
226   int i;
227 
228   for (i = 0; i < AARCH64_X_REGS_NUM; i++)
229     supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
230   supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
231   supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
232   supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
233 }
234 
235 static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
237 {
238   struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
239   int i;
240 
241   for (i = 0; i < AARCH64_V_REGS_NUM; i++)
242     collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
243   collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
244   collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
245 }
246 
247 static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
249 {
250   const struct user_fpsimd_state *regset
251     = (const struct user_fpsimd_state *) buf;
252   int i;
253 
254   for (i = 0; i < AARCH64_V_REGS_NUM; i++)
255     supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
256   supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
257   supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
258 }
259 
260 /* Store the pauth registers to regcache.  */
261 
262 static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
264 {
265   uint64_t *pauth_regset = (uint64_t *) buf;
266   int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
267 
268   if (pauth_base == 0)
269     return;
270 
271   supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
272 		   &pauth_regset[0]);
273   supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
274 		   &pauth_regset[1]);
275 }
276 
277 /* Fill BUF with the MTE registers from the regcache.  */
278 
279 static void
aarch64_fill_mteregset (struct regcache *regcache, void *buf)
281 {
282   uint64_t *mte_regset = (uint64_t *) buf;
283   int mte_base = find_regno (regcache->tdesc, "tag_ctl");
284 
285   collect_register (regcache, mte_base, mte_regset);
286 }
287 
288 /* Store the MTE registers to regcache.  */
289 
290 static void
aarch64_store_mteregset (struct regcache *regcache, const void *buf)
292 {
293   uint64_t *mte_regset = (uint64_t *) buf;
294   int mte_base = find_regno (regcache->tdesc, "tag_ctl");
295 
296   /* Tag Control register */
297   supply_register (regcache, mte_base, mte_regset);
298 }
299 
300 bool
aarch64_target::low_supports_breakpoints ()
302 {
303   return true;
304 }
305 
306 /* Implementation of linux target ops method "low_get_pc".  */
307 
308 CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
310 {
311   if (register_size (regcache->tdesc, 0) == 8)
312     return linux_get_pc_64bit (regcache);
313   else
314     return linux_get_pc_32bit (regcache);
315 }
316 
317 /* Implementation of linux target ops method "low_set_pc".  */
318 
319 void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
321 {
322   if (register_size (regcache->tdesc, 0) == 8)
323     linux_set_pc_64bit (regcache, pc);
324   else
325     linux_set_pc_32bit (regcache, pc);
326 }
327 
328 #define aarch64_breakpoint_len 4
329 
330 /* AArch64 BRK software debug mode instruction.
331    This instruction needs to match gdb/aarch64-tdep.c
332    (aarch64_default_breakpoint).  */
333 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
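/* The bytes above are the little-endian encoding of BRK #0 (0xd4200000).  */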
334 
335 /* Implementation of linux target ops method "low_breakpoint_at".  */
336 
337 bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
339 {
340   if (is_64bit_tdesc ())
341     {
342       gdb_byte insn[aarch64_breakpoint_len];
343 
344       read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
345       if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
346 	return true;
347 
348       return false;
349     }
350   else
351     return arm_breakpoint_at (where);
352 }
353 
354 static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
356 {
357   int i;
358 
359   for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
360     {
361       state->dr_addr_bp[i] = 0;
362       state->dr_ctrl_bp[i] = 0;
363       state->dr_ref_count_bp[i] = 0;
364     }
365 
366   for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
367     {
368       state->dr_addr_wp[i] = 0;
369       state->dr_ctrl_wp[i] = 0;
370       state->dr_ref_count_wp[i] = 0;
371     }
372 }
373 
374 /* Return the pointer to the debug register state structure in the
375    current process' arch-specific data area.  */
376 
377 struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
379 {
380   struct process_info *proc = find_process_pid (pid);
381 
382   return &proc->priv->arch_private->debug_reg_state;
383 }
384 
385 /* Implementation of target ops method "supports_z_point_type".  */
386 
387 bool
aarch64_target::supports_z_point_type (char z_type)
389 {
390   switch (z_type)
391     {
392     case Z_PACKET_SW_BP:
393     case Z_PACKET_HW_BP:
394     case Z_PACKET_WRITE_WP:
395     case Z_PACKET_READ_WP:
396     case Z_PACKET_ACCESS_WP:
397       return true;
398     default:
399       return false;
400     }
401 }
402 
403 /* Implementation of linux target ops method "low_insert_point".
404 
405    It actually only records the info of the to-be-inserted bp/wp;
406    the actual insertion will happen when threads are resumed.  */
407 
408 int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
410 				  int len, raw_breakpoint *bp)
411 {
412   int ret;
413   enum target_hw_bp_type targ_type;
414   struct aarch64_debug_reg_state *state
415     = aarch64_get_debug_reg_state (pid_of (current_thread));
416 
417   if (show_debug_regs)
418     fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
419 	     (unsigned long) addr, len);
420 
421   /* Determine the type from the raw breakpoint type.  */
422   targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
423 
424   if (targ_type != hw_execute)
425     {
426       if (aarch64_linux_region_ok_for_watchpoint (addr, len))
427 	ret = aarch64_handle_watchpoint (targ_type, addr, len,
428 					 1 /* is_insert */, state);
429       else
430 	ret = -1;
431     }
432   else
433     {
434       if (len == 3)
435 	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware breakpoint/watchpoint control register.  */
439 	  len = 2;
440 	}
441       ret = aarch64_handle_breakpoint (targ_type, addr, len,
442 				       1 /* is_insert */, state);
443     }
444 
445   if (show_debug_regs)
446     aarch64_show_debug_reg_state (state, "insert_point", addr, len,
447 				  targ_type);
448 
449   return ret;
450 }
451 
452 /* Implementation of linux target ops method "low_remove_point".
453 
454    It actually only records the info of the to-be-removed bp/wp,
455    the actual removal will be done when threads are resumed.  */
456 
457 int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
459 				  int len, raw_breakpoint *bp)
460 {
461   int ret;
462   enum target_hw_bp_type targ_type;
463   struct aarch64_debug_reg_state *state
464     = aarch64_get_debug_reg_state (pid_of (current_thread));
465 
466   if (show_debug_regs)
467     fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
468 	     (unsigned long) addr, len);
469 
470   /* Determine the type from the raw breakpoint type.  */
471   targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
472 
473   /* Set up state pointers.  */
474   if (targ_type != hw_execute)
475     ret =
476       aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
477 				 state);
478   else
479     {
480       if (len == 3)
481 	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length bit
	     mask in the hardware breakpoint/watchpoint control register.  */
485 	  len = 2;
486 	}
487       ret = aarch64_handle_breakpoint (targ_type, addr, len,
488 				       0 /* is_insert */,  state);
489     }
490 
491   if (show_debug_regs)
492     aarch64_show_debug_reg_state (state, "remove_point", addr, len,
493 				  targ_type);
494 
495   return ret;
496 }
497 
/* Return the address with only its significant bits.  This is used to
   ignore the top byte (TBI).  */
500 
501 static CORE_ADDR
address_significant (CORE_ADDR addr)
503 {
504   /* Clear insignificant bits of a target address and sign extend resulting
505      address.  */
506   int addr_bit = 56;
507 
508   CORE_ADDR sign = (CORE_ADDR) 1 << (addr_bit - 1);
509   addr &= ((CORE_ADDR) 1 << addr_bit) - 1;
510   addr = (addr ^ sign) - sign;
511 
512   return addr;
513 }
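/* For example (illustrative values): a tagged pointer 0xb4ffffabcd123456
   becomes 0xffffffabcd123456 (top byte dropped, bit 55 sign-extended into
   the upper bits), while 0x34000000deadbeef simply becomes 0xdeadbeef.  */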
514 
515 /* Implementation of linux target ops method "low_stopped_data_address".  */
516 
517 CORE_ADDR
aarch64_target::low_stopped_data_address ()
519 {
520   siginfo_t siginfo;
521   int pid, i;
522   struct aarch64_debug_reg_state *state;
523 
524   pid = lwpid_of (current_thread);
525 
526   /* Get the siginfo.  */
527   if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
528     return (CORE_ADDR) 0;
529 
530   /* Need to be a hardware breakpoint/watchpoint trap.  */
531   if (siginfo.si_signo != SIGTRAP
532       || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
533     return (CORE_ADDR) 0;
534 
535   /* Make sure to ignore the top byte, otherwise we may not recognize a
536      hardware watchpoint hit.  The stopped data addresses coming from the
537      kernel can potentially be tagged addresses.  */
538   const CORE_ADDR addr_trap
539     = address_significant ((CORE_ADDR) siginfo.si_addr);
540 
541   /* Check if the address matches any watched address.  */
542   state = aarch64_get_debug_reg_state (pid_of (current_thread));
543   for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
544     {
545       const unsigned int offset
546 	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
547       const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
548       const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
549       const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
550       const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
551 
552       if (state->dr_ref_count_wp[i]
553 	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
554 	  && addr_trap >= addr_watch_aligned
555 	  && addr_trap < addr_watch + len)
556 	{
557 	  /* ADDR_TRAP reports the first address of the memory range
558 	     accessed by the CPU, regardless of what was the memory
559 	     range watched.  Thus, a large CPU access that straddles
560 	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
561 	     ADDR_TRAP that is lower than the
562 	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:
563 
564 	     addr: |   4   |   5   |   6   |   7   |   8   |
565 				   |---- range watched ----|
566 		   |----------- range accessed ------------|
567 
568 	     In this case, ADDR_TRAP will be 4.
569 
570 	     To match a watchpoint known to GDB core, we must never
571 	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
572 	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
573 	     positive on kernels older than 4.10.  See PR
574 	     external/20207.  */
575 	  return addr_orig;
576 	}
577     }
578 
579   return (CORE_ADDR) 0;
580 }
581 
582 /* Implementation of linux target ops method "low_stopped_by_watchpoint".  */
583 
584 bool
aarch64_target::low_stopped_by_watchpoint ()
586 {
587   return (low_stopped_data_address () != 0);
588 }
589 
590 /* Fetch the thread-local storage pointer for libthread_db.  */
591 
592 ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
594 		    lwpid_t lwpid, int idx, void **base)
595 {
596   return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
597 				     is_64bit_tdesc ());
598 }
599 
600 /* Implementation of linux target ops method "low_siginfo_fixup".  */
601 
602 bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
604 				   int direction)
605 {
606   /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
607   if (!is_64bit_tdesc ())
608     {
609       if (direction == 0)
610 	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
611 					     native);
612       else
613 	aarch64_siginfo_from_compat_siginfo (native,
614 					     (struct compat_siginfo *) inf);
615 
616       return true;
617     }
618 
619   return false;
620 }
621 
622 /* Implementation of linux target ops method "low_new_process".  */
623 
624 arch_process_info *
aarch64_target::low_new_process ()
626 {
627   struct arch_process_info *info = XCNEW (struct arch_process_info);
628 
629   aarch64_init_debug_reg_state (&info->debug_reg_state);
630 
631   return info;
632 }
633 
634 /* Implementation of linux target ops method "low_delete_process".  */
635 
636 void
aarch64_target::low_delete_process (arch_process_info *info)
638 {
639   xfree (info);
640 }
641 
642 void
aarch64_target::low_new_thread (lwp_info *lwp)
644 {
645   aarch64_linux_new_thread (lwp);
646 }
647 
648 void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
650 {
651   aarch64_linux_delete_thread (arch_lwp);
652 }
653 
654 /* Implementation of linux target ops method "low_new_fork".  */
655 
656 void
aarch64_target::low_new_fork (process_info *parent,
658 			      process_info *child)
659 {
660   /* These are allocated by linux_add_process.  */
661   gdb_assert (parent->priv != NULL
662 	      && parent->priv->arch_private != NULL);
663   gdb_assert (child->priv != NULL
664 	      && child->priv->arch_private != NULL);
665 
  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */
679 
680   *child->priv->arch_private = *parent->priv->arch_private;
681 }
682 
683 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
684 #define AARCH64_HWCAP_PACA (1 << 30)
685 
686 /* Implementation of linux target ops method "low_arch_setup".  */
687 
688 void
aarch64_target::low_arch_setup ()
690 {
691   unsigned int machine;
692   int is_elf64;
693   int tid;
694 
695   tid = lwpid_of (current_thread);
696 
697   is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
698 
699   if (is_elf64)
700     {
701       uint64_t vq = aarch64_sve_get_vq (tid);
702       unsigned long hwcap = linux_get_hwcap (8);
703       unsigned long hwcap2 = linux_get_hwcap2 (8);
704       bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
705       /* MTE is AArch64-only.  */
706       bool mte_p = hwcap2 & HWCAP2_MTE;
707 
708       current_process ()->tdesc
709 	= aarch64_linux_read_description (vq, pauth_p, mte_p);
710     }
711   else
712     current_process ()->tdesc = aarch32_linux_read_description ();
713 
714   aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
715 }
716 
717 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */
718 
719 static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
721 {
722   return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
723 }
724 
725 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */
726 
727 static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
729 {
730   return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
731 }
732 
733 static struct regset_info aarch64_regsets[] =
734 {
735   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
736     sizeof (struct user_pt_regs), GENERAL_REGS,
737     aarch64_fill_gregset, aarch64_store_gregset },
738   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
739     sizeof (struct user_fpsimd_state), FP_REGS,
740     aarch64_fill_fpregset, aarch64_store_fpregset
741   },
742   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
743     AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
744     NULL, aarch64_store_pauthregset },
745   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
746     AARCH64_LINUX_SIZEOF_MTE, OPTIONAL_REGS, aarch64_fill_mteregset,
747     aarch64_store_mteregset },
748   NULL_REGSET
749 };
750 
751 static struct regsets_info aarch64_regsets_info =
752   {
753     aarch64_regsets, /* regsets */
754     0, /* num_regsets */
755     NULL, /* disabled_regsets */
756   };
757 
758 static struct regs_info regs_info_aarch64 =
759   {
760     NULL, /* regset_bitmap */
761     NULL, /* usrregs */
762     &aarch64_regsets_info,
763   };
764 
765 static struct regset_info aarch64_sve_regsets[] =
766 {
767   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
768     sizeof (struct user_pt_regs), GENERAL_REGS,
769     aarch64_fill_gregset, aarch64_store_gregset },
770   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
771     SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
772     aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
773   },
774   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
775     AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
776     NULL, aarch64_store_pauthregset },
777   { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_TAGGED_ADDR_CTRL,
778     AARCH64_LINUX_SIZEOF_MTE, OPTIONAL_REGS, aarch64_fill_mteregset,
779     aarch64_store_mteregset },
780   NULL_REGSET
781 };
782 
783 static struct regsets_info aarch64_sve_regsets_info =
784   {
785     aarch64_sve_regsets, /* regsets.  */
786     0, /* num_regsets.  */
787     NULL, /* disabled_regsets.  */
788   };
789 
790 static struct regs_info regs_info_aarch64_sve =
791   {
792     NULL, /* regset_bitmap.  */
793     NULL, /* usrregs.  */
794     &aarch64_sve_regsets_info,
795   };
796 
797 /* Implementation of linux target ops method "get_regs_info".  */
798 
799 const regs_info *
aarch64_target::get_regs_info ()
801 {
802   if (!is_64bit_tdesc ())
803     return &regs_info_aarch32;
804 
805   if (is_sve_tdesc ())
806     return &regs_info_aarch64_sve;
807 
808   return &regs_info_aarch64;
809 }
810 
811 /* Implementation of target ops method "supports_tracepoints".  */
812 
813 bool
aarch64_target::supports_tracepoints ()
815 {
816   if (current_thread == NULL)
817     return true;
818   else
819     {
820       /* We don't support tracepoints on aarch32 now.  */
821       return is_64bit_tdesc ();
822     }
823 }
824 
825 /* Implementation of linux target ops method "low_get_thread_area".  */
826 
827 int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
829 {
830   struct iovec iovec;
831   uint64_t reg;
832 
833   iovec.iov_base = &reg;
834   iovec.iov_len = sizeof (reg);
835 
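  /* NT_ARM_TLS reads the TPIDR_EL0 software thread ID register, which is
     used as the thread pointer.  */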
836   if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
837     return -1;
838 
839   *addrp = reg;
840 
841   return 0;
842 }
843 
844 bool
aarch64_target::low_supports_catch_syscall ()
846 {
847   return true;
848 }
849 
850 /* Implementation of linux target ops method "low_get_syscall_trapinfo".  */
851 
852 void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
854 {
855   int use_64bit = register_size (regcache->tdesc, 0) == 8;
856 
857   if (use_64bit)
858     {
859       long l_sysno;
860 
861       collect_register_by_name (regcache, "x8", &l_sysno);
862       *sysno = (int) l_sysno;
863     }
864   else
865     collect_register_by_name (regcache, "r7", sysno);
866 }
867 
868 /* List of condition codes that we need.  */
869 
870 enum aarch64_condition_codes
871 {
872   EQ = 0x0,
873   NE = 0x1,
874   LO = 0x3,
875   GE = 0xa,
876   LT = 0xb,
877   GT = 0xc,
878   LE = 0xd,
879 };
880 
881 enum aarch64_operand_type
882 {
883   OPERAND_IMMEDIATE,
884   OPERAND_REGISTER,
885 };
886 
887 /* Representation of an operand.  At this time, it only supports register
888    and immediate types.  */
889 
890 struct aarch64_operand
891 {
892   /* Type of the operand.  */
893   enum aarch64_operand_type type;
894 
895   /* Value of the operand according to the type.  */
896   union
897     {
898       uint32_t imm;
899       struct aarch64_register reg;
900     };
901 };
902 
903 /* List of registers that we are currently using, we can add more here as
904    we need to use them.  */
905 
906 /* General purpose scratch registers (64 bit).  */
907 static const struct aarch64_register x0 = { 0, 1 };
908 static const struct aarch64_register x1 = { 1, 1 };
909 static const struct aarch64_register x2 = { 2, 1 };
910 static const struct aarch64_register x3 = { 3, 1 };
911 static const struct aarch64_register x4 = { 4, 1 };
912 
913 /* General purpose scratch registers (32 bit).  */
914 static const struct aarch64_register w0 = { 0, 0 };
915 static const struct aarch64_register w2 = { 2, 0 };
916 
917 /* Intra-procedure scratch registers.  */
918 static const struct aarch64_register ip0 = { 16, 1 };
919 
920 /* Special purpose registers.  */
921 static const struct aarch64_register fp = { 29, 1 };
922 static const struct aarch64_register lr = { 30, 1 };
923 static const struct aarch64_register sp = { 31, 1 };
924 static const struct aarch64_register xzr = { 31, 1 };
925 
926 /* Dynamically allocate a new register.  If we know the register
927    statically, we should make it a global as above instead of using this
928    helper function.  */
929 
930 static struct aarch64_register
aarch64_register (unsigned num, int is64)
932 {
933   return (struct aarch64_register) { num, is64 };
934 }
935 
936 /* Helper function to create a register operand, for instructions with
937    different types of operands.
938 
939    For example:
940    p += emit_mov (p, x0, register_operand (x1));  */
941 
942 static struct aarch64_operand
register_operand (struct aarch64_register reg)
944 {
945   struct aarch64_operand operand;
946 
947   operand.type = OPERAND_REGISTER;
948   operand.reg = reg;
949 
950   return operand;
951 }
952 
953 /* Helper function to create an immediate operand, for instructions with
954    different types of operands.
955 
956    For example:
957    p += emit_mov (p, x0, immediate_operand (12));  */
958 
959 static struct aarch64_operand
immediate_operand (uint32_t imm)
961 {
962   struct aarch64_operand operand;
963 
964   operand.type = OPERAND_IMMEDIATE;
965   operand.imm = imm;
966 
967   return operand;
968 }
969 
970 /* Helper function to create an offset memory operand.
971 
972    For example:
973    p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */
974 
975 static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
977 {
978   return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
979 }
980 
981 /* Helper function to create a pre-index memory operand.
982 
983    For example:
984    p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */
985 
986 static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
988 {
989   return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
990 }
991 
992 /* Helper function to create a post-index memory operand.
993 
994    For example:
995    p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */
996 
997 static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
999 {
1000   return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
1001 }
1002 
1003 /* System control registers.  These special registers can be written and
1004    read with the MRS and MSR instructions.
1005 
1006    - NZCV: Condition flags.  GDB refers to this register under the CPSR
1007 	   name.
1008    - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
1010    - TPIDR_EL0: Software thread ID register.  */
1011 
1012 enum aarch64_system_control_registers
1013 {
1014   /*          op0           op1           crn          crm          op2  */
1015   NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
1016   FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
1017   FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
1018   TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
1019 };
1020 
1021 /* Write a BLR instruction into *BUF.
1022 
1023      BLR rn
1024 
1025    RN is the register to branch to.  */
1026 
1027 static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
1029 {
1030   return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
1031 }
1032 
1033 /* Write a RET instruction into *BUF.
1034 
1035      RET xn
1036 
1037    RN is the register to branch to.  */
1038 
1039 static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
1041 {
1042   return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
1043 }
1044 
1045 static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
1047 		      struct aarch64_register rt,
1048 		      struct aarch64_register rt2,
1049 		      struct aarch64_register rn,
1050 		      struct aarch64_memory_operand operand)
1051 {
1052   uint32_t opc;
1053   uint32_t pre_index;
1054   uint32_t write_back;
1055 
1056   if (rt.is64)
1057     opc = ENCODE (2, 2, 30);
1058   else
1059     opc = ENCODE (0, 2, 30);
1060 
1061   switch (operand.type)
1062     {
1063     case MEMORY_OPERAND_OFFSET:
1064       {
1065 	pre_index = ENCODE (1, 1, 24);
1066 	write_back = ENCODE (0, 1, 23);
1067 	break;
1068       }
1069     case MEMORY_OPERAND_POSTINDEX:
1070       {
1071 	pre_index = ENCODE (0, 1, 24);
1072 	write_back = ENCODE (1, 1, 23);
1073 	break;
1074       }
1075     case MEMORY_OPERAND_PREINDEX:
1076       {
1077 	pre_index = ENCODE (1, 1, 24);
1078 	write_back = ENCODE (1, 1, 23);
1079 	break;
1080       }
1081     default:
1082       return 0;
1083     }
1084 
1085   return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
1086 			    | ENCODE (operand.index >> 3, 7, 15)
1087 			    | ENCODE (rt2.num, 5, 10)
1088 			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1089 }
1090 
1091 /* Write a STP instruction into *BUF.
1092 
1093      STP rt, rt2, [rn, #offset]
1094      STP rt, rt2, [rn, #index]!
1095      STP rt, rt2, [rn], #index
1096 
1097    RT and RT2 are the registers to store.
1098    RN is the base address register.
1099    OFFSET is the immediate to add to the base address.  It is limited to a
1100    -512 .. 504 range (7 bits << 3).  */
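/* For example, pushing the frame pointer and link register with pre-index
   writeback, as a prologue typically does (illustrative):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));  */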
1101 
1102 static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
1104 	  struct aarch64_register rt2, struct aarch64_register rn,
1105 	  struct aarch64_memory_operand operand)
1106 {
1107   return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
1108 }
1109 
1110 /* Write a LDP instruction into *BUF.
1111 
1112      LDP rt, rt2, [rn, #offset]
1113      LDP rt, rt2, [rn, #index]!
1114      LDP rt, rt2, [rn], #index
1115 
   RT and RT2 are the registers to load.
1117    RN is the base address register.
1118    OFFSET is the immediate to add to the base address.  It is limited to a
1119    -512 .. 504 range (7 bits << 3).  */
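/* For example, popping a pair of registers with post-index writeback
   (illustrative):

     p += emit_ldp (p, x0, x1, sp, postindex_memory_operand (16));  */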
1120 
1121 static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
1123 	  struct aarch64_register rt2, struct aarch64_register rn,
1124 	  struct aarch64_memory_operand operand)
1125 {
1126   return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
1127 }
1128 
1129 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1130 
1131      LDP qt, qt2, [rn, #offset]
1132 
   RT and RT2 are the Q registers to load.
1134    RN is the base address register.
1135    OFFSET is the immediate to add to the base address.  It is limited to
1136    -1024 .. 1008 range (7 bits << 4).  */
1137 
1138 static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1140 		   struct aarch64_register rn, int32_t offset)
1141 {
1142   uint32_t opc = ENCODE (2, 2, 30);
1143   uint32_t pre_index = ENCODE (1, 1, 24);
1144 
1145   return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
1146 			    | ENCODE (offset >> 4, 7, 15)
1147 			    | ENCODE (rt2, 5, 10)
1148 			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1149 }
1150 
1151 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1152 
1153      STP qt, qt2, [rn, #offset]
1154 
1155    RT and RT2 are the Q registers to store.
1156    RN is the base address register.
1157    OFFSET is the immediate to add to the base address.  It is limited to
1158    -1024 .. 1008 range (7 bits << 4).  */
1159 
1160 static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1162 		   struct aarch64_register rn, int32_t offset)
1163 {
1164   uint32_t opc = ENCODE (2, 2, 30);
1165   uint32_t pre_index = ENCODE (1, 1, 24);
1166 
1167   return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1168 			    | ENCODE (offset >> 4, 7, 15)
1169 			    | ENCODE (rt2, 5, 10)
1170 			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1171 }
1172 
1173 /* Write a LDRH instruction into *BUF.
1174 
1175      LDRH wt, [xn, #offset]
1176      LDRH wt, [xn, #index]!
1177      LDRH wt, [xn], #index
1178 
   RT is the register to load.
1180    RN is the base address register.
1181    OFFSET is the immediate to add to the base address.  It is limited to
1182    0 .. 32760 range (12 bits << 3).  */
1183 
1184 static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1186 	   struct aarch64_register rn,
1187 	   struct aarch64_memory_operand operand)
1188 {
1189   return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1190 }
1191 
1192 /* Write a LDRB instruction into *BUF.
1193 
1194      LDRB wt, [xn, #offset]
1195      LDRB wt, [xn, #index]!
1196      LDRB wt, [xn], #index
1197 
   RT is the register to load.
1199    RN is the base address register.
1200    OFFSET is the immediate to add to the base address.  It is limited to
1201    0 .. 32760 range (12 bits << 3).  */
1202 
1203 static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1205 	   struct aarch64_register rn,
1206 	   struct aarch64_memory_operand operand)
1207 {
1208   return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1209 }
1210 
1211 
1212 
1213 /* Write a STR instruction into *BUF.
1214 
1215      STR rt, [rn, #offset]
1216      STR rt, [rn, #index]!
1217      STR rt, [rn], #index
1218 
1219    RT is the register to store.
1220    RN is the base address register.
1221    OFFSET is the immediate to add to the base address.  It is limited to
1222    0 .. 32760 range (12 bits << 3).  */
1223 
1224 static int
emit_str (uint32_t *buf, struct aarch64_register rt,
1226 	  struct aarch64_register rn,
1227 	  struct aarch64_memory_operand operand)
1228 {
1229   return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1230 }
1231 
1232 /* Helper function emitting an exclusive load or store instruction.  */
1233 
1234 static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1236 			   enum aarch64_opcodes opcode,
1237 			   struct aarch64_register rs,
1238 			   struct aarch64_register rt,
1239 			   struct aarch64_register rt2,
1240 			   struct aarch64_register rn)
1241 {
1242   return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1243 			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1244 			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1245 }
1246 
/* Write a LDAXR instruction into *BUF.
1248 
1249      LDAXR rt, [xn]
1250 
1251    RT is the destination register.
1252    RN is the base address register.  */
1253 
1254 static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1256 	    struct aarch64_register rn)
1257 {
1258   return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1259 				    xzr, rn);
1260 }
1261 
1262 /* Write a STXR instruction into *BUF.
1263 
1264      STXR ws, rt, [xn]
1265 
   RS is the result register; it indicates whether the store succeeded.
   RT is the register to store.
1268    RN is the base address register.  */
1269 
1270 static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
1272 	   struct aarch64_register rt, struct aarch64_register rn)
1273 {
1274   return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1275 				    xzr, rn);
1276 }
1277 
1278 /* Write a STLR instruction into *BUF.
1279 
1280      STLR rt, [xn]
1281 
1282    RT is the register to store.
1283    RN is the base address register.  */
1284 
1285 static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
1287 	   struct aarch64_register rn)
1288 {
1289   return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1290 				    xzr, rn);
1291 }
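/* Together, LDAXR, STXR and STLR implement the usual AArch64 locking
   pattern: load the lock word with LDAXR, retry while it is non-zero, try
   to claim it with STXR (retrying if the exclusive monitor was lost), and
   release it later with STLR.  A minimal sketch using the helpers above,
   with x0 holding the lock address (register choices are illustrative):

     p += emit_ldaxr (p, x1, x0);
     p += emit_cmp (p, x1, register_operand (xzr));
     (branch back if the lock was already taken)
     p += emit_stxr (p, w2, x3, x0);
     (branch back if w2 is non-zero, i.e. the store failed)
     ...
     p += emit_stlr (p, xzr, x0);  */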
1292 
1293 /* Helper function for data processing instructions with register sources.  */
1294 
1295 static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1297 			  struct aarch64_register rd,
1298 			  struct aarch64_register rn,
1299 			  struct aarch64_register rm)
1300 {
1301   uint32_t size = ENCODE (rd.is64, 1, 31);
1302 
1303   return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1304 			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1305 }
1306 
1307 /* Helper function for data processing instructions taking either a register
1308    or an immediate.  */
1309 
1310 static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1312 		      struct aarch64_register rd,
1313 		      struct aarch64_register rn,
1314 		      struct aarch64_operand operand)
1315 {
1316   uint32_t size = ENCODE (rd.is64, 1, 31);
1317   /* The opcode is different for register and immediate source operands.  */
1318   uint32_t operand_opcode;
1319 
1320   if (operand.type == OPERAND_IMMEDIATE)
1321     {
1322       /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1323       operand_opcode = ENCODE (8, 4, 25);
1324 
1325       return aarch64_emit_insn (buf, opcode | operand_opcode | size
1326 				| ENCODE (operand.imm, 12, 10)
1327 				| ENCODE (rn.num, 5, 5)
1328 				| ENCODE (rd.num, 5, 0));
1329     }
1330   else
1331     {
1332       /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1333       operand_opcode = ENCODE (5, 4, 25);
1334 
1335       return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1336 				       rn, operand.reg);
1337     }
1338 }
1339 
1340 /* Write an ADD instruction into *BUF.
1341 
1342      ADD rd, rn, #imm
1343      ADD rd, rn, rm
1344 
1345    This function handles both an immediate and register add.
1346 
1347    RD is the destination register.
1348    RN is the input register.
1349    OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1350    OPERAND_REGISTER.  */
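/* For example (illustrative):

     p += emit_add (p, x2, x0, register_operand (x1));
     p += emit_add (p, sp, sp, immediate_operand (16));  */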
1351 
1352 static int
emit_add (uint32_t *buf, struct aarch64_register rd,
1354 	  struct aarch64_register rn, struct aarch64_operand operand)
1355 {
1356   return emit_data_processing (buf, ADD, rd, rn, operand);
1357 }
1358 
1359 /* Write a SUB instruction into *BUF.
1360 
1361      SUB rd, rn, #imm
1362      SUB rd, rn, rm
1363 
1364    This function handles both an immediate and register sub.
1365 
1366    RD is the destination register.
1367    RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */
1369 
1370 static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
1372 	  struct aarch64_register rn, struct aarch64_operand operand)
1373 {
1374   return emit_data_processing (buf, SUB, rd, rn, operand);
1375 }
1376 
1377 /* Write a MOV instruction into *BUF.
1378 
1379      MOV rd, #imm
1380      MOV rd, rm
1381 
1382    This function handles both a wide immediate move and a register move,
1383    with the condition that the source register is not xzr.  xzr and the
1384    stack pointer share the same encoding and this function only supports
1385    the stack pointer.
1386 
1387    RD is the destination register.
1388    OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1389    OPERAND_REGISTER.  */
1390 
1391 static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
1393 	  struct aarch64_operand operand)
1394 {
1395   if (operand.type == OPERAND_IMMEDIATE)
1396     {
1397       uint32_t size = ENCODE (rd.is64, 1, 31);
1398       /* Do not shift the immediate.  */
1399       uint32_t shift = ENCODE (0, 2, 21);
1400 
1401       return aarch64_emit_insn (buf, MOV | size | shift
1402 				| ENCODE (operand.imm, 16, 5)
1403 				| ENCODE (rd.num, 5, 0));
1404     }
1405   else
1406     return emit_add (buf, rd, operand.reg, immediate_operand (0));
1407 }
1408 
1409 /* Write a MOVK instruction into *BUF.
1410 
1411      MOVK rd, #imm, lsl #shift
1412 
1413    RD is the destination register.
1414    IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM, in multiples of 16
   bits.  */
1416 
1417 static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1419 	   unsigned shift)
1420 {
1421   uint32_t size = ENCODE (rd.is64, 1, 31);
1422 
1423   return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1424 			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1425 }
1426 
1427 /* Write instructions into *BUF in order to move ADDR into a register.
1428    ADDR can be a 64-bit value.
1429 
1430    This function will emit a series of MOV and MOVK instructions, such as:
1431 
1432      MOV  xd, #(addr)
1433      MOVK xd, #(addr >> 16), lsl #16
1434      MOVK xd, #(addr >> 32), lsl #32
1435      MOVK xd, #(addr >> 48), lsl #48  */
1436 
1437 static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1439 {
1440   uint32_t *p = buf;
1441 
  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
1444   p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1445 
1446   if ((addr >> 16) != 0)
1447     p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1448   else
1449     return p - buf;
1450 
1451   if ((addr >> 32) != 0)
1452     p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1453   else
1454     return p - buf;
1455 
1456   if ((addr >> 48) != 0)
1457     p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1458 
1459   return p - buf;
1460 }
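/* For example, for ADDR 0x0000007fabcd1234 the emitted sequence is
   (illustrative):

     MOV  xd, #0x1234
     MOVK xd, #0xabcd, lsl #16
     MOVK xd, #0x7f, lsl #32

   No final MOVK is emitted because bits 48..63 of ADDR are zero.  */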
1461 
1462 /* Write a SUBS instruction into *BUF.
1463 
1464      SUBS rd, rn, rm
1465 
   This instruction updates the condition flags.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand (either a register or an immediate).  */
1470 
1471 static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
1473 	   struct aarch64_register rn, struct aarch64_operand operand)
1474 {
1475   return emit_data_processing (buf, SUBS, rd, rn, operand);
1476 }
1477 
1478 /* Write a CMP instruction into *BUF.
1479 
1480      CMP rn, rm
1481 
1482    This instruction is an alias of SUBS xzr, rn, rm.
1483 
1484    RN and RM are the registers to compare.  */
1485 
1486 static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
1488 	      struct aarch64_operand operand)
1489 {
1490   return emit_subs (buf, xzr, rn, operand);
1491 }
1492 
1493 /* Write a AND instruction into *BUF.
1494 
1495      AND rd, rn, rm
1496 
1497    RD is the destination register.
1498    RN and RM are the source registers.  */
1499 
1500 static int
emit_and (uint32_t *buf, struct aarch64_register rd,
1502 	  struct aarch64_register rn, struct aarch64_register rm)
1503 {
1504   return emit_data_processing_reg (buf, AND, rd, rn, rm);
1505 }
1506 
1507 /* Write a ORR instruction into *BUF.
1508 
1509      ORR rd, rn, rm
1510 
1511    RD is the destination register.
1512    RN and RM are the source registers.  */
1513 
1514 static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
1516 	  struct aarch64_register rn, struct aarch64_register rm)
1517 {
1518   return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1519 }
1520 
1521 /* Write a ORN instruction into *BUF.
1522 
1523      ORN rd, rn, rm
1524 
1525    RD is the destination register.
1526    RN and RM are the source registers.  */
1527 
1528 static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
1530 	  struct aarch64_register rn, struct aarch64_register rm)
1531 {
1532   return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1533 }
1534 
1535 /* Write a EOR instruction into *BUF.
1536 
1537      EOR rd, rn, rm
1538 
1539    RD is the destination register.
1540    RN and RM are the source registers.  */
1541 
1542 static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
1544 	  struct aarch64_register rn, struct aarch64_register rm)
1545 {
1546   return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1547 }
1548 
1549 /* Write a MVN instruction into *BUF.
1550 
1551      MVN rd, rm
1552 
1553    This is an alias for ORN rd, xzr, rm.
1554 
1555    RD is the destination register.
1556    RM is the source register.  */
1557 
1558 static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
1560 	  struct aarch64_register rm)
1561 {
1562   return emit_orn (buf, rd, xzr, rm);
1563 }
1564 
1565 /* Write a LSLV instruction into *BUF.
1566 
1567      LSLV rd, rn, rm
1568 
1569    RD is the destination register.
1570    RN and RM are the source registers.  */
1571 
1572 static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
1574 	   struct aarch64_register rn, struct aarch64_register rm)
1575 {
1576   return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1577 }
1578 
1579 /* Write a LSRV instruction into *BUF.
1580 
1581      LSRV rd, rn, rm
1582 
1583    RD is the destination register.
1584    RN and RM are the source registers.  */
1585 
1586 static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1588 	   struct aarch64_register rn, struct aarch64_register rm)
1589 {
1590   return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1591 }
1592 
1593 /* Write a ASRV instruction into *BUF.
1594 
1595      ASRV rd, rn, rm
1596 
1597    RD is the destination register.
1598    RN and RM are the source registers.  */
1599 
1600 static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
1602 	   struct aarch64_register rn, struct aarch64_register rm)
1603 {
1604   return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1605 }
1606 
1607 /* Write a MUL instruction into *BUF.
1608 
1609      MUL rd, rn, rm
1610 
1611    RD is the destination register.
1612    RN and RM are the source registers.  */
1613 
1614 static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
1616 	  struct aarch64_register rn, struct aarch64_register rm)
1617 {
1618   return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1619 }
1620 
1621 /* Write a MRS instruction into *BUF.  The register size is 64-bit.
1622 
1623      MRS xt, system_reg
1624 
1625    RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */
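/* For example, saving the condition flags before clobbering them
   (illustrative):

     p += emit_mrs (p, x2, NZCV);  */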
1627 
1628 static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
1630 	  enum aarch64_system_control_registers system_reg)
1631 {
1632   return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1633 			    | ENCODE (rt.num, 5, 0));
1634 }
1635 
1636 /* Write a MSR instruction into *BUF.  The register size is 64-bit.
1637 
1638      MSR system_reg, xt
1639 
   SYSTEM_REG is the special purpose register to write.
1641    RT is the input register.  */
1642 
1643 static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1645 	  struct aarch64_register rt)
1646 {
1647   return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1648 			    | ENCODE (rt.num, 5, 0));
1649 }
1650 
1651 /* Write a SEVL instruction into *BUF.
1652 
1653    This is a hint instruction telling the hardware to trigger an event.  */
1654 
1655 static int
emit_sevl (uint32_t *buf)
1657 {
1658   return aarch64_emit_insn (buf, SEVL);
1659 }
1660 
1661 /* Write a WFE instruction into *BUF.
1662 
1663    This is a hint instruction telling the hardware to wait for an event.  */
1664 
1665 static int
emit_wfe (uint32_t *buf)
1667 {
1668   return aarch64_emit_insn (buf, WFE);
1669 }
1670 
1671 /* Write a SBFM instruction into *BUF.
1672 
1673      SBFM rd, rn, #immr, #imms
1674 
1675    This instruction moves the bits from #immr to #imms into the
1676    destination, sign extending the result.
1677 
1678    RD is the destination register.
1679    RN is the source register.
1680    IMMR is the bit number to start at (least significant bit).
1681    IMMS is the bit number to stop at (most significant bit).  */
1682 
1683 static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1685 	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
1686 {
1687   uint32_t size = ENCODE (rd.is64, 1, 31);
1688   uint32_t n = ENCODE (rd.is64, 1, 22);
1689 
1690   return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1691 			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1692 			    | ENCODE (rd.num, 5, 0));
1693 }
1694 
1695 /* Write a SBFX instruction into *BUF.
1696 
1697      SBFX rd, rn, #lsb, #width
1698 
1699    This instruction moves #width bits from #lsb into the destination, sign
1700    extending the result.  This is an alias for:
1701 
1702      SBFM rd, rn, #lsb, #(lsb + width - 1)
1703 
1704    RD is the destination register.
1705    RN is the source register.
1706    LSB is the bit number to start at (least significant bit).
1707    WIDTH is the number of bits to move.  */
1708 
1709 static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1711 	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
1712 {
1713   return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1714 }
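
/* As an illustration, emit_sbfx (p, x0, x0, 0, 8) encodes
   "SBFX x0, x0, #0, #8" (i.e. SBFM x0, x0, #0, #7), sign-extending the
   low byte of x0 in place; aarch64_emit_ext below uses this helper to
   implement the agent expression "ext" operation.  */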
1715 
1716 /* Write a UBFM instruction into *BUF.
1717 
1718      UBFM rd, rn, #immr, #imms
1719 
1720    This instruction moves the bits from #immr to #imms into the
1721    destination, extending the result with zeros.
1722 
1723    RD is the destination register.
1724    RN is the source register.
1725    IMMR is the bit number to start at (least significant bit).
1726    IMMS is the bit number to stop at (most significant bit).  */
1727 
1728 static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1730 	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
1731 {
1732   uint32_t size = ENCODE (rd.is64, 1, 31);
1733   uint32_t n = ENCODE (rd.is64, 1, 22);
1734 
1735   return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1736 			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1737 			    | ENCODE (rd.num, 5, 0));
1738 }
1739 
1740 /* Write a UBFX instruction into *BUF.
1741 
1742      UBFX rd, rn, #lsb, #width
1743 
1744    This instruction moves #width bits from #lsb into the destination,
1745    extending the result with zeros.  This is an alias for:
1746 
1747      UBFM rd, rn, #lsb, #(lsb + width - 1)
1748 
1749    RD is the destination register.
1750    RN is the source register.
1751    LSB is the bit number to start at (least significant bit).
1752    WIDTH is the number of bits to move.  */
1753 
1754 static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1756 	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
1757 {
1758   return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1759 }
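
/* Likewise, emit_ubfx (p, x0, x0, 0, 8) encodes "UBFX x0, x0, #0, #8",
   zero-extending the low byte of x0; aarch64_emit_zero_ext below is a
   thin wrapper around this helper.  */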
1760 
1761 /* Write a CSINC instruction into *BUF.
1762 
1763      CSINC rd, rn, rm, cond
1764 
1765    This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.
1767 
1768    RD is the destination register.
1769    RN and RM are the source registers.
1770    COND is the encoded condition.  */
1771 
1772 static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
1774 	    struct aarch64_register rn, struct aarch64_register rm,
1775 	    unsigned cond)
1776 {
1777   uint32_t size = ENCODE (rd.is64, 1, 31);
1778 
1779   return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1780 			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1781 			    | ENCODE (rd.num, 5, 0));
1782 }
1783 
1784 /* Write a CSET instruction into *BUF.
1785 
1786      CSET rd, cond
1787 
   This instruction conditionally writes 1 or 0 to the destination register.
1789    1 is written if the condition is true.  This is an alias for:
1790 
1791      CSINC rd, xzr, xzr, !cond
1792 
1793    Note that the condition needs to be inverted.
1794 
   RD is the destination register.
   COND is the encoded condition.  */
1798 
1799 static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1801 {
1802   /* The least significant bit of the condition needs toggling in order to
1803      invert it.  */
1804   return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1805 }
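
/* For instance, emit_cset (p, x0, EQ) produces "CSINC x0, xzr, xzr, NE",
   which is the canonical encoding of "CSET x0, EQ": x0 becomes 1 when the
   Z flag is set and 0 otherwise.  */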
1806 
1807 /* Write LEN instructions from BUF into the inferior memory at *TO.
1808 
1809    Note instructions are always little endian on AArch64, unlike data.  */
1810 
1811 static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1813 {
1814   size_t byte_len = len * sizeof (uint32_t);
1815 #if (__BYTE_ORDER == __BIG_ENDIAN)
1816   uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1817   size_t i;
1818 
1819   for (i = 0; i < len; i++)
1820     le_buf[i] = htole32 (buf[i]);
1821 
1822   target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1823 
1824   xfree (le_buf);
1825 #else
1826   target_write_memory (*to, (const unsigned char *) buf, byte_len);
1827 #endif
1828 
1829   *to += byte_len;
1830 }
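
/* Typical usage in this file (sketch): opcodes are accumulated into a
   local buffer with the emit_* helpers, then flushed to the inferior in
   one go, advancing the write address:

     uint32_t buf[16];
     uint32_t *p = buf;

     p += emit_nop (p);
     append_insns (&buildaddr, p - buf, buf);

   BUILDADDR here stands for whatever CORE_ADDR the caller is currently
   writing to.  */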
1831 
/* Sub-class of struct aarch64_insn_data, storing information for
   instruction relocation during fast tracepoint insertion.  The visitor
   can relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */
1836 
1837 struct aarch64_insn_relocation_data
1838 {
1839   struct aarch64_insn_data base;
1840 
1841   /* The new address the instruction is relocated to.  */
1842   CORE_ADDR new_addr;
1843   /* Pointer to the buffer of relocated instruction(s).  */
1844   uint32_t *insn_ptr;
1845 };
1846 
1847 /* Implementation of aarch64_insn_visitor method "b".  */
1848 
1849 static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1851 			     struct aarch64_insn_data *data)
1852 {
1853   struct aarch64_insn_relocation_data *insn_reloc
1854     = (struct aarch64_insn_relocation_data *) data;
1855   int64_t new_offset
1856     = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1857 
1858   if (can_encode_int32 (new_offset, 28))
1859     insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1860 }
1861 
1862 /* Implementation of aarch64_insn_visitor method "b_cond".  */
1863 
1864 static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1866 				  struct aarch64_insn_data *data)
1867 {
1868   struct aarch64_insn_relocation_data *insn_reloc
1869     = (struct aarch64_insn_relocation_data *) data;
1870   int64_t new_offset
1871     = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1872 
1873   if (can_encode_int32 (new_offset, 21))
1874     {
1875       insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1876 					  new_offset);
1877     }
1878   else if (can_encode_int32 (new_offset, 28))
1879     {
1880       /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
1882 	 the following instructions instead:
1883 
1884 	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
1885 	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
1886 	 TAKEN:
1887 	 B #(offset - 8)
1888 	 NOT_TAKEN:
1889 
1890       */
1891 
1892       insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1893       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1894       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1895     }
1896 }
1897 
1898 /* Implementation of aarch64_insn_visitor method "cb".  */
1899 
1900 static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1902 			      const unsigned rn, int is64,
1903 			      struct aarch64_insn_data *data)
1904 {
1905   struct aarch64_insn_relocation_data *insn_reloc
1906     = (struct aarch64_insn_relocation_data *) data;
1907   int64_t new_offset
1908     = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1909 
1910   if (can_encode_int32 (new_offset, 21))
1911     {
1912       insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1913 				       aarch64_register (rn, is64), new_offset);
1914     }
1915   else if (can_encode_int32 (new_offset, 28))
1916     {
1917       /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
1919 	 the following instructions instead:
1920 
1921 	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
1922 	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
1923 	 TAKEN:
1924 	 B #(offset - 8)
1925 	 NOT_TAKEN:
1926 
1927       */
1928       insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1929 				       aarch64_register (rn, is64), 8);
1930       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1931       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1932     }
1933 }
1934 
1935 /* Implementation of aarch64_insn_visitor method "tb".  */
1936 
1937 static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1939 			      const unsigned rt, unsigned bit,
1940 			      struct aarch64_insn_data *data)
1941 {
1942   struct aarch64_insn_relocation_data *insn_reloc
1943     = (struct aarch64_insn_relocation_data *) data;
1944   int64_t new_offset
1945     = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1946 
1947   if (can_encode_int32 (new_offset, 16))
1948     {
1949       insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1950 				       aarch64_register (rt, 1), new_offset);
1951     }
1952   else if (can_encode_int32 (new_offset, 28))
1953     {
1954       /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
1956 	 the following instructions instead:
1957 
1958 	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1959 	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
1960 	 TAKEN:
1961 	 B #(offset - 8)
1962 	 NOT_TAKEN:
1963 
1964       */
1965       insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1966 				       aarch64_register (rt, 1), 8);
1967       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1968       insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1969 				      new_offset - 8);
1970     }
1971 }
1972 
1973 /* Implementation of aarch64_insn_visitor method "adr".  */
1974 
1975 static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1977 			       const int is_adrp,
1978 			       struct aarch64_insn_data *data)
1979 {
1980   struct aarch64_insn_relocation_data *insn_reloc
1981     = (struct aarch64_insn_relocation_data *) data;
1982   /* We know exactly the address the ADR{P,} instruction will compute.
1983      We can just write it to the destination register.  */
1984   CORE_ADDR address = data->insn_addr + offset;
1985 
1986   if (is_adrp)
1987     {
1988       /* Clear the lower 12 bits of the offset to get the 4K page.  */
1989       insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1990 					     aarch64_register (rd, 1),
1991 					     address & ~0xfff);
1992     }
1993   else
1994     insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1995 					   aarch64_register (rd, 1), address);
1996 }
1997 
1998 /* Implementation of aarch64_insn_visitor method "ldr_literal".  */
1999 
2000 static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
2002 				       const unsigned rt, const int is64,
2003 				       struct aarch64_insn_data *data)
2004 {
2005   struct aarch64_insn_relocation_data *insn_reloc
2006     = (struct aarch64_insn_relocation_data *) data;
2007   CORE_ADDR address = data->insn_addr + offset;
2008 
2009   insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2010 					 aarch64_register (rt, 1), address);
2011 
2012   /* We know exactly what address to load from, and what register we
2013      can use:
2014 
2015      MOV xd, #(oldloc + offset)
2016      MOVK xd, #((oldloc + offset) >> 16), lsl #16
2017      ...
2018 
2019      LDR xd, [xd] ; or LDRSW xd, [xd]
2020 
2021   */
2022 
2023   if (is_sw)
2024     insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
2025 					aarch64_register (rt, 1),
2026 					aarch64_register (rt, 1),
2027 					offset_memory_operand (0));
2028   else
2029     insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
2030 				      aarch64_register (rt, is64),
2031 				      aarch64_register (rt, 1),
2032 				      offset_memory_operand (0));
2033 }
2034 
2035 /* Implementation of aarch64_insn_visitor method "others".  */
2036 
2037 static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
2039 				  struct aarch64_insn_data *data)
2040 {
2041   struct aarch64_insn_relocation_data *insn_reloc
2042     = (struct aarch64_insn_relocation_data *) data;
2043 
2044   /* The instruction is not PC relative.  Just re-emit it at the new
2045      location.  */
2046   insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
2047 }
2048 
2049 static const struct aarch64_insn_visitor visitor =
2050 {
2051   aarch64_ftrace_insn_reloc_b,
2052   aarch64_ftrace_insn_reloc_b_cond,
2053   aarch64_ftrace_insn_reloc_cb,
2054   aarch64_ftrace_insn_reloc_tb,
2055   aarch64_ftrace_insn_reloc_adr,
2056   aarch64_ftrace_insn_reloc_ldr_literal,
2057   aarch64_ftrace_insn_reloc_others,
2058 };
2059 
2060 bool
aarch64_target::supports_fast_tracepoints ()
2062 {
2063   return true;
2064 }
2065 
2066 /* Implementation of target ops method
2067    "install_fast_tracepoint_jump_pad".  */
2068 
2069 int
aarch64_target::install_fast_tracepoint_jump_pad
2071   (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
2072    CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
2073    CORE_ADDR *trampoline, ULONGEST *trampoline_size,
2074    unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
2075    CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
2076    char *err)
2077 {
2078   uint32_t buf[256];
2079   uint32_t *p = buf;
2080   int64_t offset;
2081   int i;
2082   uint32_t insn;
2083   CORE_ADDR buildaddr = *jump_entry;
2084   struct aarch64_insn_relocation_data insn_data;
2085 
2086   /* We need to save the current state on the stack both to restore it
2087      later and to collect register values when the tracepoint is hit.
2088 
2089      The saved registers are pushed in a layout that needs to be in sync
2090      with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
2091      the supply_fast_tracepoint_registers function will fill in the
2092      register cache from a pointer to saved registers on the stack we build
2093      here.
2094 
2095      For simplicity, we set the size of each cell on the stack to 16 bytes.
2096      This way one cell can hold any register type, from system registers
2097      to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
2098      has to be 16 bytes aligned anyway.
2099 
2100      Note that the CPSR register does not exist on AArch64.  Instead we
2101      can access system bits describing the process state with the
2102      MRS/MSR instructions, namely the condition flags.  We save them as
2103      if they are part of a CPSR register because that's how GDB
2104      interprets these system bits.  At the moment, only the condition
2105      flags are saved in CPSR (NZCV).
2106 
2107      Stack layout, each cell is 16 bytes (descending):
2108 
2109      High *-------- SIMD&FP registers from 31 down to 0. --------*
2110 	  | q31                                                  |
2111 	  .                                                      .
2112 	  .                                                      . 32 cells
2113 	  .                                                      .
2114 	  | q0                                                   |
2115 	  *---- General purpose registers from 30 down to 0. ----*
2116 	  | x30                                                  |
2117 	  .                                                      .
2118 	  .                                                      . 31 cells
2119 	  .                                                      .
2120 	  | x0                                                   |
2121 	  *------------- Special purpose registers. -------------*
2122 	  | SP                                                   |
2123 	  | PC                                                   |
2124 	  | CPSR (NZCV)                                          | 5 cells
2125 	  | FPSR                                                 |
2126 	  | FPCR                                                 | <- SP + 16
2127 	  *------------- collecting_t object --------------------*
2128 	  | TPIDR_EL0               | struct tracepoint *        |
2129      Low  *------------------------------------------------------*
2130 
2131      After this stack is set up, we issue a call to the collector, passing
2132      it the saved registers at (SP + 16).  */
2133 
2134   /* Push SIMD&FP registers on the stack:
2135 
2136        SUB sp, sp, #(32 * 16)
2137 
2138        STP q30, q31, [sp, #(30 * 16)]
2139        ...
2140        STP q0, q1, [sp]
2141 
2142      */
2143   p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2144   for (i = 30; i >= 0; i -= 2)
2145     p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2146 
2147   /* Push general purpose registers on the stack.  Note that we do not need
2148      to push x31 as it represents the xzr register and not the stack
2149      pointer in a STR instruction.
2150 
2151        SUB sp, sp, #(31 * 16)
2152 
2153        STR x30, [sp, #(30 * 16)]
2154        ...
2155        STR x0, [sp]
2156 
2157      */
2158   p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2159   for (i = 30; i >= 0; i -= 1)
2160     p += emit_str (p, aarch64_register (i, 1), sp,
2161 		   offset_memory_operand (i * 16));
2162 
2163   /* Make space for 5 more cells.
2164 
2165        SUB sp, sp, #(5 * 16)
2166 
2167      */
2168   p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2169 
2170 
2171   /* Save SP:
2172 
2173        ADD x4, sp, #((32 + 31 + 5) * 16)
2174        STR x4, [sp, #(4 * 16)]
2175 
2176      */
2177   p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2178   p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2179 
2180   /* Save PC (tracepoint address):
2181 
2182        MOV  x3, #(tpaddr)
2183        ...
2184 
2185        STR x3, [sp, #(3 * 16)]
2186 
2187      */
2188 
2189   p += emit_mov_addr (p, x3, tpaddr);
2190   p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2191 
2192   /* Save CPSR (NZCV), FPSR and FPCR:
2193 
2194        MRS x2, nzcv
2195        MRS x1, fpsr
2196        MRS x0, fpcr
2197 
2198        STR x2, [sp, #(2 * 16)]
2199        STR x1, [sp, #(1 * 16)]
2200        STR x0, [sp, #(0 * 16)]
2201 
2202      */
2203   p += emit_mrs (p, x2, NZCV);
2204   p += emit_mrs (p, x1, FPSR);
2205   p += emit_mrs (p, x0, FPCR);
2206   p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2207   p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2208   p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2209 
  /* Push the collecting_t object.  It consists of the address of the
2211      tracepoint and an ID for the current thread.  We get the latter by
2212      reading the tpidr_el0 system register.  It corresponds to the
2213      NT_ARM_TLS register accessible with ptrace.
2214 
2215        MOV x0, #(tpoint)
2216        ...
2217 
2218        MRS x1, tpidr_el0
2219 
2220        STP x0, x1, [sp, #-16]!
2221 
2222      */
2223 
2224   p += emit_mov_addr (p, x0, tpoint);
2225   p += emit_mrs (p, x1, TPIDR_EL0);
2226   p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2227 
2228   /* Spin-lock:
2229 
2230      The shared memory for the lock is at lockaddr.  It will hold zero
2231      if no-one is holding the lock, otherwise it contains the address of
2232      the collecting_t object on the stack of the thread which acquired it.
2233 
2234      At this stage, the stack pointer points to this thread's collecting_t
2235      object.
2236 
2237      We use the following registers:
2238      - x0: Address of the lock.
2239      - x1: Pointer to collecting_t object.
2240      - x2: Scratch register.
2241 
2242        MOV x0, #(lockaddr)
2243        ...
2244        MOV x1, sp
2245 
2246        ; Trigger an event local to this core.  So the following WFE
2247        ; instruction is ignored.
2248        SEVL
2249      again:
2250        ; Wait for an event.  The event is triggered by either the SEVL
2251        ; or STLR instructions (store release).
2252        WFE
2253 
2254        ; Atomically read at lockaddr.  This marks the memory location as
2255        ; exclusive.  This instruction also has memory constraints which
2256        ; make sure all previous data reads and writes are done before
2257        ; executing it.
2258        LDAXR x2, [x0]
2259 
2260        ; Try again if another thread holds the lock.
2261        CBNZ x2, again
2262 
2263        ; We can lock it!  Write the address of the collecting_t object.
2264        ; This instruction will fail if the memory location is not marked
2265        ; as exclusive anymore.  If it succeeds, it will remove the
2266        ; exclusive mark on the memory location.  This way, if another
2267        ; thread executes this instruction before us, we will fail and try
2268        ; all over again.
2269        STXR w2, x1, [x0]
2270        CBNZ w2, again
2271 
2272      */
2273 
2274   p += emit_mov_addr (p, x0, lockaddr);
2275   p += emit_mov (p, x1, register_operand (sp));
2276 
2277   p += emit_sevl (p);
2278   p += emit_wfe (p);
2279   p += emit_ldaxr (p, x2, x0);
2280   p += emit_cb (p, 1, w2, -2 * 4);
2281   p += emit_stxr (p, w2, x1, x0);
2282   p += emit_cb (p, 1, x2, -4 * 4);
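
  /* Note that both CBNZ branches above target the WFE ("again" label):
     the first is emitted 2 instructions (8 bytes) after it and the second
     4 instructions (16 bytes) after it, hence the -2 * 4 and -4 * 4 byte
     offsets.  */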
2283 
2284   /* Call collector (struct tracepoint *, unsigned char *):
2285 
2286        MOV x0, #(tpoint)
2287        ...
2288 
2289        ; Saved registers start after the collecting_t object.
2290        ADD x1, sp, #16
2291 
2292        ; We use an intra-procedure-call scratch register.
2293        MOV ip0, #(collector)
2294        ...
2295 
2296        ; And call back to C!
2297        BLR ip0
2298 
2299      */
2300 
2301   p += emit_mov_addr (p, x0, tpoint);
2302   p += emit_add (p, x1, sp, immediate_operand (16));
2303 
2304   p += emit_mov_addr (p, ip0, collector);
2305   p += emit_blr (p, ip0);
2306 
2307   /* Release the lock.
2308 
2309        MOV x0, #(lockaddr)
2310        ...
2311 
2312        ; This instruction is a normal store with memory ordering
2313        ; constraints.  Thanks to this we do not have to put a data
2314        ; barrier instruction to make sure all data read and writes are done
2315        ; before this instruction is executed.  Furthermore, this instruction
2316        ; will trigger an event, letting other threads know they can grab
2317        ; the lock.
2318        STLR xzr, [x0]
2319 
2320      */
2321   p += emit_mov_addr (p, x0, lockaddr);
2322   p += emit_stlr (p, xzr, x0);
2323 
2324   /* Free collecting_t object:
2325 
2326        ADD sp, sp, #16
2327 
2328      */
2329   p += emit_add (p, sp, sp, immediate_operand (16));
2330 
2331   /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
2332      registers from the stack.
2333 
2334        LDR x2, [sp, #(2 * 16)]
2335        LDR x1, [sp, #(1 * 16)]
2336        LDR x0, [sp, #(0 * 16)]
2337 
2338        MSR NZCV, x2
2339        MSR FPSR, x1
2340        MSR FPCR, x0
2341 
       ADD sp, sp, #(5 * 16)
2343 
2344      */
2345   p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2346   p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2347   p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2348   p += emit_msr (p, NZCV, x2);
2349   p += emit_msr (p, FPSR, x1);
2350   p += emit_msr (p, FPCR, x0);
2351 
2352   p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2353 
2354   /* Pop general purpose registers:
2355 
2356        LDR x0, [sp]
2357        ...
2358        LDR x30, [sp, #(30 * 16)]
2359 
2360        ADD sp, sp, #(31 * 16)
2361 
2362      */
2363   for (i = 0; i <= 30; i += 1)
2364     p += emit_ldr (p, aarch64_register (i, 1), sp,
2365 		   offset_memory_operand (i * 16));
2366   p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2367 
2368   /* Pop SIMD&FP registers:
2369 
2370        LDP q0, q1, [sp]
2371        ...
2372        LDP q30, q31, [sp, #(30 * 16)]
2373 
2374        ADD sp, sp, #(32 * 16)
2375 
2376      */
2377   for (i = 0; i <= 30; i += 2)
2378     p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2379   p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2380 
2381   /* Write the code into the inferior memory.  */
2382   append_insns (&buildaddr, p - buf, buf);
2383 
2384   /* Now emit the relocated instruction.  */
2385   *adjusted_insn_addr = buildaddr;
2386   target_read_uint32 (tpaddr, &insn);
2387 
2388   insn_data.base.insn_addr = tpaddr;
2389   insn_data.new_addr = buildaddr;
2390   insn_data.insn_ptr = buf;
2391 
2392   aarch64_relocate_instruction (insn, &visitor,
2393 				(struct aarch64_insn_data *) &insn_data);
2394 
2395   /* We may not have been able to relocate the instruction.  */
2396   if (insn_data.insn_ptr == buf)
2397     {
2398       sprintf (err,
2399 	       "E.Could not relocate instruction from %s to %s.",
2400 	       core_addr_to_string_nz (tpaddr),
2401 	       core_addr_to_string_nz (buildaddr));
2402       return 1;
2403     }
2404   else
2405     append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2406   *adjusted_insn_addr_end = buildaddr;
2407 
2408   /* Go back to the start of the buffer.  */
2409   p = buf;
2410 
2411   /* Emit a branch back from the jump pad.  */
2412   offset = (tpaddr + orig_size - buildaddr);
2413   if (!can_encode_int32 (offset, 28))
2414     {
2415       sprintf (err,
2416 	       "E.Jump back from jump pad too far from tracepoint "
2417 	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2418 	       offset);
2419       return 1;
2420     }
2421 
2422   p += emit_b (p, 0, offset);
2423   append_insns (&buildaddr, p - buf, buf);
2424 
2425   /* Give the caller a branch instruction into the jump pad.  */
2426   offset = (*jump_entry - tpaddr);
2427   if (!can_encode_int32 (offset, 28))
2428     {
2429       sprintf (err,
2430 	       "E.Jump pad too far from tracepoint "
2431 	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2432 	       offset);
2433       return 1;
2434     }
2435 
2436   emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2437   *jjump_pad_insn_size = 4;
2438 
2439   /* Return the end address of our pad.  */
2440   *jump_entry = buildaddr;
2441 
2442   return 0;
2443 }
2444 
2445 /* Helper function writing LEN instructions from START into
2446    current_insn_ptr.  */
2447 
2448 static void
emit_ops_insns (const uint32_t *start, int len)
2450 {
2451   CORE_ADDR buildaddr = current_insn_ptr;
2452 
2453   if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
2455 		  len, paddress (buildaddr));
2456 
2457   append_insns (&buildaddr, len, start);
2458   current_insn_ptr = buildaddr;
2459 }
2460 
2461 /* Pop a register from the stack.  */
2462 
2463 static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
2465 {
2466   return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2467 }
2468 
2469 /* Push a register on the stack.  */
2470 
2471 static int
emit_push (uint32_t *buf, struct aarch64_register rt)
2473 {
2474   return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2475 }
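
/* Note that emit_push and emit_pop give every stack-machine slot a full
   16 bytes ("STR rt, [sp, #-16]!" / "LDR rt, [sp], #16"), keeping SP
   16-byte aligned as AArch64 requires, even though only 8 bytes of each
   slot are actually used.  */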
2476 
2477 /* Implementation of emit_ops method "emit_prologue".  */
2478 
2479 static void
aarch64_emit_prologue (void)
2481 {
2482   uint32_t buf[16];
2483   uint32_t *p = buf;
2484 
  /* This function emits a prologue for the following function prototype:
2486 
2487      enum eval_result_type f (unsigned char *regs,
2488 			      ULONGEST *value);
2489 
2490      The first argument is a buffer of raw registers.  The second
2491      argument is the result of
2492      evaluating the expression, which will be set to whatever is on top of
2493      the stack at the end.
2494 
2495      The stack set up by the prologue is as such:
2496 
2497      High *------------------------------------------------------*
2498 	  | LR                                                   |
2499 	  | FP                                                   | <- FP
2500 	  | x1  (ULONGEST *value)                                |
2501 	  | x0  (unsigned char *regs)                            |
2502      Low  *------------------------------------------------------*
2503 
2504      As we are implementing a stack machine, each opcode can expand the
2505      stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we save
2507      the current stack pointer in the frame pointer.  This way, it is not
2508      clobbered when calling C functions.
2509 
2510      Finally, throughout every operation, we are using register x0 as the
2511      top of the stack, and x1 as a scratch register.  */
2512 
2513   p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2514   p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2515   p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2516 
2517   p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2518 
2519 
2520   emit_ops_insns (buf, p - buf);
2521 }
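
/* After this prologue runs, the saved "regs" pointer lives at FP - 16 and
   the "value" pointer at FP - 8; aarch64_emit_reg and aarch64_emit_epilogue
   below fetch them back through the frame pointer using exactly those
   offsets.  */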
2522 
2523 /* Implementation of emit_ops method "emit_epilogue".  */
2524 
2525 static void
aarch64_emit_epilogue (void)
2527 {
2528   uint32_t buf[16];
2529   uint32_t *p = buf;
2530 
2531   /* Store the result of the expression (x0) in *value.  */
2532   p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2533   p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2534   p += emit_str (p, x0, x1, offset_memory_operand (0));
2535 
2536   /* Restore the previous state.  */
2537   p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2538   p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2539 
2540   /* Return expr_eval_no_error.  */
2541   p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2542   p += emit_ret (p, lr);
2543 
2544   emit_ops_insns (buf, p - buf);
2545 }
2546 
2547 /* Implementation of emit_ops method "emit_add".  */
2548 
2549 static void
aarch64_emit_add (void)
2551 {
2552   uint32_t buf[16];
2553   uint32_t *p = buf;
2554 
2555   p += emit_pop (p, x1);
2556   p += emit_add (p, x0, x1, register_operand (x0));
2557 
2558   emit_ops_insns (buf, p - buf);
2559 }
2560 
2561 /* Implementation of emit_ops method "emit_sub".  */
2562 
2563 static void
aarch64_emit_sub (void)
2565 {
2566   uint32_t buf[16];
2567   uint32_t *p = buf;
2568 
2569   p += emit_pop (p, x1);
2570   p += emit_sub (p, x0, x1, register_operand (x0));
2571 
2572   emit_ops_insns (buf, p - buf);
2573 }
2574 
2575 /* Implementation of emit_ops method "emit_mul".  */
2576 
2577 static void
aarch64_emit_mul (void)
2579 {
2580   uint32_t buf[16];
2581   uint32_t *p = buf;
2582 
2583   p += emit_pop (p, x1);
2584   p += emit_mul (p, x0, x1, x0);
2585 
2586   emit_ops_insns (buf, p - buf);
2587 }
2588 
2589 /* Implementation of emit_ops method "emit_lsh".  */
2590 
2591 static void
aarch64_emit_lsh (void)
2593 {
2594   uint32_t buf[16];
2595   uint32_t *p = buf;
2596 
2597   p += emit_pop (p, x1);
2598   p += emit_lslv (p, x0, x1, x0);
2599 
2600   emit_ops_insns (buf, p - buf);
2601 }
2602 
2603 /* Implementation of emit_ops method "emit_rsh_signed".  */
2604 
2605 static void
aarch64_emit_rsh_signed (void)
2607 {
2608   uint32_t buf[16];
2609   uint32_t *p = buf;
2610 
2611   p += emit_pop (p, x1);
2612   p += emit_asrv (p, x0, x1, x0);
2613 
2614   emit_ops_insns (buf, p - buf);
2615 }
2616 
2617 /* Implementation of emit_ops method "emit_rsh_unsigned".  */
2618 
2619 static void
aarch64_emit_rsh_unsigned (void)
2621 {
2622   uint32_t buf[16];
2623   uint32_t *p = buf;
2624 
2625   p += emit_pop (p, x1);
2626   p += emit_lsrv (p, x0, x1, x0);
2627 
2628   emit_ops_insns (buf, p - buf);
2629 }
2630 
2631 /* Implementation of emit_ops method "emit_ext".  */
2632 
2633 static void
aarch64_emit_ext (int arg)
2635 {
2636   uint32_t buf[16];
2637   uint32_t *p = buf;
2638 
2639   p += emit_sbfx (p, x0, x0, 0, arg);
2640 
2641   emit_ops_insns (buf, p - buf);
2642 }
2643 
2644 /* Implementation of emit_ops method "emit_log_not".  */
2645 
2646 static void
aarch64_emit_log_not (void)
2648 {
2649   uint32_t buf[16];
2650   uint32_t *p = buf;
2651 
2652   /* If the top of the stack is 0, replace it with 1.  Else replace it with
2653      0.  */
2654 
2655   p += emit_cmp (p, x0, immediate_operand (0));
2656   p += emit_cset (p, x0, EQ);
2657 
2658   emit_ops_insns (buf, p - buf);
2659 }
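
/* For example, with 5 on top of the stack the CMP clears the Z flag, so
   CSET writes 0; with 0 on top, Z is set and CSET writes 1, which is the
   expected logical-not result.  */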
2660 
2661 /* Implementation of emit_ops method "emit_bit_and".  */
2662 
2663 static void
aarch64_emit_bit_and (void)
2665 {
2666   uint32_t buf[16];
2667   uint32_t *p = buf;
2668 
2669   p += emit_pop (p, x1);
2670   p += emit_and (p, x0, x0, x1);
2671 
2672   emit_ops_insns (buf, p - buf);
2673 }
2674 
2675 /* Implementation of emit_ops method "emit_bit_or".  */
2676 
2677 static void
aarch64_emit_bit_or (void)
2679 {
2680   uint32_t buf[16];
2681   uint32_t *p = buf;
2682 
2683   p += emit_pop (p, x1);
2684   p += emit_orr (p, x0, x0, x1);
2685 
2686   emit_ops_insns (buf, p - buf);
2687 }
2688 
2689 /* Implementation of emit_ops method "emit_bit_xor".  */
2690 
2691 static void
aarch64_emit_bit_xor (void)
2693 {
2694   uint32_t buf[16];
2695   uint32_t *p = buf;
2696 
2697   p += emit_pop (p, x1);
2698   p += emit_eor (p, x0, x0, x1);
2699 
2700   emit_ops_insns (buf, p - buf);
2701 }
2702 
2703 /* Implementation of emit_ops method "emit_bit_not".  */
2704 
2705 static void
aarch64_emit_bit_not (void)
2707 {
2708   uint32_t buf[16];
2709   uint32_t *p = buf;
2710 
2711   p += emit_mvn (p, x0, x0);
2712 
2713   emit_ops_insns (buf, p - buf);
2714 }
2715 
2716 /* Implementation of emit_ops method "emit_equal".  */
2717 
2718 static void
aarch64_emit_equal (void)
2720 {
2721   uint32_t buf[16];
2722   uint32_t *p = buf;
2723 
2724   p += emit_pop (p, x1);
2725   p += emit_cmp (p, x0, register_operand (x1));
2726   p += emit_cset (p, x0, EQ);
2727 
2728   emit_ops_insns (buf, p - buf);
2729 }
2730 
2731 /* Implementation of emit_ops method "emit_less_signed".  */
2732 
2733 static void
aarch64_emit_less_signed (void)
2735 {
2736   uint32_t buf[16];
2737   uint32_t *p = buf;
2738 
2739   p += emit_pop (p, x1);
2740   p += emit_cmp (p, x1, register_operand (x0));
2741   p += emit_cset (p, x0, LT);
2742 
2743   emit_ops_insns (buf, p - buf);
2744 }
2745 
2746 /* Implementation of emit_ops method "emit_less_unsigned".  */
2747 
2748 static void
aarch64_emit_less_unsigned (void)
2750 {
2751   uint32_t buf[16];
2752   uint32_t *p = buf;
2753 
2754   p += emit_pop (p, x1);
2755   p += emit_cmp (p, x1, register_operand (x0));
2756   p += emit_cset (p, x0, LO);
2757 
2758   emit_ops_insns (buf, p - buf);
2759 }
2760 
2761 /* Implementation of emit_ops method "emit_ref".  */
2762 
2763 static void
aarch64_emit_ref (int size)
2765 {
2766   uint32_t buf[16];
2767   uint32_t *p = buf;
2768 
2769   switch (size)
2770     {
2771     case 1:
2772       p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2773       break;
2774     case 2:
2775       p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2776       break;
2777     case 4:
2778       p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2779       break;
2780     case 8:
2781       p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2782       break;
2783     default:
2784       /* Unknown size, bail on compilation.  */
2785       emit_error = 1;
2786       break;
2787     }
2788 
2789   emit_ops_insns (buf, p - buf);
2790 }
2791 
2792 /* Implementation of emit_ops method "emit_if_goto".  */
2793 
2794 static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
2796 {
2797   uint32_t buf[16];
2798   uint32_t *p = buf;
2799 
2800   /* The Z flag is set or cleared here.  */
2801   p += emit_cmp (p, x0, immediate_operand (0));
2802   /* This instruction must not change the Z flag.  */
2803   p += emit_pop (p, x0);
2804   /* Branch over the next instruction if x0 == 0.  */
2805   p += emit_bcond (p, EQ, 8);
2806 
2807   /* The NOP instruction will be patched with an unconditional branch.  */
2808   if (offset_p)
2809     *offset_p = (p - buf) * 4;
2810   if (size_p)
2811     *size_p = 4;
2812   p += emit_nop (p);
2813 
2814   emit_ops_insns (buf, p - buf);
2815 }
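
/* The *OFFSET_P / *SIZE_P values reported above tell the bytecode
   compiler where the placeholder NOP sits inside the emitted sequence;
   once the goto target is known, aarch64_write_goto_address overwrites
   that NOP with an unconditional B instruction.  */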
2816 
2817 /* Implementation of emit_ops method "emit_goto".  */
2818 
2819 static void
aarch64_emit_goto (int *offset_p, int *size_p)
2821 {
2822   uint32_t buf[16];
2823   uint32_t *p = buf;
2824 
2825   /* The NOP instruction will be patched with an unconditional branch.  */
2826   if (offset_p)
2827     *offset_p = 0;
2828   if (size_p)
2829     *size_p = 4;
2830   p += emit_nop (p);
2831 
2832   emit_ops_insns (buf, p - buf);
2833 }
2834 
2835 /* Implementation of emit_ops method "write_goto_address".  */
2836 
2837 static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2839 {
2840   uint32_t insn;
2841 
2842   emit_b (&insn, 0, to - from);
2843   append_insns (&from, 1, &insn);
2844 }
2845 
2846 /* Implementation of emit_ops method "emit_const".  */
2847 
2848 static void
aarch64_emit_const (LONGEST num)
2850 {
2851   uint32_t buf[16];
2852   uint32_t *p = buf;
2853 
2854   p += emit_mov_addr (p, x0, num);
2855 
2856   emit_ops_insns (buf, p - buf);
2857 }
2858 
2859 /* Implementation of emit_ops method "emit_call".  */
2860 
2861 static void
aarch64_emit_call (CORE_ADDR fn)
2863 {
2864   uint32_t buf[16];
2865   uint32_t *p = buf;
2866 
2867   p += emit_mov_addr (p, ip0, fn);
2868   p += emit_blr (p, ip0);
2869 
2870   emit_ops_insns (buf, p - buf);
2871 }
2872 
2873 /* Implementation of emit_ops method "emit_reg".  */
2874 
2875 static void
aarch64_emit_reg (int reg)
2877 {
2878   uint32_t buf[16];
2879   uint32_t *p = buf;
2880 
2881   /* Set x0 to unsigned char *regs.  */
2882   p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2883   p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2884   p += emit_mov (p, x1, immediate_operand (reg));
2885 
2886   emit_ops_insns (buf, p - buf);
2887 
2888   aarch64_emit_call (get_raw_reg_func_addr ());
2889 }
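
/* In other words, the sequence above reloads the raw register buffer
   pointer saved by the prologue (at FP - 16) into x0, places the register
   number in x1, and then calls the in-process agent's get_raw_reg helper;
   its return value in x0 becomes the new top of the stack.  */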
2890 
2891 /* Implementation of emit_ops method "emit_pop".  */
2892 
2893 static void
aarch64_emit_pop (void)
2895 {
2896   uint32_t buf[16];
2897   uint32_t *p = buf;
2898 
2899   p += emit_pop (p, x0);
2900 
2901   emit_ops_insns (buf, p - buf);
2902 }
2903 
2904 /* Implementation of emit_ops method "emit_stack_flush".  */
2905 
2906 static void
aarch64_emit_stack_flush (void)
2908 {
2909   uint32_t buf[16];
2910   uint32_t *p = buf;
2911 
2912   p += emit_push (p, x0);
2913 
2914   emit_ops_insns (buf, p - buf);
2915 }
2916 
2917 /* Implementation of emit_ops method "emit_zero_ext".  */
2918 
2919 static void
aarch64_emit_zero_ext (int arg)
2921 {
2922   uint32_t buf[16];
2923   uint32_t *p = buf;
2924 
2925   p += emit_ubfx (p, x0, x0, 0, arg);
2926 
2927   emit_ops_insns (buf, p - buf);
2928 }
2929 
2930 /* Implementation of emit_ops method "emit_swap".  */
2931 
2932 static void
aarch64_emit_swap (void)
2934 {
2935   uint32_t buf[16];
2936   uint32_t *p = buf;
2937 
2938   p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2939   p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2940   p += emit_mov (p, x0, register_operand (x1));
2941 
2942   emit_ops_insns (buf, p - buf);
2943 }
2944 
2945 /* Implementation of emit_ops method "emit_stack_adjust".  */
2946 
2947 static void
aarch64_emit_stack_adjust (int n)
2949 {
2950   /* This is not needed with our design.  */
2951   uint32_t buf[16];
2952   uint32_t *p = buf;
2953 
2954   p += emit_add (p, sp, sp, immediate_operand (n * 16));
2955 
2956   emit_ops_insns (buf, p - buf);
2957 }
2958 
2959 /* Implementation of emit_ops method "emit_int_call_1".  */
2960 
2961 static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2963 {
2964   uint32_t buf[16];
2965   uint32_t *p = buf;
2966 
2967   p += emit_mov (p, x0, immediate_operand (arg1));
2968 
2969   emit_ops_insns (buf, p - buf);
2970 
2971   aarch64_emit_call (fn);
2972 }
2973 
2974 /* Implementation of emit_ops method "emit_void_call_2".  */
2975 
2976 static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2978 {
2979   uint32_t buf[16];
2980   uint32_t *p = buf;
2981 
2982   /* Push x0 on the stack.  */
2983   aarch64_emit_stack_flush ();
2984 
2985   /* Setup arguments for the function call:
2986 
2987      x0: arg1
2988      x1: top of the stack
2989 
2990        MOV x1, x0
2991        MOV x0, #arg1  */
2992 
2993   p += emit_mov (p, x1, register_operand (x0));
2994   p += emit_mov (p, x0, immediate_operand (arg1));
2995 
2996   emit_ops_insns (buf, p - buf);
2997 
2998   aarch64_emit_call (fn);
2999 
3000   /* Restore x0.  */
3001   aarch64_emit_pop ();
3002 }
3003 
3004 /* Implementation of emit_ops method "emit_eq_goto".  */
3005 
3006 static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
3008 {
3009   uint32_t buf[16];
3010   uint32_t *p = buf;
3011 
3012   p += emit_pop (p, x1);
3013   p += emit_cmp (p, x1, register_operand (x0));
3014   /* Branch over the next instruction if x0 != x1.  */
3015   p += emit_bcond (p, NE, 8);
3016   /* The NOP instruction will be patched with an unconditional branch.  */
3017   if (offset_p)
3018     *offset_p = (p - buf) * 4;
3019   if (size_p)
3020     *size_p = 4;
3021   p += emit_nop (p);
3022 
3023   emit_ops_insns (buf, p - buf);
3024 }
3025 
3026 /* Implementation of emit_ops method "emit_ne_goto".  */
3027 
3028 static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
3030 {
3031   uint32_t buf[16];
3032   uint32_t *p = buf;
3033 
3034   p += emit_pop (p, x1);
3035   p += emit_cmp (p, x1, register_operand (x0));
3036   /* Branch over the next instruction if x0 == x1.  */
3037   p += emit_bcond (p, EQ, 8);
3038   /* The NOP instruction will be patched with an unconditional branch.  */
3039   if (offset_p)
3040     *offset_p = (p - buf) * 4;
3041   if (size_p)
3042     *size_p = 4;
3043   p += emit_nop (p);
3044 
3045   emit_ops_insns (buf, p - buf);
3046 }
3047 
3048 /* Implementation of emit_ops method "emit_lt_goto".  */
3049 
3050 static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
3052 {
3053   uint32_t buf[16];
3054   uint32_t *p = buf;
3055 
3056   p += emit_pop (p, x1);
3057   p += emit_cmp (p, x1, register_operand (x0));
3058   /* Branch over the next instruction if x0 >= x1.  */
3059   p += emit_bcond (p, GE, 8);
3060   /* The NOP instruction will be patched with an unconditional branch.  */
3061   if (offset_p)
3062     *offset_p = (p - buf) * 4;
3063   if (size_p)
3064     *size_p = 4;
3065   p += emit_nop (p);
3066 
3067   emit_ops_insns (buf, p - buf);
3068 }
3069 
3070 /* Implementation of emit_ops method "emit_le_goto".  */
3071 
3072 static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
3074 {
3075   uint32_t buf[16];
3076   uint32_t *p = buf;
3077 
3078   p += emit_pop (p, x1);
3079   p += emit_cmp (p, x1, register_operand (x0));
3080   /* Branch over the next instruction if x0 > x1.  */
3081   p += emit_bcond (p, GT, 8);
3082   /* The NOP instruction will be patched with an unconditional branch.  */
3083   if (offset_p)
3084     *offset_p = (p - buf) * 4;
3085   if (size_p)
3086     *size_p = 4;
3087   p += emit_nop (p);
3088 
3089   emit_ops_insns (buf, p - buf);
3090 }
3091 
3092 /* Implementation of emit_ops method "emit_gt_goto".  */
3093 
3094 static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
3096 {
3097   uint32_t buf[16];
3098   uint32_t *p = buf;
3099 
3100   p += emit_pop (p, x1);
3101   p += emit_cmp (p, x1, register_operand (x0));
3102   /* Branch over the next instruction if x0 <= x1.  */
3103   p += emit_bcond (p, LE, 8);
3104   /* The NOP instruction will be patched with an unconditional branch.  */
3105   if (offset_p)
3106     *offset_p = (p - buf) * 4;
3107   if (size_p)
3108     *size_p = 4;
3109   p += emit_nop (p);
3110 
3111   emit_ops_insns (buf, p - buf);
3112 }
3113 
/* Implementation of emit_ops method "emit_ge_goto".  */
3115 
3116 static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
3118 {
3119   uint32_t buf[16];
3120   uint32_t *p = buf;
3121 
3122   p += emit_pop (p, x1);
3123   p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
3125   p += emit_bcond (p, LT, 8);
3126   /* The NOP instruction will be patched with an unconditional branch.  */
3127   if (offset_p)
3128     *offset_p = (p - buf) * 4;
3129   if (size_p)
3130     *size_p = 4;
3131   p += emit_nop (p);
3132 
3133   emit_ops_insns (buf, p - buf);
3134 }
3135 
3136 static struct emit_ops aarch64_emit_ops_impl =
3137 {
3138   aarch64_emit_prologue,
3139   aarch64_emit_epilogue,
3140   aarch64_emit_add,
3141   aarch64_emit_sub,
3142   aarch64_emit_mul,
3143   aarch64_emit_lsh,
3144   aarch64_emit_rsh_signed,
3145   aarch64_emit_rsh_unsigned,
3146   aarch64_emit_ext,
3147   aarch64_emit_log_not,
3148   aarch64_emit_bit_and,
3149   aarch64_emit_bit_or,
3150   aarch64_emit_bit_xor,
3151   aarch64_emit_bit_not,
3152   aarch64_emit_equal,
3153   aarch64_emit_less_signed,
3154   aarch64_emit_less_unsigned,
3155   aarch64_emit_ref,
3156   aarch64_emit_if_goto,
3157   aarch64_emit_goto,
3158   aarch64_write_goto_address,
3159   aarch64_emit_const,
3160   aarch64_emit_call,
3161   aarch64_emit_reg,
3162   aarch64_emit_pop,
3163   aarch64_emit_stack_flush,
3164   aarch64_emit_zero_ext,
3165   aarch64_emit_swap,
3166   aarch64_emit_stack_adjust,
3167   aarch64_emit_int_call_1,
3168   aarch64_emit_void_call_2,
3169   aarch64_emit_eq_goto,
3170   aarch64_emit_ne_goto,
3171   aarch64_emit_lt_goto,
3172   aarch64_emit_le_goto,
3173   aarch64_emit_gt_goto,
3174   aarch64_emit_ge_got,
3175 };
3176 
3177 /* Implementation of target ops method "emit_ops".  */
3178 
3179 emit_ops *
aarch64_target::emit_ops ()
3181 {
3182   return &aarch64_emit_ops_impl;
3183 }
3184 
3185 /* Implementation of target ops method
3186    "get_min_fast_tracepoint_insn_len".  */
3187 
3188 int
aarch64_target::get_min_fast_tracepoint_insn_len ()
3190 {
3191   return 4;
3192 }
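
/* Every A64 instruction is 4 bytes and the jump pad is reached with a
   single B instruction, so a fast tracepoint only needs 4 bytes of
   instruction space at the tracepoint address.  */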
3193 
3194 /* Implementation of linux target ops method "low_supports_range_stepping".  */
3195 
3196 bool
aarch64_target::low_supports_range_stepping ()
3198 {
3199   return true;
3200 }
3201 
3202 /* Implementation of target ops method "sw_breakpoint_from_kind".  */
3203 
3204 const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3206 {
3207   if (is_64bit_tdesc ())
3208     {
3209       *size = aarch64_breakpoint_len;
3210       return aarch64_breakpoint;
3211     }
3212   else
3213     return arm_sw_breakpoint_from_kind (kind, size);
3214 }
3215 
3216 /* Implementation of target ops method "breakpoint_kind_from_pc".  */
3217 
3218 int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3220 {
3221   if (is_64bit_tdesc ())
3222     return aarch64_breakpoint_len;
3223   else
3224     return arm_breakpoint_kind_from_pc (pcptr);
3225 }
3226 
3227 /* Implementation of the target ops method
3228    "breakpoint_kind_from_current_state".  */
3229 
3230 int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3232 {
3233   if (is_64bit_tdesc ())
3234     return aarch64_breakpoint_len;
3235   else
3236     return arm_breakpoint_kind_from_current_state (pcptr);
3237 }
3238 
3239 /* Returns true if memory tagging is supported.  */
3240 bool
aarch64_target::supports_memory_tagging ()
3242 {
3243   if (current_thread == NULL)
3244     {
3245       /* We don't have any processes running, so don't attempt to
3246 	 use linux_get_hwcap2 as it will try to fetch the current
3247 	 thread id.  Instead, just fetch the auxv from the self
3248 	 PID.  */
3249 #ifdef HAVE_GETAUXVAL
3250       return (getauxval (AT_HWCAP2) & HWCAP2_MTE) != 0;
3251 #else
3252       return true;
3253 #endif
3254     }
3255 
3256   return (linux_get_hwcap2 (8) & HWCAP2_MTE) != 0;
3257 }
3258 
3259 bool
aarch64_target::fetch_memtags (CORE_ADDR address, size_t len,
3261 			       gdb::byte_vector &tags, int type)
3262 {
3263   /* Allocation tags are per-process, so any tid is fine.  */
3264   int tid = lwpid_of (current_thread);
3265 
3266   /* Allocation tag?  */
3267   if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
3268     return aarch64_mte_fetch_memtags (tid, address, len, tags);
3269 
3270   return false;
3271 }
3272 
3273 bool
aarch64_target::store_memtags (CORE_ADDR address, size_t len,
3275 			       const gdb::byte_vector &tags, int type)
3276 {
3277   /* Allocation tags are per-process, so any tid is fine.  */
3278   int tid = lwpid_of (current_thread);
3279 
3280   /* Allocation tag?  */
3281   if (type == static_cast <int> (aarch64_memtag_type::mte_allocation))
3282     return aarch64_mte_store_memtags (tid, address, len, tags);
3283 
3284   return false;
3285 }
3286 
3287 /* The linux target ops object.  */
3288 
3289 linux_process_target *the_linux_target = &the_aarch64_target;
3290 
3291 void
initialize_low_arch (void)
3293 {
3294   initialize_low_arch_aarch32 ();
3295 
3296   initialize_regsets_info (&aarch64_regsets_info);
3297   initialize_regsets_info (&aarch64_sve_regsets_info);
3298 }
3299