1 /* tc-aarch64.c -- Assemble for the AArch64 ISA
2 
3    Copyright (C) 2009-2020 Free Software Foundation, Inc.
4    Contributed by ARM Ltd.
5 
6    This file is part of GAS.
7 
8    GAS is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License as published by
10    the Free Software Foundation; either version 3 of the License, or
11    (at your option) any later version.
12 
13    GAS is distributed in the hope that it will be useful,
14    but WITHOUT ANY WARRANTY; without even the implied warranty of
15    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16    GNU General Public License for more details.
17 
18    You should have received a copy of the GNU General Public License
19    along with this program; see the file COPYING3. If not,
20    see <http://www.gnu.org/licenses/>.  */
21 
22 #include "as.h"
23 #include <limits.h>
24 #include <stdarg.h>
25 #include "bfd_stdint.h"
26 #define	 NO_RELOC 0
27 #include "safe-ctype.h"
28 #include "subsegs.h"
29 #include "obstack.h"
30 
31 #ifdef OBJ_ELF
32 #include "elf/aarch64.h"
33 #include "dw2gencfi.h"
34 #endif
35 
36 #include "dwarf2dbg.h"
37 
38 /* Types of processor to assemble for.  */
39 #ifndef CPU_DEFAULT
40 #define CPU_DEFAULT AARCH64_ARCH_V8
41 #endif
42 
43 #define streq(a, b)	      (strcmp (a, b) == 0)
44 
45 #define END_OF_INSN '\0'
46 
47 static aarch64_feature_set cpu_variant;
48 
49 /* Variables that we set while parsing command-line options.  Once all
50    options have been read we re-process these values to set the real
51    assembly flags.  */
52 static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53 static const aarch64_feature_set *march_cpu_opt = NULL;
54 
55 /* Constants for known architecture features.  */
56 static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57 
58 /* Currently active instruction sequence.  */
59 static aarch64_instr_sequence *insn_sequence = NULL;
60 
61 #ifdef OBJ_ELF
62 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
63 static symbolS *GOT_symbol;
64 
65 /* Which ABI to use.  */
66 enum aarch64_abi_type
67 {
68   AARCH64_ABI_NONE = 0,
69   AARCH64_ABI_LP64 = 1,
70   AARCH64_ABI_ILP32 = 2
71 };
72 
73 #ifndef DEFAULT_ARCH
74 #define DEFAULT_ARCH "aarch64"
75 #endif
76 
77 /* DEFAULT_ARCH is initialized in gas/configure.tgt.  */
78 static const char *default_arch = DEFAULT_ARCH;
79 
80 /* AArch64 ABI for the output file.  */
81 static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_NONE;
82 
83 /* When non-zero, program to a 32-bit model, in which the C data types
84    int, long and all pointer types are 32-bit objects (ILP32); or to a
85    64-bit model, in which the C int type is 32-bits but the C long type
86    and all pointer types are 64-bit objects (LP64).  */
87 #define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
88 #endif
89 
90 enum vector_el_type
91 {
92   NT_invtype = -1,
93   NT_b,
94   NT_h,
95   NT_s,
96   NT_d,
97   NT_q,
98   NT_zero,
99   NT_merge
100 };
101 
102 /* Bits for DEFINED field in vector_type_el.  */
103 #define NTA_HASTYPE     1
104 #define NTA_HASINDEX    2
105 #define NTA_HASVARWIDTH 4
106 
107 struct vector_type_el
108 {
109   enum vector_el_type type;
110   unsigned char defined;
111   unsigned width;
112   int64_t index;
113 };
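/* For example (illustrative): the operand "v4.2s[1]" is described by
   type == NT_s, width == 2, index == 1 and
   defined == (NTA_HASTYPE | NTA_HASINDEX).  */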
114 
115 #define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001
116 
117 struct reloc
118 {
119   bfd_reloc_code_real_type type;
120   expressionS exp;
121   int pc_rel;
122   enum aarch64_opnd opnd;
123   uint32_t flags;
124   unsigned need_libopcodes_p : 1;
125 };
126 
127 struct aarch64_instruction
128 {
129   /* libopcodes structure for instruction intermediate representation.  */
130   aarch64_inst base;
131   /* Record assembly errors found during the parsing.  */
132   struct
133     {
134       enum aarch64_operand_error_kind kind;
135       const char *error;
136     } parsing_error;
137   /* The condition that appears in the assembly line.  */
138   int cond;
139   /* Relocation information (including the GAS internal fixup).  */
140   struct reloc reloc;
141   /* Need to generate an immediate in the literal pool.  */
142   unsigned gen_lit_pool : 1;
143 };
144 
145 typedef struct aarch64_instruction aarch64_instruction;
146 
147 static aarch64_instruction inst;
148 
149 static bfd_boolean parse_operands (char *, const aarch64_opcode *);
150 static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
151 
152 #ifdef OBJ_ELF
153 #  define now_instr_sequence seg_info \
154 		(now_seg)->tc_segment_info_data.insn_sequence
155 #else
156 static struct aarch64_instr_sequence now_instr_sequence;
157 #endif
158 
159 /* Diagnostics inline function utilities.
160 
161    These are lightweight utilities which should only be called by parse_operands
162    and other parsers.  GAS processes each assembly line by parsing it against
163    instruction template(s); in the case of multiple templates (for the same
164    mnemonic name), those templates are tried one by one until one succeeds or
165    all fail.  An assembly line may fail a few templates before being
166    successfully parsed; an error saved here in most cases is not a user error
167    but an error indicating the current template is not the right template.
168    Therefore it is very important that errors can be saved at a low cost during
169    the parsing; we don't want to slow down the whole parsing by recording
170    non-user errors in detail.
171 
172    Remember that the objective is to help GAS pick up the most appropriate
173    error message in the case of multiple templates, e.g. FMOV which has 8
174    templates.  */
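/* A minimal usage sketch (illustrative): an operand parser typically records
   a non-fatal problem and bails out, e.g.

     if (reg == NULL)
       {
	 set_default_error ();
	 return PARSE_FAIL;
       }

   leaving the caller free to try the next template.  */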
175 
176 static inline void
177 clear_error (void)
178 {
179   inst.parsing_error.kind = AARCH64_OPDE_NIL;
180   inst.parsing_error.error = NULL;
181 }
182 
183 static inline bfd_boolean
184 error_p (void)
185 {
186   return inst.parsing_error.kind != AARCH64_OPDE_NIL;
187 }
188 
189 static inline const char *
190 get_error_message (void)
191 {
192   return inst.parsing_error.error;
193 }
194 
195 static inline enum aarch64_operand_error_kind
196 get_error_kind (void)
197 {
198   return inst.parsing_error.kind;
199 }
200 
201 static inline void
202 set_error (enum aarch64_operand_error_kind kind, const char *error)
203 {
204   inst.parsing_error.kind = kind;
205   inst.parsing_error.error = error;
206 }
207 
208 static inline void
209 set_recoverable_error (const char *error)
210 {
211   set_error (AARCH64_OPDE_RECOVERABLE, error);
212 }
213 
214 /* Use the DESC field of the corresponding aarch64_operand entry to compose
215    the error message.  */
216 static inline void
217 set_default_error (void)
218 {
219   set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
220 }
221 
222 static inline void
223 set_syntax_error (const char *error)
224 {
225   set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
226 }
227 
228 static inline void
229 set_first_syntax_error (const char *error)
230 {
231   if (! error_p ())
232     set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
233 }
234 
235 static inline void
236 set_fatal_syntax_error (const char *error)
237 {
238   set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
239 }
240 
241 /* Return value for certain parsers when the parsing fails; those parsers
242    return the information of the parsed result, e.g. register number, on
243    success.  */
244 #define PARSE_FAIL -1
245 
246 /* This is an invalid condition code that means no conditional field is
247    present. */
248 #define COND_ALWAYS 0x10
249 
250 typedef struct
251 {
252   const char *template;
253   uint32_t value;
254 } asm_nzcv;
255 
256 struct reloc_entry
257 {
258   char *name;
259   bfd_reloc_code_real_type reloc;
260 };
261 
262 /* Macros to define the register types and masks for the purpose
263    of parsing.  */
264 
265 #undef AARCH64_REG_TYPES
266 #define AARCH64_REG_TYPES	\
267   BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
268   BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
269   BASIC_REG_TYPE(SP_32)	/* wsp     */	\
270   BASIC_REG_TYPE(SP_64)	/* sp      */	\
271   BASIC_REG_TYPE(Z_32)	/* wzr     */	\
272   BASIC_REG_TYPE(Z_64)	/* xzr     */	\
273   BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
274   BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
275   BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
276   BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
277   BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
278   BASIC_REG_TYPE(VN)	/* v[0-31] */	\
279   BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
280   BASIC_REG_TYPE(PN)	/* p[0-15] */	\
281   /* Typecheck: any 64-bit int reg         (inc SP exc XZR).  */	\
282   MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
283   /* Typecheck: same, plus SVE registers.  */				\
284   MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
285 		 | REG_TYPE(ZN))					\
286   /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
287   MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
288 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
289   /* Typecheck: same, plus SVE registers.  */				\
290   MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
291 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
292 		 | REG_TYPE(ZN))					\
293   /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
294   MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
295 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
296   /* Typecheck: any int                    (inc {W}SP inc [WX]ZR).  */	\
297   MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
298 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
299 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) 			\
300   /* Typecheck: any [BHSDQ]P FP.  */					\
301   MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
302 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
303   /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
304   MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
305 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
306 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
307 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
308   /* Typecheck: as above, but also Zn, Pn, and {W}SP.  This should only	\
309      be used for SVE instructions, since Zn and Pn are valid symbols	\
310      in other contexts.  */						\
311   MULTI_REG_TYPE(R_Z_SP_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)	\
312 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
313 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
314 		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
315 		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
316 		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
317   /* Any integer register; used for error messages only.  */		\
318   MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
319 		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
320 		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
321   /* Pseudo type to mark the end of the enumerator sequence.  */	\
322   BASIC_REG_TYPE(MAX)
323 
324 #undef BASIC_REG_TYPE
325 #define BASIC_REG_TYPE(T)	REG_TYPE_##T,
326 #undef MULTI_REG_TYPE
327 #define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)
328 
329 /* Register type enumerators.  */
330 typedef enum aarch64_reg_type_
331 {
332   /* A list of REG_TYPE_*.  */
333   AARCH64_REG_TYPES
334 } aarch64_reg_type;
335 
336 #undef BASIC_REG_TYPE
337 #define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
338 #undef REG_TYPE
339 #define REG_TYPE(T)		(1 << REG_TYPE_##T)
340 #undef MULTI_REG_TYPE
341 #define MULTI_REG_TYPE(T,V)	V,
342 
343 /* Structure for a hash table entry for a register.  */
344 typedef struct
345 {
346   const char *name;
347   unsigned char number;
348   ENUM_BITFIELD (aarch64_reg_type_) type : 8;
349   unsigned char builtin;
350 } reg_entry;
351 
352 /* Values indexed by aarch64_reg_type to assist the type checking.  */
353 static const unsigned reg_type_masks[] =
354 {
355   AARCH64_REG_TYPES
356 };
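/* For instance (a sketch of the expansion above): the mask for the
   multi-register type R_Z is the OR of its constituent single-type bits,
   i.e. reg_type_masks[REG_TYPE_R_Z] == ((1 << REG_TYPE_R_32)
   | (1 << REG_TYPE_R_64) | (1 << REG_TYPE_Z_32) | (1 << REG_TYPE_Z_64)).  */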
357 
358 #undef BASIC_REG_TYPE
359 #undef REG_TYPE
360 #undef MULTI_REG_TYPE
361 #undef AARCH64_REG_TYPES
362 
363 /* Diagnostics used when we don't get a register of the expected type.
364    Note: this has to be synchronized with the aarch64_reg_type definitions
365    above.  */
366 static const char *
367 get_reg_expected_msg (aarch64_reg_type reg_type)
368 {
369   const char *msg;
370 
371   switch (reg_type)
372     {
373     case REG_TYPE_R_32:
374       msg = N_("integer 32-bit register expected");
375       break;
376     case REG_TYPE_R_64:
377       msg = N_("integer 64-bit register expected");
378       break;
379     case REG_TYPE_R_N:
380       msg = N_("integer register expected");
381       break;
382     case REG_TYPE_R64_SP:
383       msg = N_("64-bit integer or SP register expected");
384       break;
385     case REG_TYPE_SVE_BASE:
386       msg = N_("base register expected");
387       break;
388     case REG_TYPE_R_Z:
389       msg = N_("integer or zero register expected");
390       break;
391     case REG_TYPE_SVE_OFFSET:
392       msg = N_("offset register expected");
393       break;
394     case REG_TYPE_R_SP:
395       msg = N_("integer or SP register expected");
396       break;
397     case REG_TYPE_R_Z_SP:
398       msg = N_("integer, zero or SP register expected");
399       break;
400     case REG_TYPE_FP_B:
401       msg = N_("8-bit SIMD scalar register expected");
402       break;
403     case REG_TYPE_FP_H:
404       msg = N_("16-bit SIMD scalar or floating-point half precision "
405 	       "register expected");
406       break;
407     case REG_TYPE_FP_S:
408       msg = N_("32-bit SIMD scalar or floating-point single precision "
409 	       "register expected");
410       break;
411     case REG_TYPE_FP_D:
412       msg = N_("64-bit SIMD scalar or floating-point double precision "
413 	       "register expected");
414       break;
415     case REG_TYPE_FP_Q:
416       msg = N_("128-bit SIMD scalar or floating-point quad precision "
417 	       "register expected");
418       break;
419     case REG_TYPE_R_Z_BHSDQ_V:
420     case REG_TYPE_R_Z_SP_BHSDQ_VZP:
421       msg = N_("register expected");
422       break;
423     case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
424       msg = N_("SIMD scalar or floating-point register expected");
425       break;
426     case REG_TYPE_VN:		/* any V reg  */
427       msg = N_("vector register expected");
428       break;
429     case REG_TYPE_ZN:
430       msg = N_("SVE vector register expected");
431       break;
432     case REG_TYPE_PN:
433       msg = N_("SVE predicate register expected");
434       break;
435     default:
436       as_fatal (_("invalid register type %d"), reg_type);
437     }
438   return msg;
439 }
440 
441 /* Some well known registers that we refer to directly elsewhere.  */
442 #define REG_SP	31
443 #define REG_ZR	31
444 
445 /* Instructions take 4 bytes in the object file.  */
446 #define INSN_SIZE	4
447 
448 static struct hash_control *aarch64_ops_hsh;
449 static struct hash_control *aarch64_cond_hsh;
450 static struct hash_control *aarch64_shift_hsh;
451 static struct hash_control *aarch64_sys_regs_hsh;
452 static struct hash_control *aarch64_pstatefield_hsh;
453 static struct hash_control *aarch64_sys_regs_ic_hsh;
454 static struct hash_control *aarch64_sys_regs_dc_hsh;
455 static struct hash_control *aarch64_sys_regs_at_hsh;
456 static struct hash_control *aarch64_sys_regs_tlbi_hsh;
457 static struct hash_control *aarch64_sys_regs_sr_hsh;
458 static struct hash_control *aarch64_reg_hsh;
459 static struct hash_control *aarch64_barrier_opt_hsh;
460 static struct hash_control *aarch64_nzcv_hsh;
461 static struct hash_control *aarch64_pldop_hsh;
462 static struct hash_control *aarch64_hint_opt_hsh;
463 
464 /* Stuff needed to resolve the label ambiguity
465    As:
466      ...
467      label:   <insn>
468    may differ from:
469      ...
470      label:
471 	      <insn>  */
472 
473 static symbolS *last_label_seen;
474 
475 /* Literal pool structure.  Held on a per-section
476    and per-sub-section basis.  */
477 
478 #define MAX_LITERAL_POOL_SIZE 1024
479 typedef struct literal_expression
480 {
481   expressionS exp;
482   /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value.  */
483   LITTLENUM_TYPE * bignum;
484 } literal_expression;
485 
486 typedef struct literal_pool
487 {
488   literal_expression literals[MAX_LITERAL_POOL_SIZE];
489   unsigned int next_free_entry;
490   unsigned int id;
491   symbolS *symbol;
492   segT section;
493   subsegT sub_section;
494   int size;
495   struct literal_pool *next;
496 } literal_pool;
497 
498 /* Pointer to a linked list of literal pools.  */
499 static literal_pool *list_of_pools = NULL;
500 
501 /* Pure syntax.	 */
502 
503 /* This array holds the chars that always start a comment.  If the
504    pre-processor is disabled, these aren't very useful.	 */
505 const char comment_chars[] = "";
506 
507 /* This array holds the chars that only start a comment at the beginning of
508    a line.  If the line seems to have the form '# 123 filename'
509    .line and .file directives will appear in the pre-processed output.	*/
510 /* Note that input_file.c hand checks for '#' at the beginning of the
511    first line of the input file.  This is because the compiler outputs
512    #NO_APP at the beginning of its output.  */
513 /* Also note that comments like this one will always work.  */
514 const char line_comment_chars[] = "#";
515 
516 const char line_separator_chars[] = ";";
517 
518 /* Chars that can be used to separate the mantissa
519    from the exponent in floating point numbers.	*/
520 const char EXP_CHARS[] = "eE";
521 
522 /* Chars that mean this number is a floating point constant.  */
523 /* As in 0f12.456  */
524 /* or	 0d1.2345e12  */
525 
526 const char FLT_CHARS[] = "rRsSfFdDxXeEpPhH";
527 
528 /* Prefix character that indicates the start of an immediate value.  */
529 #define is_immediate_prefix(C) ((C) == '#')
530 
531 /* Separator character handling.  */
532 
533 #define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
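/* Note that skipping a single space is sufficient here: the GAS input
   scrubber has already collapsed runs of whitespace to single spaces
   (see create_register_alias below).  */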
534 
535 static inline bfd_boolean
536 skip_past_char (char **str, char c)
537 {
538   if (**str == c)
539     {
540       (*str)++;
541       return TRUE;
542     }
543   else
544     return FALSE;
545 }
546 
547 #define skip_past_comma(str) skip_past_char (str, ',')
548 
549 /* Arithmetic expressions (possibly involving symbols).	 */
550 
551 static bfd_boolean in_my_get_expression_p = FALSE;
552 
553 /* Third argument to my_get_expression.	 */
554 #define GE_NO_PREFIX 0
555 #define GE_OPT_PREFIX 1
556 
557 /* Return TRUE if the string pointed to by *STR is successfully parsed
558    as a valid expression; *EP will be filled with the information of
559    such an expression.  Otherwise return FALSE.  */
560 
561 static bfd_boolean
562 my_get_expression (expressionS * ep, char **str, int prefix_mode,
563 		   int reject_absent)
564 {
565   char *save_in;
566   segT seg;
567   int prefix_present_p = 0;
568 
569   switch (prefix_mode)
570     {
571     case GE_NO_PREFIX:
572       break;
573     case GE_OPT_PREFIX:
574       if (is_immediate_prefix (**str))
575 	{
576 	  (*str)++;
577 	  prefix_present_p = 1;
578 	}
579       break;
580     default:
581       abort ();
582     }
583 
584   memset (ep, 0, sizeof (expressionS));
585 
586   save_in = input_line_pointer;
587   input_line_pointer = *str;
588   in_my_get_expression_p = TRUE;
589   seg = expression (ep);
590   in_my_get_expression_p = FALSE;
591 
592   if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
593     {
594       /* We found a bad expression in md_operand().  */
595       *str = input_line_pointer;
596       input_line_pointer = save_in;
597       if (prefix_present_p && ! error_p ())
598 	set_fatal_syntax_error (_("bad expression"));
599       else
600 	set_first_syntax_error (_("bad expression"));
601       return FALSE;
602     }
603 
604 #ifdef OBJ_AOUT
605   if (seg != absolute_section
606       && seg != text_section
607       && seg != data_section
608       && seg != bss_section && seg != undefined_section)
609     {
610       set_syntax_error (_("bad segment"));
611       *str = input_line_pointer;
612       input_line_pointer = save_in;
613       return FALSE;
614     }
615 #else
616   (void) seg;
617 #endif
618 
619   *str = input_line_pointer;
620   input_line_pointer = save_in;
621   return TRUE;
622 }
623 
624 /* Turn a string in input_line_pointer into a floating point constant
625    of type TYPE, and store the appropriate bytes in *LITP.  The number
626    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
627    returned, or NULL on OK.  */
628 
629 const char *
630 md_atof (int type, char *litP, int *sizeP)
631 {
632   /* If this is a bfloat16 type, then parse it slightly differently -
633      as it does not follow the IEEE standard exactly.  */
634   if (type == 'b')
635     {
636       char * t;
637       LITTLENUM_TYPE words[MAX_LITTLENUMS];
638       FLONUM_TYPE generic_float;
639 
640       t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);
641 
642       if (t)
643 	input_line_pointer = t;
644       else
645 	return _("invalid floating point number");
646 
647       switch (generic_float.sign)
648 	{
649 	/* Is +Inf.  */
650 	case 'P':
651 	  words[0] = 0x7f80;
652 	  break;
653 
654 	/* Is -Inf.  */
655 	case 'N':
656 	  words[0] = 0xff80;
657 	  break;
658 
659 	/* Is NaN.  */
660 	/* bfloat16 has two types of NaN - quiet and signalling.
661 	   Quiet NaNs have bit[6] == 1 && fraction != 0, whereas
662 	   signalling NaNs have bit[6] == 0 && fraction != 0.
663 	   We chose this specific encoding as it is the same form
664 	   as used by other IEEE 754 encodings in GAS.  */
665 	case 0:
666 	  words[0] = 0x7fff;
667 	  break;
668 
669 	default:
670 	  break;
671 	}
672 
673       *sizeP = 2;
674 
675       md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));
676 
677       return NULL;
678     }
679 
680   return ieee_md_atof (type, litP, sizeP, target_big_endian);
681 }
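/* Illustrative example: for the bfloat16 value 1.0 the code above emits the
   16-bit pattern 0x3f80 (sign 0, biased exponent 0x7f, fraction 0), written
   out by md_number_to_chars in the target endianness.  */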
682 
683 /* We handle all bad expressions here, so that we can report the faulty
684    instruction in the error message.  */
685 void
686 md_operand (expressionS * exp)
687 {
688   if (in_my_get_expression_p)
689     exp->X_op = O_illegal;
690 }
691 
692 /* Immediate values.  */
693 
694 /* Errors may be set multiple times during parsing or bit encoding
695    (particularly in the Neon bits), but usually the earliest error which is set
696    will be the most meaningful. Avoid overwriting it with later (cascading)
697    errors by calling this function.  */
698 
699 static void
700 first_error (const char *error)
701 {
702   if (! error_p ())
703     set_syntax_error (error);
704 }
705 
706 /* Similar to first_error, but this function accepts a formatted error
707    message.  */
708 static void
709 first_error_fmt (const char *format, ...)
710 {
711   va_list args;
712   enum
713   { size = 100 };
714   /* N.B. this single buffer will not cause error messages for different
715      instructions to pollute each other; this is because at the end of
716      processing of each assembly line, the error message, if any, will be
717      collected by as_bad.  */
718   static char buffer[size];
719 
720   if (! error_p ())
721     {
722       int ret ATTRIBUTE_UNUSED;
723       va_start (args, format);
724       ret = vsnprintf (buffer, size, format, args);
725       know (ret <= size - 1 && ret >= 0);
726       va_end (args);
727       set_syntax_error (buffer);
728     }
729 }
730 
731 /* Register parsing.  */
732 
733 /* Generic register parser which is called by other specialized
734    register parsers.
735    CCP points to what should be the beginning of a register name.
736    If it is indeed a valid register name, advance CCP over it and
737    return the reg_entry structure; otherwise return NULL.
738    It does not issue diagnostics.  */
739 
740 static reg_entry *
741 parse_reg (char **ccp)
742 {
743   char *start = *ccp;
744   char *p;
745   reg_entry *reg;
746 
747 #ifdef REGISTER_PREFIX
748   if (*start != REGISTER_PREFIX)
749     return NULL;
750   start++;
751 #endif
752 
753   p = start;
754   if (!ISALPHA (*p) || !is_name_beginner (*p))
755     return NULL;
756 
757   do
758     p++;
759   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
760 
761   reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
762 
763   if (!reg)
764     return NULL;
765 
766   *ccp = p;
767   return reg;
768 }
769 
770 /* Return TRUE if REG->TYPE is compatible with the required type TYPE;
771    otherwise return FALSE.  */
772 static bfd_boolean
773 aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
774 {
775   return (reg_type_masks[type] & (1 << reg->type)) != 0;
776 }
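/* For example (illustrative): a reg_entry parsed from "x3" has type
   REG_TYPE_R_64, so checking it against the multi-type REG_TYPE_R_Z_SP
   succeeds because reg_type_masks[REG_TYPE_R_Z_SP] has the REG_TYPE_R_64
   bit set.  */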
777 
778 /* Try to parse a base or offset register.  Allow SVE base and offset
779    registers if REG_TYPE includes SVE registers.  Return the register
780    entry on success, setting *QUALIFIER to the register qualifier.
781    Return null otherwise.
782 
783    Note that this function does not issue any diagnostics.  */
784 
785 static const reg_entry *
786 aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
787 			aarch64_opnd_qualifier_t *qualifier)
788 {
789   char *str = *ccp;
790   const reg_entry *reg = parse_reg (&str);
791 
792   if (reg == NULL)
793     return NULL;
794 
795   switch (reg->type)
796     {
797     case REG_TYPE_R_32:
798     case REG_TYPE_SP_32:
799     case REG_TYPE_Z_32:
800       *qualifier = AARCH64_OPND_QLF_W;
801       break;
802 
803     case REG_TYPE_R_64:
804     case REG_TYPE_SP_64:
805     case REG_TYPE_Z_64:
806       *qualifier = AARCH64_OPND_QLF_X;
807       break;
808 
809     case REG_TYPE_ZN:
810       if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
811 	  || str[0] != '.')
812 	return NULL;
813       switch (TOLOWER (str[1]))
814 	{
815 	case 's':
816 	  *qualifier = AARCH64_OPND_QLF_S_S;
817 	  break;
818 	case 'd':
819 	  *qualifier = AARCH64_OPND_QLF_S_D;
820 	  break;
821 	default:
822 	  return NULL;
823 	}
824       str += 2;
825       break;
826 
827     default:
828       return NULL;
829     }
830 
831   *ccp = str;
832 
833   return reg;
834 }
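/* For instance (illustrative): "x7" above yields AARCH64_OPND_QLF_X, "wsp"
   yields AARCH64_OPND_QLF_W, and (when REG_TYPE permits SVE registers)
   "z5.d" yields AARCH64_OPND_QLF_S_D with *CCP advanced past the ".d".  */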
835 
836 /* Try to parse a base or offset register.  Return the register entry
837    on success, setting *QUALIFIER to the register qualifier.  Return null
838    otherwise.
839 
840    Note that this function does not issue any diagnostics.  */
841 
842 static const reg_entry *
843 aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
844 {
845   return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
846 }
847 
848 /* Parse the qualifier of a vector register or vector element of type
849    REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
850    succeeds; otherwise return FALSE.
851 
852    Accept only one occurrence of:
853    4b 8b 16b 2h 4h 8h 2s 4s 1d 2d
854    b h s d q  */
855 static bfd_boolean
856 parse_vector_type_for_operand (aarch64_reg_type reg_type,
857 			       struct vector_type_el *parsed_type, char **str)
858 {
859   char *ptr = *str;
860   unsigned width;
861   unsigned element_size;
862   enum vector_el_type type;
863 
864   /* skip '.' */
865   gas_assert (*ptr == '.');
866   ptr++;
867 
868   if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
869     {
870       width = 0;
871       goto elt_size;
872     }
873   width = strtoul (ptr, &ptr, 10);
874   if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
875     {
876       first_error_fmt (_("bad size %d in vector width specifier"), width);
877       return FALSE;
878     }
879 
880 elt_size:
881   switch (TOLOWER (*ptr))
882     {
883     case 'b':
884       type = NT_b;
885       element_size = 8;
886       break;
887     case 'h':
888       type = NT_h;
889       element_size = 16;
890       break;
891     case 's':
892       type = NT_s;
893       element_size = 32;
894       break;
895     case 'd':
896       type = NT_d;
897       element_size = 64;
898       break;
899     case 'q':
900       if (reg_type == REG_TYPE_ZN || width == 1)
901 	{
902 	  type = NT_q;
903 	  element_size = 128;
904 	  break;
905 	}
906       /* fall through.  */
907     default:
908       if (*ptr != '\0')
909 	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
910       else
911 	first_error (_("missing element size"));
912       return FALSE;
913     }
914   if (width != 0 && width * element_size != 64
915       && width * element_size != 128
916       && !(width == 2 && element_size == 16)
917       && !(width == 4 && element_size == 8))
918     {
919       first_error_fmt (_
920 		       ("invalid element size %d and vector size combination %c"),
921 		       width, *ptr);
922       return FALSE;
923     }
924   ptr++;
925 
926   parsed_type->type = type;
927   parsed_type->width = width;
928 
929   *str = ptr;
930 
931   return TRUE;
932 }
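/* Illustrative examples: ".4s" parses as type NT_s with width 4
   (4 * 32 == 128 bits), ".2h" is accepted via the special case above, and
   for SVE registers only the element letter is read, so ".d" gives NT_d
   with a width of 0.  */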
933 
934 /* *STR contains an SVE zero/merge predication suffix.  Parse it into
935    *PARSED_TYPE and point *STR at the end of the suffix.  */
936 
937 static bfd_boolean
938 parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
939 {
940   char *ptr = *str;
941 
942   /* Skip '/'.  */
943   gas_assert (*ptr == '/');
944   ptr++;
945   switch (TOLOWER (*ptr))
946     {
947     case 'z':
948       parsed_type->type = NT_zero;
949       break;
950     case 'm':
951       parsed_type->type = NT_merge;
952       break;
953     default:
954       if (*ptr != '\0' && *ptr != ',')
955 	first_error_fmt (_("unexpected character `%c' in predication type"),
956 			 *ptr);
957       else
958 	first_error (_("missing predication type"));
959       return FALSE;
960     }
961   parsed_type->width = 0;
962   *str = ptr + 1;
963   return TRUE;
964 }
965 
966 /* Parse a register of the type TYPE.
967 
968    Return PARSE_FAIL if the string pointed to by *CCP is not a valid register
969    name or the parsed register is not of TYPE.
970 
971    Otherwise return the register number, and optionally fill in the actual
972    type of the register in *RTYPE when multiple alternatives were given, and
973    return the register shape and element index information in *TYPEINFO.
974 
975    IN_REG_LIST should be set with TRUE if the caller is parsing a register
976    list.  */
977 
978 static int
979 parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
980 		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
981 {
982   char *str = *ccp;
983   const reg_entry *reg = parse_reg (&str);
984   struct vector_type_el atype;
985   struct vector_type_el parsetype;
986   bfd_boolean is_typed_vecreg = FALSE;
987 
988   atype.defined = 0;
989   atype.type = NT_invtype;
990   atype.width = -1;
991   atype.index = 0;
992 
993   if (reg == NULL)
994     {
995       if (typeinfo)
996 	*typeinfo = atype;
997       set_default_error ();
998       return PARSE_FAIL;
999     }
1000 
1001   if (! aarch64_check_reg_type (reg, type))
1002     {
1003       DEBUG_TRACE ("reg type check failed");
1004       set_default_error ();
1005       return PARSE_FAIL;
1006     }
1007   type = reg->type;
1008 
1009   if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
1010       && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
1011     {
1012       if (*str == '.')
1013 	{
1014 	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
1015 	    return PARSE_FAIL;
1016 	}
1017       else
1018 	{
1019 	  if (!parse_predication_for_operand (&parsetype, &str))
1020 	    return PARSE_FAIL;
1021 	}
1022 
1023       /* Register is of the form Vn.[bhsdq].  */
1024       is_typed_vecreg = TRUE;
1025 
1026       if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
1027 	{
1028 	  /* The width is always variable; we don't allow an integer width
1029 	     to be specified.  */
1030 	  gas_assert (parsetype.width == 0);
1031 	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
1032 	}
1033       else if (parsetype.width == 0)
1034 	/* Expect index. In the new scheme we cannot have
1035 	   Vn.[bhsdq] represent a scalar. Therefore any
1036 	   Vn.[bhsdq] should have an index following it.
1037 	   Except in reglists of course.  */
1038 	atype.defined |= NTA_HASINDEX;
1039       else
1040 	atype.defined |= NTA_HASTYPE;
1041 
1042       atype.type = parsetype.type;
1043       atype.width = parsetype.width;
1044     }
1045 
1046   if (skip_past_char (&str, '['))
1047     {
1048       expressionS exp;
1049 
1050       /* Reject Sn[index] syntax.  */
1051       if (!is_typed_vecreg)
1052 	{
1053 	  first_error (_("this type of register can't be indexed"));
1054 	  return PARSE_FAIL;
1055 	}
1056 
1057       if (in_reg_list)
1058 	{
1059 	  first_error (_("index not allowed inside register list"));
1060 	  return PARSE_FAIL;
1061 	}
1062 
1063       atype.defined |= NTA_HASINDEX;
1064 
1065       my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1066 
1067       if (exp.X_op != O_constant)
1068 	{
1069 	  first_error (_("constant expression required"));
1070 	  return PARSE_FAIL;
1071 	}
1072 
1073       if (! skip_past_char (&str, ']'))
1074 	return PARSE_FAIL;
1075 
1076       atype.index = exp.X_add_number;
1077     }
1078   else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1079     {
1080       /* Indexed vector register expected.  */
1081       first_error (_("indexed vector register expected"));
1082       return PARSE_FAIL;
1083     }
1084 
1085   /* A vector reg Vn should be typed or indexed.  */
1086   if (type == REG_TYPE_VN && atype.defined == 0)
1087     {
1088       first_error (_("invalid use of vector register"));
1089     }
1090 
1091   if (typeinfo)
1092     *typeinfo = atype;
1093 
1094   if (rtype)
1095     *rtype = type;
1096 
1097   *ccp = str;
1098 
1099   return reg->number;
1100 }
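/* An illustrative example: for the operand "v2.4s[1]" the function above
   returns register number 2 and fills *TYPEINFO with type NT_s, width 4 and
   index 1; a plain "x0" parsed with TYPE REG_TYPE_R_Z simply returns 0 with
   no vector type information.  */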
1101 
1102 /* Parse register.
1103 
1104    Return the register number on success; return PARSE_FAIL otherwise.
1105 
1106    If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1107    the register (e.g. NEON double or quad reg when either has been requested).
1108 
1109    If this is a NEON vector register with additional type information, fill
1110    in the struct pointed to by VECTYPE (if non-NULL).
1111 
1112    This parser does not handle register lists.  */
1113 
1114 static int
1115 aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1116 		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
1117 {
1118   struct vector_type_el atype;
1119   char *str = *ccp;
1120   int reg = parse_typed_reg (&str, type, rtype, &atype,
1121 			     /*in_reg_list= */ FALSE);
1122 
1123   if (reg == PARSE_FAIL)
1124     return PARSE_FAIL;
1125 
1126   if (vectype)
1127     *vectype = atype;
1128 
1129   *ccp = str;
1130 
1131   return reg;
1132 }
1133 
1134 static inline bfd_boolean
1135 eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1136 {
1137   return
1138     e1.type == e2.type
1139     && e1.defined == e2.defined
1140     && e1.width == e2.width && e1.index == e2.index;
1141 }
1142 
1143 /* This function parses a list of vector registers of type TYPE.
1144    On success, it returns the parsed register list information in the
1145    following encoded format:
1146 
1147    bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
1148        4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1149 
1150    The information of the register shape and/or index is returned in
1151    *VECTYPE.
1152 
1153    It returns PARSE_FAIL if the register list is invalid.
1154 
1155    The list contains one to four registers.
1156    Each register can be one of:
1157    <Vt>.<T>[<index>]
1158    <Vt>.<T>
1159    All <T> should be identical.
1160    All <index> should be identical.
1161    There are restrictions on <Vt> numbers which are checked later
1162    (by reg_list_valid_p).  */
1163 
1164 static int
1165 parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1166 		       struct vector_type_el *vectype)
1167 {
1168   char *str = *ccp;
1169   int nb_regs;
1170   struct vector_type_el typeinfo, typeinfo_first;
1171   int val, val_range;
1172   int in_range;
1173   int ret_val;
1174   int i;
1175   bfd_boolean error = FALSE;
1176   bfd_boolean expect_index = FALSE;
1177 
1178   if (*str != '{')
1179     {
1180       set_syntax_error (_("expecting {"));
1181       return PARSE_FAIL;
1182     }
1183   str++;
1184 
1185   nb_regs = 0;
1186   typeinfo_first.defined = 0;
1187   typeinfo_first.type = NT_invtype;
1188   typeinfo_first.width = -1;
1189   typeinfo_first.index = 0;
1190   ret_val = 0;
1191   val = -1;
1192   val_range = -1;
1193   in_range = 0;
1194   do
1195     {
1196       if (in_range)
1197 	{
1198 	  str++;		/* skip over '-' */
1199 	  val_range = val;
1200 	}
1201       val = parse_typed_reg (&str, type, NULL, &typeinfo,
1202 			     /*in_reg_list= */ TRUE);
1203       if (val == PARSE_FAIL)
1204 	{
1205 	  set_first_syntax_error (_("invalid vector register in list"));
1206 	  error = TRUE;
1207 	  continue;
1208 	}
1209       /* reject [bhsd]n */
1210       if (type == REG_TYPE_VN && typeinfo.defined == 0)
1211 	{
1212 	  set_first_syntax_error (_("invalid scalar register in list"));
1213 	  error = TRUE;
1214 	  continue;
1215 	}
1216 
1217       if (typeinfo.defined & NTA_HASINDEX)
1218 	expect_index = TRUE;
1219 
1220       if (in_range)
1221 	{
1222 	  if (val < val_range)
1223 	    {
1224 	      set_first_syntax_error
1225 		(_("invalid range in vector register list"));
1226 	      error = TRUE;
1227 	    }
1228 	  val_range++;
1229 	}
1230       else
1231 	{
1232 	  val_range = val;
1233 	  if (nb_regs == 0)
1234 	    typeinfo_first = typeinfo;
1235 	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1236 	    {
1237 	      set_first_syntax_error
1238 		(_("type mismatch in vector register list"));
1239 	      error = TRUE;
1240 	    }
1241 	}
1242       if (! error)
1243 	for (i = val_range; i <= val; i++)
1244 	  {
1245 	    ret_val |= i << (5 * nb_regs);
1246 	    nb_regs++;
1247 	  }
1248       in_range = 0;
1249     }
1250   while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1251 
1252   skip_whitespace (str);
1253   if (*str != '}')
1254     {
1255       set_first_syntax_error (_("end of vector register list not found"));
1256       error = TRUE;
1257     }
1258   str++;
1259 
1260   skip_whitespace (str);
1261 
1262   if (expect_index)
1263     {
1264       if (skip_past_char (&str, '['))
1265 	{
1266 	  expressionS exp;
1267 
1268 	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1269 	  if (exp.X_op != O_constant)
1270 	    {
1271 	      set_first_syntax_error (_("constant expression required."));
1272 	      error = TRUE;
1273 	    }
1274 	  if (! skip_past_char (&str, ']'))
1275 	    error = TRUE;
1276 	  else
1277 	    typeinfo_first.index = exp.X_add_number;
1278 	}
1279       else
1280 	{
1281 	  set_first_syntax_error (_("expected index"));
1282 	  error = TRUE;
1283 	}
1284     }
1285 
1286   if (nb_regs > 4)
1287     {
1288       set_first_syntax_error (_("too many registers in vector register list"));
1289       error = TRUE;
1290     }
1291   else if (nb_regs == 0)
1292     {
1293       set_first_syntax_error (_("empty vector register list"));
1294       error = TRUE;
1295     }
1296 
1297   *ccp = str;
1298   if (! error)
1299     *vectype = typeinfo_first;
1300 
1301   return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1302 }
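/* A worked example (illustrative): the list "{v4.2d - v6.2d}" contains the
   three registers 4, 5 and 6, so the value returned above is
   ((4 | (5 << 5) | (6 << 10)) << 2) | (3 - 1), and *VECTYPE describes the
   shared ".2d" shape.  */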
1303 
1304 /* Directives: register aliases.  */
1305 
1306 static reg_entry *
1307 insert_reg_alias (char *str, int number, aarch64_reg_type type)
1308 {
1309   reg_entry *new;
1310   const char *name;
1311 
1312   if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1313     {
1314       if (new->builtin)
1315 	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1316 		 str);
1317 
1318       /* Only warn about a redefinition if it's not defined as the
1319          same register.  */
1320       else if (new->number != number || new->type != type)
1321 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
1322 
1323       return NULL;
1324     }
1325 
1326   name = xstrdup (str);
1327   new = XNEW (reg_entry);
1328 
1329   new->name = name;
1330   new->number = number;
1331   new->type = type;
1332   new->builtin = FALSE;
1333 
1334   if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1335     abort ();
1336 
1337   return new;
1338 }
1339 
1340 /* Look for the .req directive.	 This is of the form:
1341 
1342 	new_register_name .req existing_register_name
1343 
1344    If we find one, or if it looks sufficiently like one that we want to
1345    handle any error here, return TRUE.  Otherwise return FALSE.  */
1346 
1347 static bfd_boolean
1348 create_register_alias (char *newname, char *p)
1349 {
1350   const reg_entry *old;
1351   char *oldname, *nbuf;
1352   size_t nlen;
1353 
1354   /* The input scrubber ensures that whitespace after the mnemonic is
1355      collapsed to single spaces.  */
1356   oldname = p;
1357   if (strncmp (oldname, " .req ", 6) != 0)
1358     return FALSE;
1359 
1360   oldname += 6;
1361   if (*oldname == '\0')
1362     return FALSE;
1363 
1364   old = hash_find (aarch64_reg_hsh, oldname);
1365   if (!old)
1366     {
1367       as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1368       return TRUE;
1369     }
1370 
1371   /* If TC_CASE_SENSITIVE is defined, then newname already points to
1372      the desired alias name, and p points to its end.  If not, then
1373      the desired alias name is in the global original_case_string.  */
1374 #ifdef TC_CASE_SENSITIVE
1375   nlen = p - newname;
1376 #else
1377   newname = original_case_string;
1378   nlen = strlen (newname);
1379 #endif
1380 
1381   nbuf = xmemdup0 (newname, nlen);
1382 
1383   /* Create aliases under the new name as stated; an all-lowercase
1384      version of the new name; and an all-uppercase version of the new
1385      name.  */
1386   if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1387     {
1388       for (p = nbuf; *p; p++)
1389 	*p = TOUPPER (*p);
1390 
1391       if (strncmp (nbuf, newname, nlen))
1392 	{
1393 	  /* If this attempt to create an additional alias fails, do not bother
1394 	     trying to create the all-lower case alias.  We will fail and issue
1395 	     a second, duplicate error message.  This situation arises when the
1396 	     programmer does something like:
1397 	     foo .req r0
1398 	     Foo .req r1
1399 	     The second .req creates the "Foo" alias but then fails to create
1400 	     the artificial FOO alias because it has already been created by the
1401 	     first .req.  */
1402 	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1403 	    {
1404 	      free (nbuf);
1405 	      return TRUE;
1406 	    }
1407 	}
1408 
1409       for (p = nbuf; *p; p++)
1410 	*p = TOLOWER (*p);
1411 
1412       if (strncmp (nbuf, newname, nlen))
1413 	insert_reg_alias (nbuf, old->number, old->type);
1414     }
1415 
1416   free (nbuf);
1417   return TRUE;
1418 }
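/* For example (illustrative assembly):

       base .req x19
       mov  base, sp

   then "mov base, sp" assembles exactly as "mov x19, sp".  As described
   above, the alias is also entered in its all-uppercase and all-lowercase
   spellings.  */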
1419 
1420 /* Should never be called, as .req goes between the alias and the
1421    register name, not at the beginning of the line.  */
1422 static void
1423 s_req (int a ATTRIBUTE_UNUSED)
1424 {
1425   as_bad (_("invalid syntax for .req directive"));
1426 }
1427 
1428 /* The .unreq directive deletes an alias which was previously defined
1429    by .req.  For example:
1430 
1431        my_alias .req r11
1432        .unreq my_alias	  */
1433 
1434 static void
1435 s_unreq (int a ATTRIBUTE_UNUSED)
1436 {
1437   char *name;
1438   char saved_char;
1439 
1440   name = input_line_pointer;
1441 
1442   while (*input_line_pointer != 0
1443 	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1444     ++input_line_pointer;
1445 
1446   saved_char = *input_line_pointer;
1447   *input_line_pointer = 0;
1448 
1449   if (!*name)
1450     as_bad (_("invalid syntax for .unreq directive"));
1451   else
1452     {
1453       reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1454 
1455       if (!reg)
1456 	as_bad (_("unknown register alias '%s'"), name);
1457       else if (reg->builtin)
1458 	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1459 		 name);
1460       else
1461 	{
1462 	  char *p;
1463 	  char *nbuf;
1464 
1465 	  hash_delete (aarch64_reg_hsh, name, FALSE);
1466 	  free ((char *) reg->name);
1467 	  free (reg);
1468 
1469 	  /* Also locate the all upper case and all lower case versions.
1470 	     Do not complain if we cannot find one or the other as it
1471 	     was probably deleted above.  */
1472 
1473 	  nbuf = strdup (name);
1474 	  for (p = nbuf; *p; p++)
1475 	    *p = TOUPPER (*p);
1476 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1477 	  if (reg)
1478 	    {
1479 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1480 	      free ((char *) reg->name);
1481 	      free (reg);
1482 	    }
1483 
1484 	  for (p = nbuf; *p; p++)
1485 	    *p = TOLOWER (*p);
1486 	  reg = hash_find (aarch64_reg_hsh, nbuf);
1487 	  if (reg)
1488 	    {
1489 	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1490 	      free ((char *) reg->name);
1491 	      free (reg);
1492 	    }
1493 
1494 	  free (nbuf);
1495 	}
1496     }
1497 
1498   *input_line_pointer = saved_char;
1499   demand_empty_rest_of_line ();
1500 }
1501 
1502 /* Directives: Instruction set selection.  */
1503 
1504 #ifdef OBJ_ELF
1505 /* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1506    spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1507    Note that previously, $a and $t had type STT_FUNC (BSF_FUNCTION flag),
1508    and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
1509 
1510 /* Create a new mapping symbol for the transition to STATE.  */
1511 
1512 static void
1513 make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1514 {
1515   symbolS *symbolP;
1516   const char *symname;
1517   int type;
1518 
1519   switch (state)
1520     {
1521     case MAP_DATA:
1522       symname = "$d";
1523       type = BSF_NO_FLAGS;
1524       break;
1525     case MAP_INSN:
1526       symname = "$x";
1527       type = BSF_NO_FLAGS;
1528       break;
1529     default:
1530       abort ();
1531     }
1532 
1533   symbolP = symbol_new (symname, now_seg, value, frag);
1534   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1535 
1536   /* Save the mapping symbols for future reference.  Also check that
1537      we do not place two mapping symbols at the same offset within a
1538      frag.  We'll handle overlap between frags in
1539      check_mapping_symbols.
1540 
1541      If .fill or other data filling directive generates zero sized data,
1542      the mapping symbol for the following code will have the same value
1543      as the one generated for the data filling directive.  In this case,
1544      we replace the old symbol with the new one at the same address.  */
1545   if (value == 0)
1546     {
1547       if (frag->tc_frag_data.first_map != NULL)
1548 	{
1549 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1550 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1551 			 &symbol_lastP);
1552 	}
1553       frag->tc_frag_data.first_map = symbolP;
1554     }
1555   if (frag->tc_frag_data.last_map != NULL)
1556     {
1557       know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1558 	    S_GET_VALUE (symbolP));
1559       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1560 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1561 		       &symbol_lastP);
1562     }
1563   frag->tc_frag_data.last_map = symbolP;
1564 }
1565 
1566 /* We must sometimes convert a region marked as code to data during
1567    code alignment, if an odd number of bytes have to be padded.  The
1568    code mapping symbol is pushed to an aligned address.  */
1569 
1570 static void
1571 insert_data_mapping_symbol (enum mstate state,
1572 			    valueT value, fragS * frag, offsetT bytes)
1573 {
1574   /* If there was already a mapping symbol, remove it.  */
1575   if (frag->tc_frag_data.last_map != NULL
1576       && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1577       frag->fr_address + value)
1578     {
1579       symbolS *symp = frag->tc_frag_data.last_map;
1580 
1581       if (value == 0)
1582 	{
1583 	  know (frag->tc_frag_data.first_map == symp);
1584 	  frag->tc_frag_data.first_map = NULL;
1585 	}
1586       frag->tc_frag_data.last_map = NULL;
1587       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1588     }
1589 
1590   make_mapping_symbol (MAP_DATA, value, frag);
1591   make_mapping_symbol (state, value + bytes, frag);
1592 }
1593 
1594 static void mapping_state_2 (enum mstate state, int max_chars);
1595 
1596 /* Set the mapping state to STATE.  Only call this when about to
1597    emit some STATE bytes to the file.  */
1598 
1599 void
1600 mapping_state (enum mstate state)
1601 {
1602   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1603 
1604   if (state == MAP_INSN)
1605     /* AArch64 instructions require 4-byte alignment.  When emitting
1606        instructions into any section, record the appropriate section
1607        alignment.  */
1608     record_alignment (now_seg, 2);
1609 
1610   if (mapstate == state)
1611     /* The mapping symbol has already been emitted.
1612        There is nothing else to do.  */
1613     return;
1614 
1615 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
1616   if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1617     /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
1618        evaluated later in the next else.  */
1619     return;
1620   else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1621     {
1622       /* Only add the symbol if the offset is > 0:
1623 	 if we're at the first frag, check that its size is > 0;
1624 	 if we're not at the first frag, then for sure
1625 	 the offset is > 0.  */
1626       struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1627       const int add_symbol = (frag_now != frag_first)
1628 	|| (frag_now_fix () > 0);
1629 
1630       if (add_symbol)
1631 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1632     }
1633 #undef TRANSITION
1634 
1635   mapping_state_2 (state, 0);
1636 }
1637 
1638 /* Same as mapping_state, but MAX_CHARS bytes have already been
1639    allocated.  Put the mapping symbol that far back.  */
1640 
1641 static void
1642 mapping_state_2 (enum mstate state, int max_chars)
1643 {
1644   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1645 
1646   if (!SEG_NORMAL (now_seg))
1647     return;
1648 
1649   if (mapstate == state)
1650     /* The mapping symbol has already been emitted.
1651        There is nothing else to do.  */
1652     return;
1653 
1654   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1655   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1656 }
1657 #else
1658 #define mapping_state(x)	/* nothing */
1659 #define mapping_state_2(x, y)	/* nothing */
1660 #endif
1661 
1662 /* Directives: sectioning and alignment.  */
1663 
1664 static void
1665 s_bss (int ignore ATTRIBUTE_UNUSED)
1666 {
1667   /* We don't support putting frags in the BSS segment, we fake it by
1668      marking in_bss, then looking at s_skip for clues.  */
1669   subseg_set (bss_section, 0);
1670   demand_empty_rest_of_line ();
1671   mapping_state (MAP_DATA);
1672 }
1673 
1674 static void
1675 s_even (int ignore ATTRIBUTE_UNUSED)
1676 {
1677   /* Never make frag if expect extra pass.  */
1678   if (!need_pass_2)
1679     frag_align (1, 0, 0);
1680 
1681   record_alignment (now_seg, 1);
1682 
1683   demand_empty_rest_of_line ();
1684 }
1685 
1686 /* Directives: Literal pools.  */
1687 
1688 static literal_pool *
1689 find_literal_pool (int size)
1690 {
1691   literal_pool *pool;
1692 
1693   for (pool = list_of_pools; pool != NULL; pool = pool->next)
1694     {
1695       if (pool->section == now_seg
1696 	  && pool->sub_section == now_subseg && pool->size == size)
1697 	break;
1698     }
1699 
1700   return pool;
1701 }
1702 
1703 static literal_pool *
1704 find_or_make_literal_pool (int size)
1705 {
1706   /* Next literal pool ID number.  */
1707   static unsigned int latest_pool_num = 1;
1708   literal_pool *pool;
1709 
1710   pool = find_literal_pool (size);
1711 
1712   if (pool == NULL)
1713     {
1714       /* Create a new pool.  */
1715       pool = XNEW (literal_pool);
1716       if (!pool)
1717 	return NULL;
1718 
1719       /* Currently we always put the literal pool in the current text
1720          section.  If we were generating "small" model code where we
1721          knew that all code and initialised data was within 1MB then
1722          we could output literals to mergeable, read-only data
1723          sections. */
1724 
1725       pool->next_free_entry = 0;
1726       pool->section = now_seg;
1727       pool->sub_section = now_subseg;
1728       pool->size = size;
1729       pool->next = list_of_pools;
1730       pool->symbol = NULL;
1731 
1732       /* Add it to the list.  */
1733       list_of_pools = pool;
1734     }
1735 
1736   /* New pools, and emptied pools, will have a NULL symbol.  */
1737   if (pool->symbol == NULL)
1738     {
1739       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1740 				    (valueT) 0, &zero_address_frag);
1741       pool->id = latest_pool_num++;
1742     }
1743 
1744   /* Done.  */
1745   return pool;
1746 }
1747 
1748 /* Add the literal of size SIZE in *EXP to the relevant literal pool.
1749    Return TRUE on success, otherwise return FALSE.  */
1750 static bfd_boolean
1751 add_to_lit_pool (expressionS *exp, int size)
1752 {
1753   literal_pool *pool;
1754   unsigned int entry;
1755 
1756   pool = find_or_make_literal_pool (size);
1757 
1758   /* Check if this literal value is already in the pool.  */
1759   for (entry = 0; entry < pool->next_free_entry; entry++)
1760     {
1761       expressionS * litexp = & pool->literals[entry].exp;
1762 
1763       if ((litexp->X_op == exp->X_op)
1764 	  && (exp->X_op == O_constant)
1765 	  && (litexp->X_add_number == exp->X_add_number)
1766 	  && (litexp->X_unsigned == exp->X_unsigned))
1767 	break;
1768 
1769       if ((litexp->X_op == exp->X_op)
1770 	  && (exp->X_op == O_symbol)
1771 	  && (litexp->X_add_number == exp->X_add_number)
1772 	  && (litexp->X_add_symbol == exp->X_add_symbol)
1773 	  && (litexp->X_op_symbol == exp->X_op_symbol))
1774 	break;
1775     }
1776 
1777   /* Do we need to create a new entry?  */
1778   if (entry == pool->next_free_entry)
1779     {
1780       if (entry >= MAX_LITERAL_POOL_SIZE)
1781 	{
1782 	  set_syntax_error (_("literal pool overflow"));
1783 	  return FALSE;
1784 	}
1785 
1786       pool->literals[entry].exp = *exp;
1787       pool->next_free_entry += 1;
1788       if (exp->X_op == O_big)
1789 	{
1790 	  /* PR 16688: Bignums are held in a single global array.  We must
1791 	     copy and preserve that value now, before it is overwritten.  */
1792 	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1793 						  exp->X_add_number);
1794 	  memcpy (pool->literals[entry].bignum, generic_bignum,
1795 		  CHARS_PER_LITTLENUM * exp->X_add_number);
1796 	}
1797       else
1798 	pool->literals[entry].bignum = NULL;
1799     }
1800 
1801   exp->X_op = O_symbol;
1802   exp->X_add_number = ((int) entry) * size;
1803   exp->X_add_symbol = pool->symbol;
1804 
1805   return TRUE;
1806 }
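
/* For reference, literal pool entries are normally created via the
   "=immediate" form of the PC-relative load, e.g.:

	ldr	x0, =0x1122334455667788

   and are emitted later when a ".ltorg" (or ".pool") directive flushes
   the pool for the current section and subsection.  */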
1807 
1808 /* Can't use symbol_new here, so have to create a symbol and then at
1809    a later date assign it a value. That's what these functions do.  */
1810 
1811 static void
1812 symbol_locate (symbolS * symbolP,
1813 	       const char *name,/* It is copied, the caller can modify.  */
1814 	       segT segment,	/* Segment identifier (SEG_<something>).  */
1815 	       valueT valu,	/* Symbol value.  */
1816 	       fragS * frag)	/* Associated fragment.  */
1817 {
1818   size_t name_length;
1819   char *preserved_copy_of_name;
1820 
1821   name_length = strlen (name) + 1;	/* +1 for \0.  */
1822   obstack_grow (&notes, name, name_length);
1823   preserved_copy_of_name = obstack_finish (&notes);
1824 
1825 #ifdef tc_canonicalize_symbol_name
1826   preserved_copy_of_name =
1827     tc_canonicalize_symbol_name (preserved_copy_of_name);
1828 #endif
1829 
1830   S_SET_NAME (symbolP, preserved_copy_of_name);
1831 
1832   S_SET_SEGMENT (symbolP, segment);
1833   S_SET_VALUE (symbolP, valu);
1834   symbol_clear_list_pointers (symbolP);
1835 
1836   symbol_set_frag (symbolP, frag);
1837 
1838   /* Link to end of symbol chain.  */
1839   {
1840     extern int symbol_table_frozen;
1841 
1842     if (symbol_table_frozen)
1843       abort ();
1844   }
1845 
1846   symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1847 
1848   obj_symbol_new_hook (symbolP);
1849 
1850 #ifdef tc_symbol_new_hook
1851   tc_symbol_new_hook (symbolP);
1852 #endif
1853 
1854 #ifdef DEBUG_SYMS
1855   verify_symbol_chain (symbol_rootP, symbol_lastP);
1856 #endif /* DEBUG_SYMS  */
1857 }
1858 
1859 
1860 static void
1861 s_ltorg (int ignored ATTRIBUTE_UNUSED)
1862 {
1863   unsigned int entry;
1864   literal_pool *pool;
1865   char sym_name[20];
1866   int align;
1867 
1868   for (align = 2; align <= 4; align++)
1869     {
1870       int size = 1 << align;
1871 
1872       pool = find_literal_pool (size);
1873       if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1874 	continue;
1875 
1876       /* Align the pool, as it will be read with word-sized accesses.
1877          Only make a frag if we have to.  */
1878       if (!need_pass_2)
1879 	frag_align (align, 0, 0);
1880 
1881       mapping_state (MAP_DATA);
1882 
1883       record_alignment (now_seg, align);
1884 
1885       sprintf (sym_name, "$$lit_\002%x", pool->id);
1886 
1887       symbol_locate (pool->symbol, sym_name, now_seg,
1888 		     (valueT) frag_now_fix (), frag_now);
1889       symbol_table_insert (pool->symbol);
1890 
1891       for (entry = 0; entry < pool->next_free_entry; entry++)
1892 	{
1893 	  expressionS * exp = & pool->literals[entry].exp;
1894 
1895 	  if (exp->X_op == O_big)
1896 	    {
1897 	      /* PR 16688: Restore the global bignum value.  */
1898 	      gas_assert (pool->literals[entry].bignum != NULL);
1899 	      memcpy (generic_bignum, pool->literals[entry].bignum,
1900 		      CHARS_PER_LITTLENUM * exp->X_add_number);
1901 	    }
1902 
1903 	  /* First output the expression in the instruction to the pool.  */
1904 	  emit_expr (exp, size);	/* .word|.xword  */
1905 
1906 	  if (exp->X_op == O_big)
1907 	    {
1908 	      free (pool->literals[entry].bignum);
1909 	      pool->literals[entry].bignum = NULL;
1910 	    }
1911 	}
1912 
1913       /* Mark the pool as empty.  */
1914       pool->next_free_entry = 0;
1915       pool->symbol = NULL;
1916     }
1917 }
1918 
1919 #ifdef OBJ_ELF
1920 /* Forward declarations for functions below, in the MD interface
1921    section.  */
1922 static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1923 static struct reloc_table_entry * find_reloc_table_entry (char **);
1924 
1925 /* Directives: Data.  */
1926 /* N.B. the support for relocation suffix in this directive needs to be
1927    implemented properly.  */
1928 
1929 static void
1930 s_aarch64_elf_cons (int nbytes)
1931 {
1932   expressionS exp;
1933 
1934 #ifdef md_flush_pending_output
1935   md_flush_pending_output ();
1936 #endif
1937 
1938   if (is_it_end_of_statement ())
1939     {
1940       demand_empty_rest_of_line ();
1941       return;
1942     }
1943 
1944 #ifdef md_cons_align
1945   md_cons_align (nbytes);
1946 #endif
1947 
1948   mapping_state (MAP_DATA);
1949   do
1950     {
1951       struct reloc_table_entry *reloc;
1952 
1953       expression (&exp);
1954 
1955       if (exp.X_op != O_symbol)
1956 	emit_expr (&exp, (unsigned int) nbytes);
1957       else
1958 	{
1959 	  skip_past_char (&input_line_pointer, '#');
1960 	  if (skip_past_char (&input_line_pointer, ':'))
1961 	    {
1962 	      reloc = find_reloc_table_entry (&input_line_pointer);
1963 	      if (reloc == NULL)
1964 		as_bad (_("unrecognized relocation suffix"));
1965 	      else
1966 		as_bad (_("unimplemented relocation suffix"));
1967 	      ignore_rest_of_line ();
1968 	      return;
1969 	    }
1970 	  else
1971 	    emit_expr (&exp, (unsigned int) nbytes);
1972 	}
1973     }
1974   while (*input_line_pointer++ == ',');
1975 
1976   /* Put terminator back into stream.  */
1977   input_line_pointer--;
1978   demand_empty_rest_of_line ();
1979 }
1980 
1981 /* Mark a symbol as following a variant PCS convention.  */
1982 
1983 static void
1984 s_variant_pcs (int ignored ATTRIBUTE_UNUSED)
1985 {
1986   char *name;
1987   char c;
1988   symbolS *sym;
1989   asymbol *bfdsym;
1990   elf_symbol_type *elfsym;
1991 
1992   c = get_symbol_name (&name);
1993   if (!*name)
1994     as_bad (_("Missing symbol name in directive"));
1995   sym = symbol_find_or_make (name);
1996   restore_line_pointer (c);
1997   demand_empty_rest_of_line ();
1998   bfdsym = symbol_get_bfdsym (sym);
1999   elfsym = elf_symbol_from (bfd_asymbol_bfd (bfdsym), bfdsym);
2000   gas_assert (elfsym);
2001   elfsym->internal_elf_sym.st_other |= STO_AARCH64_VARIANT_PCS;
2002 }
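
/* For example:

	.variant_pcs	foo

   marks "foo" with STO_AARCH64_VARIANT_PCS in its st_other field.  */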
2003 #endif /* OBJ_ELF */
2004 
2005 /* Output a 32-bit word, but mark as an instruction.  */
2006 
2007 static void
2008 s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
2009 {
2010   expressionS exp;
2011 
2012 #ifdef md_flush_pending_output
2013   md_flush_pending_output ();
2014 #endif
2015 
2016   if (is_it_end_of_statement ())
2017     {
2018       demand_empty_rest_of_line ();
2019       return;
2020     }
2021 
2022   /* Sections are assumed to start aligned.  In an executable section, there
2023      is no MAP_DATA symbol pending, so we only align the address during the
2024      MAP_DATA --> MAP_INSN transition.
2025      For other sections, this is not guaranteed.  */
2026   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
2027   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
2028     frag_align_code (2, 0);
2029 
2030 #ifdef OBJ_ELF
2031   mapping_state (MAP_INSN);
2032 #endif
2033 
2034   do
2035     {
2036       expression (&exp);
2037       if (exp.X_op != O_constant)
2038 	{
2039 	  as_bad (_("constant expression required"));
2040 	  ignore_rest_of_line ();
2041 	  return;
2042 	}
2043 
2044       if (target_big_endian)
2045 	{
2046 	  unsigned int val = exp.X_add_number;
2047 	  exp.X_add_number = SWAP_32 (val);
2048 	}
2049       emit_expr (&exp, 4);
2050     }
2051   while (*input_line_pointer++ == ',');
2052 
2053   /* Put terminator back into stream.  */
2054   input_line_pointer--;
2055   demand_empty_rest_of_line ();
2056 }
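
/* For example:

	.inst	0xd503201f		// the encoding of NOP

   emits the value as code (MAP_INSN) rather than data, byte-swapping it
   on big-endian targets so that the instruction encoding is preserved.  */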
2057 
2058 static void
2059 s_aarch64_cfi_b_key_frame (int ignored ATTRIBUTE_UNUSED)
2060 {
2061   demand_empty_rest_of_line ();
2062   struct fde_entry *fde = frchain_now->frch_cfi_data->cur_fde_data;
2063   fde->pauth_key = AARCH64_PAUTH_KEY_B;
2064 }
2065 
2066 #ifdef OBJ_ELF
2067 /* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */
2068 
2069 static void
2070 s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
2071 {
2072   expressionS exp;
2073 
2074   expression (&exp);
2075   frag_grow (4);
2076   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2077 		   BFD_RELOC_AARCH64_TLSDESC_ADD);
2078 
2079   demand_empty_rest_of_line ();
2080 }
2081 
2082 /* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
2083 
2084 static void
2085 s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
2086 {
2087   expressionS exp;
2088 
2089   /* Since we're just labelling the code, there's no need to define a
2090      mapping symbol.  */
2091   expression (&exp);
2092   /* Make sure there is enough room in this frag for the following
2093      blr.  This trick only works if the blr follows immediately after
2094      the .tlsdesccall directive.  */
2095   frag_grow (4);
2096   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2097 		   BFD_RELOC_AARCH64_TLSDESC_CALL);
2098 
2099   demand_empty_rest_of_line ();
2100 }
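
/* For reference, a TLS descriptor call sequence in assembler source
   typically looks something like:

	adrp	x0, :tlsdesc:var
	ldr	x1, [x0, #:tlsdesc_lo12:var]
	add	x0, x0, #:tlsdesc_lo12:var
	.tlsdesccall	var
	blr	x1

   where the directive attaches the TLSDESC_CALL relocation to the
   immediately following BLR.  */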
2101 
2102 /* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */
2103 
2104 static void
2105 s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2106 {
2107   expressionS exp;
2108 
2109   expression (&exp);
2110   frag_grow (4);
2111   fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2112 		   BFD_RELOC_AARCH64_TLSDESC_LDR);
2113 
2114   demand_empty_rest_of_line ();
2115 }
2116 #endif	/* OBJ_ELF */
2117 
2118 static void s_aarch64_arch (int);
2119 static void s_aarch64_cpu (int);
2120 static void s_aarch64_arch_extension (int);
2121 
2122 /* This table describes all the machine specific pseudo-ops the assembler
2123    has to support.  The fields are:
2124      pseudo-op name without dot
2125      function to call to execute this pseudo-op
2126      Integer arg to pass to the function.  */
2127 
2128 const pseudo_typeS md_pseudo_table[] = {
2129   /* Never called because '.req' does not start a line.  */
2130   {"req", s_req, 0},
2131   {"unreq", s_unreq, 0},
2132   {"bss", s_bss, 0},
2133   {"even", s_even, 0},
2134   {"ltorg", s_ltorg, 0},
2135   {"pool", s_ltorg, 0},
2136   {"cpu", s_aarch64_cpu, 0},
2137   {"arch", s_aarch64_arch, 0},
2138   {"arch_extension", s_aarch64_arch_extension, 0},
2139   {"inst", s_aarch64_inst, 0},
2140   {"cfi_b_key_frame", s_aarch64_cfi_b_key_frame, 0},
2141 #ifdef OBJ_ELF
2142   {"tlsdescadd", s_tlsdescadd, 0},
2143   {"tlsdesccall", s_tlsdesccall, 0},
2144   {"tlsdescldr", s_tlsdescldr, 0},
2145   {"word", s_aarch64_elf_cons, 4},
2146   {"long", s_aarch64_elf_cons, 4},
2147   {"xword", s_aarch64_elf_cons, 8},
2148   {"dword", s_aarch64_elf_cons, 8},
2149   {"variant_pcs", s_variant_pcs, 0},
2150 #endif
2151   {"float16", float_cons, 'h'},
2152   {"bfloat16", float_cons, 'b'},
2153   {0, 0, 0}
2154 };
2155 
2156 
2157 /* Check whether STR points to a register name followed by a comma or the
2158    end of line; REG_TYPE indicates which register types are checked
2159    against.  Return TRUE if STR is such a register name; otherwise return
2160    FALSE.  The function does not intend to produce any diagnostics, but since
2161    the register parser aarch64_reg_parse, which is called by this function,
2162    does produce diagnostics, we call clear_error to clear any diagnostics
2163    that may be generated by aarch64_reg_parse.
2164    Also, the function returns FALSE directly if there is any user error
2165    present at the function entry.  This prevents the existing diagnostics
2166    state from being spoiled.
2167    The function currently serves parse_constant_immediate and
2168    parse_big_immediate only.  */
2169 static bfd_boolean
2170 reg_name_p (char *str, aarch64_reg_type reg_type)
2171 {
2172   int reg;
2173 
2174   /* Prevent the diagnostics state from being spoiled.  */
2175   if (error_p ())
2176     return FALSE;
2177 
2178   reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2179 
2180   /* Clear the parsing error that may be set by the reg parser.  */
2181   clear_error ();
2182 
2183   if (reg == PARSE_FAIL)
2184     return FALSE;
2185 
2186   skip_whitespace (str);
2187   if (*str == ',' || is_end_of_line[(unsigned int) *str])
2188     return TRUE;
2189 
2190   return FALSE;
2191 }
2192 
2193 /* Parser functions used exclusively in instruction operands.  */
2194 
2195 /* Parse an immediate expression which may not be constant.
2196 
2197    To prevent the expression parser from pushing a register name
2198    into the symbol table as an undefined symbol, a check is first
2199    done to find out whether STR is a register of type REG_TYPE followed
2200    by a comma or the end of line.  Return FALSE if STR is such a string.  */
2201 
2202 static bfd_boolean
2203 parse_immediate_expression (char **str, expressionS *exp,
2204 			    aarch64_reg_type reg_type)
2205 {
2206   if (reg_name_p (*str, reg_type))
2207     {
2208       set_recoverable_error (_("immediate operand required"));
2209       return FALSE;
2210     }
2211 
2212   my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2213 
2214   if (exp->X_op == O_absent)
2215     {
2216       set_fatal_syntax_error (_("missing immediate expression"));
2217       return FALSE;
2218     }
2219 
2220   return TRUE;
2221 }
2222 
2223 /* Constant immediate-value read function for use in insn parsing.
2224    STR points to the beginning of the immediate (with the optional
2225    leading #); *VAL receives the value.  REG_TYPE says which register
2226    names should be treated as registers rather than as symbolic immediates.
2227 
2228    Return TRUE on success; otherwise return FALSE.  */
2229 
2230 static bfd_boolean
2231 parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2232 {
2233   expressionS exp;
2234 
2235   if (! parse_immediate_expression (str, &exp, reg_type))
2236     return FALSE;
2237 
2238   if (exp.X_op != O_constant)
2239     {
2240       set_syntax_error (_("constant expression required"));
2241       return FALSE;
2242     }
2243 
2244   *val = exp.X_add_number;
2245   return TRUE;
2246 }
2247 
2248 static uint32_t
2249 encode_imm_float_bits (uint32_t imm)
2250 {
2251   return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
2252     | ((imm >> (31 - 7)) & 0x80);	/* b[31]    -> b[7]   */
2253 }
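
/* For example, 1.0f (0x3f800000) encodes to 0x70 and -1.0f (0xbf800000)
   to 0xf0, matching the AArch64 8-bit floating-point immediate format.  */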
2254 
2255 /* Return TRUE if the single-precision floating-point value encoded in IMM
2256    can be expressed in the AArch64 8-bit signed floating-point format with
2257    3-bit exponent and normalized 4 bits of precision; in other words, the
2258    floating-point value must be expressible as
2259      (+/-) n / 16 * power (2, r)
2260    where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2261 
2262 static bfd_boolean
2263 aarch64_imm_float_p (uint32_t imm)
2264 {
2265   /* If a single-precision floating-point value has the following bit
2266      pattern, it can be expressed in the AArch64 8-bit floating-point
2267      format:
2268 
2269      3 32222222 2221111111111
2270      1 09876543 21098765432109876543210
2271      n Eeeeeexx xxxx0000000000000000000
2272 
2273      where n, e and each x are either 0 or 1 independently, with
2274      E == ~ e.  */
2275 
2276   uint32_t pattern;
2277 
2278   /* Prepare the pattern for 'Eeeeee'.  */
2279   if (((imm >> 30) & 0x1) == 0)
2280     pattern = 0x3e000000;
2281   else
2282     pattern = 0x40000000;
2283 
2284   return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
2285     && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
2286 }
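
/* E.g. 0x3f000000 (0.5f) and 0x40000000 (2.0f) satisfy the test above,
   while 0x3f000001 fails because its low 19 bits are non-zero.  */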
2287 
2288 /* Return TRUE if the IEEE double value encoded in IMM can be expressed
2289    as an IEEE float without any loss of precision.  Store the value in
2290    *FPWORD if so.  */
2291 
2292 static bfd_boolean
2293 can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2294 {
2295   /* If a double-precision floating-point value has the following bit
2296      pattern, it can be expressed in a float:
2297 
2298      6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2299      3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2300      n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2301 
2302        ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
2303 	 if Eeee_eeee != 1111_1111
2304 
2305      where n, e, s and S are either 0 or 1 independently and where ~ is the
2306      inverse of E.  */
2307 
2308   uint32_t pattern;
2309   uint32_t high32 = imm >> 32;
2310   uint32_t low32 = imm;
2311 
2312   /* Lower 29 bits need to be 0s.  */
2313   if ((imm & 0x1fffffff) != 0)
2314     return FALSE;
2315 
2316   /* Prepare the pattern for 'Eeeeeeeee'.  */
2317   if (((high32 >> 30) & 0x1) == 0)
2318     pattern = 0x38000000;
2319   else
2320     pattern = 0x40000000;
2321 
2322   /* Check E~~~.  */
2323   if ((high32 & 0x78000000) != pattern)
2324     return FALSE;
2325 
2326   /* Check Eeee_eeee != 1111_1111.  */
2327   if ((high32 & 0x7ff00000) == 0x47f00000)
2328     return FALSE;
2329 
2330   *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
2331 	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
2332 	     | (low32 >> 29));			/* 3 S bits.  */
2333   return TRUE;
2334 }
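
/* For example, the double 2.0 (0x4000000000000000) converts exactly,
   giving *FPWORD == 0x40000000 (2.0f), whereas 0x3ff0000000000001 is
   rejected because its low 29 bits are non-zero.  */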
2335 
2336 /* Return true if we should treat OPERAND as a double-precision
2337    floating-point operand rather than a single-precision one.  */
2338 static bfd_boolean
2339 double_precision_operand_p (const aarch64_opnd_info *operand)
2340 {
2341   /* Check for unsuffixed SVE registers, which are allowed
2342      for LDR and STR but not in instructions that require an
2343      immediate.  We get better error messages if we arbitrarily
2344      pick one size, parse the immediate normally, and then
2345      report the match failure in the normal way.  */
2346   return (operand->qualifier == AARCH64_OPND_QLF_NIL
2347 	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2348 }
2349 
2350 /* Parse a floating-point immediate.  Return TRUE on success and return the
2351    value in *IMMED in the format of IEEE754 single-precision encoding.
2352    *CCP points to the start of the string; DP_P is TRUE when the immediate
2353    is expected to be in double-precision (N.B. this only matters when
2354    hexadecimal representation is involved).  REG_TYPE says which register
2355    names should be treated as registers rather than as symbolic immediates.
2356 
2357    This routine accepts any IEEE float; it is up to the callers to reject
2358    invalid ones.  */
2359 
2360 static bfd_boolean
2361 parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2362 			 aarch64_reg_type reg_type)
2363 {
2364   char *str = *ccp;
2365   char *fpnum;
2366   LITTLENUM_TYPE words[MAX_LITTLENUMS];
2367   int64_t val = 0;
2368   unsigned fpword = 0;
2369   bfd_boolean hex_p = FALSE;
2370 
2371   skip_past_char (&str, '#');
2372 
2373   fpnum = str;
2374   skip_whitespace (fpnum);
2375 
2376   if (strncmp (fpnum, "0x", 2) == 0)
2377     {
2378       /* Support the hexadecimal representation of the IEEE754 encoding.
2379 	 Double-precision is expected when DP_P is TRUE, otherwise the
2380 	 representation should be in single-precision.  */
2381       if (! parse_constant_immediate (&str, &val, reg_type))
2382 	goto invalid_fp;
2383 
2384       if (dp_p)
2385 	{
2386 	  if (!can_convert_double_to_float (val, &fpword))
2387 	    goto invalid_fp;
2388 	}
2389       else if ((uint64_t) val > 0xffffffff)
2390 	goto invalid_fp;
2391       else
2392 	fpword = val;
2393 
2394       hex_p = TRUE;
2395     }
2396   else if (reg_name_p (str, reg_type))
2397     {
2398       set_recoverable_error (_("immediate operand required"));
2399       return FALSE;
2400     }
2401 
2402   if (! hex_p)
2403     {
2404       int i;
2405 
2406       if ((str = atof_ieee (str, 's', words)) == NULL)
2407 	goto invalid_fp;
2408 
2409       /* Our FP word must be 32 bits (single-precision FP).  */
2410       for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2411 	{
2412 	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
2413 	  fpword |= words[i];
2414 	}
2415     }
2416 
2417   *immed = fpword;
2418   *ccp = str;
2419   return TRUE;
2420 
2421 invalid_fp:
2422   set_fatal_syntax_error (_("invalid floating-point constant"));
2423   return FALSE;
2424 }
2425 
2426 /* Less-generic immediate-value read function with the possibility of loading
2427    a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2428    instructions.
2429 
2430    To prevent the expression parser from pushing a register name into the
2431    symbol table as an undefined symbol, a check is first done to find
2432    out whether STR is a register of type REG_TYPE followed by a comma or
2433    the end of line.  Return FALSE if STR is such a register.  */
2434 
2435 static bfd_boolean
2436 parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2437 {
2438   char *ptr = *str;
2439 
2440   if (reg_name_p (ptr, reg_type))
2441     {
2442       set_syntax_error (_("immediate operand required"));
2443       return FALSE;
2444     }
2445 
2446   my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2447 
2448   if (inst.reloc.exp.X_op == O_constant)
2449     *imm = inst.reloc.exp.X_add_number;
2450 
2451   *str = ptr;
2452 
2453   return TRUE;
2454 }
2455 
2456 /* Record in *RELOC that OPERAND needs a GAS internal fixup.
2457    If NEED_LIBOPCODES_P is non-zero, the fixup will need
2458    assistance from libopcodes.  */
2459 
2460 static inline void
2461 aarch64_set_gas_internal_fixup (struct reloc *reloc,
2462 				const aarch64_opnd_info *operand,
2463 				int need_libopcodes_p)
2464 {
2465   reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2466   reloc->opnd = operand->type;
2467   if (need_libopcodes_p)
2468     reloc->need_libopcodes_p = 1;
2469 }
2470 
2471 /* Return TRUE if the instruction needs to be fixed up later internally by
2472    GAS; otherwise return FALSE.  */
2473 
2474 static inline bfd_boolean
2475 aarch64_gas_internal_fixup_p (void)
2476 {
2477   return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2478 }
2479 
2480 /* Assign the immediate value to the relevant field in *OPERAND if
2481    RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2482    needs an internal fixup in a later stage.
2483    ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2484    IMM.VALUE that may get assigned with the constant.  */
2485 static inline void
2486 assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2487 				    aarch64_opnd_info *operand,
2488 				    int addr_off_p,
2489 				    int need_libopcodes_p,
2490 				    int skip_p)
2491 {
2492   if (reloc->exp.X_op == O_constant)
2493     {
2494       if (addr_off_p)
2495 	operand->addr.offset.imm = reloc->exp.X_add_number;
2496       else
2497 	operand->imm.value = reloc->exp.X_add_number;
2498       reloc->type = BFD_RELOC_UNUSED;
2499     }
2500   else
2501     {
2502       aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2503       /* Tell libopcodes to ignore this operand or not.  This is helpful
2504 	 when one of the operands needs to be fixed up later but we need
2505 	 libopcodes to check the other operands.  */
2506       operand->skip = skip_p;
2507     }
2508 }
2509 
2510 /* Relocation modifiers.  Each entry in the table contains the textual
2511    name for the relocation which may be placed before a symbol used as
2512    a load/store offset, or add immediate. It must be surrounded by a
2513    leading and trailing colon, for example:
2514 
2515 	ldr	x0, [x1, #:rello:varsym]
2516 	add	x0, x1, #:rello:varsym  */
2517 
2518 struct reloc_table_entry
2519 {
2520   const char *name;
2521   int pc_rel;
2522   bfd_reloc_code_real_type adr_type;
2523   bfd_reloc_code_real_type adrp_type;
2524   bfd_reloc_code_real_type movw_type;
2525   bfd_reloc_code_real_type add_type;
2526   bfd_reloc_code_real_type ldst_type;
2527   bfd_reloc_code_real_type ld_literal_type;
2528 };
2529 
2530 static struct reloc_table_entry reloc_table[] = {
2531   /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2532   {"lo12", 0,
2533    0,				/* adr_type */
2534    0,
2535    0,
2536    BFD_RELOC_AARCH64_ADD_LO12,
2537    BFD_RELOC_AARCH64_LDST_LO12,
2538    0},
2539 
2540   /* Higher 21 bits of pc-relative page offset: ADRP */
2541   {"pg_hi21", 1,
2542    0,				/* adr_type */
2543    BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2544    0,
2545    0,
2546    0,
2547    0},
2548 
2549   /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2550   {"pg_hi21_nc", 1,
2551    0,				/* adr_type */
2552    BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2553    0,
2554    0,
2555    0,
2556    0},
2557 
2558   /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2559   {"abs_g0", 0,
2560    0,				/* adr_type */
2561    0,
2562    BFD_RELOC_AARCH64_MOVW_G0,
2563    0,
2564    0,
2565    0},
2566 
2567   /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2568   {"abs_g0_s", 0,
2569    0,				/* adr_type */
2570    0,
2571    BFD_RELOC_AARCH64_MOVW_G0_S,
2572    0,
2573    0,
2574    0},
2575 
2576   /* Less significant bits 0-15 of address/value: MOVK, no check */
2577   {"abs_g0_nc", 0,
2578    0,				/* adr_type */
2579    0,
2580    BFD_RELOC_AARCH64_MOVW_G0_NC,
2581    0,
2582    0,
2583    0},
2584 
2585   /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2586   {"abs_g1", 0,
2587    0,				/* adr_type */
2588    0,
2589    BFD_RELOC_AARCH64_MOVW_G1,
2590    0,
2591    0,
2592    0},
2593 
2594   /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2595   {"abs_g1_s", 0,
2596    0,				/* adr_type */
2597    0,
2598    BFD_RELOC_AARCH64_MOVW_G1_S,
2599    0,
2600    0,
2601    0},
2602 
2603   /* Less significant bits 16-31 of address/value: MOVK, no check */
2604   {"abs_g1_nc", 0,
2605    0,				/* adr_type */
2606    0,
2607    BFD_RELOC_AARCH64_MOVW_G1_NC,
2608    0,
2609    0,
2610    0},
2611 
2612   /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2613   {"abs_g2", 0,
2614    0,				/* adr_type */
2615    0,
2616    BFD_RELOC_AARCH64_MOVW_G2,
2617    0,
2618    0,
2619    0},
2620 
2621   /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2622   {"abs_g2_s", 0,
2623    0,				/* adr_type */
2624    0,
2625    BFD_RELOC_AARCH64_MOVW_G2_S,
2626    0,
2627    0,
2628    0},
2629 
2630   /* Less significant bits 32-47 of address/value: MOVK, no check */
2631   {"abs_g2_nc", 0,
2632    0,				/* adr_type */
2633    0,
2634    BFD_RELOC_AARCH64_MOVW_G2_NC,
2635    0,
2636    0,
2637    0},
2638 
2639   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2640   {"abs_g3", 0,
2641    0,				/* adr_type */
2642    0,
2643    BFD_RELOC_AARCH64_MOVW_G3,
2644    0,
2645    0,
2646    0},
2647 
2648   /* Most significant bits 0-15 of signed/unsigned address/value: MOVZ */
2649   {"prel_g0", 1,
2650    0,				/* adr_type */
2651    0,
2652    BFD_RELOC_AARCH64_MOVW_PREL_G0,
2653    0,
2654    0,
2655    0},
2656 
2657   /* Most significant bits 0-15 of signed/unsigned address/value: MOVK */
2658   {"prel_g0_nc", 1,
2659    0,				/* adr_type */
2660    0,
2661    BFD_RELOC_AARCH64_MOVW_PREL_G0_NC,
2662    0,
2663    0,
2664    0},
2665 
2666   /* Most significant bits 16-31 of signed/unsigned address/value: MOVZ */
2667   {"prel_g1", 1,
2668    0,				/* adr_type */
2669    0,
2670    BFD_RELOC_AARCH64_MOVW_PREL_G1,
2671    0,
2672    0,
2673    0},
2674 
2675   /* Most significant bits 16-31 of signed/unsigned address/value: MOVK */
2676   {"prel_g1_nc", 1,
2677    0,				/* adr_type */
2678    0,
2679    BFD_RELOC_AARCH64_MOVW_PREL_G1_NC,
2680    0,
2681    0,
2682    0},
2683 
2684   /* Most significant bits 32-47 of signed/unsigned address/value: MOVZ */
2685   {"prel_g2", 1,
2686    0,				/* adr_type */
2687    0,
2688    BFD_RELOC_AARCH64_MOVW_PREL_G2,
2689    0,
2690    0,
2691    0},
2692 
2693   /* Most significant bits 32-47 of signed/unsigned address/value: MOVK */
2694   {"prel_g2_nc", 1,
2695    0,				/* adr_type */
2696    0,
2697    BFD_RELOC_AARCH64_MOVW_PREL_G2_NC,
2698    0,
2699    0,
2700    0},
2701 
2702   /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2703   {"prel_g3", 1,
2704    0,				/* adr_type */
2705    0,
2706    BFD_RELOC_AARCH64_MOVW_PREL_G3,
2707    0,
2708    0,
2709    0},
2710 
2711   /* Get to the page containing GOT entry for a symbol.  */
2712   {"got", 1,
2713    0,				/* adr_type */
2714    BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2715    0,
2716    0,
2717    0,
2718    BFD_RELOC_AARCH64_GOT_LD_PREL19},
2719 
2720   /* 12 bit offset into the page containing GOT entry for that symbol.  */
2721   {"got_lo12", 0,
2722    0,				/* adr_type */
2723    0,
2724    0,
2725    0,
2726    BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2727    0},
2728 
2729   /* 0-15 bits of address/value: MOVK, no check.  */
2730   {"gotoff_g0_nc", 0,
2731    0,				/* adr_type */
2732    0,
2733    BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2734    0,
2735    0,
2736    0},
2737 
2738   /* Most significant bits 16-31 of address/value: MOVZ.  */
2739   {"gotoff_g1", 0,
2740    0,				/* adr_type */
2741    0,
2742    BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2743    0,
2744    0,
2745    0},
2746 
2747   /* 15 bit offset into the page containing GOT entry for that symbol.  */
2748   {"gotoff_lo15", 0,
2749    0,				/* adr_type */
2750    0,
2751    0,
2752    0,
2753    BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2754    0},
2755 
2756   /* Get to the page containing GOT TLS entry for a symbol */
2757   {"gottprel_g0_nc", 0,
2758    0,				/* adr_type */
2759    0,
2760    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2761    0,
2762    0,
2763    0},
2764 
2765   /* Get to the page containing GOT TLS entry for a symbol */
2766   {"gottprel_g1", 0,
2767    0,				/* adr_type */
2768    0,
2769    BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2770    0,
2771    0,
2772    0},
2773 
2774   /* Get to the page containing GOT TLS entry for a symbol */
2775   {"tlsgd", 0,
2776    BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2777    BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2778    0,
2779    0,
2780    0,
2781    0},
2782 
2783   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2784   {"tlsgd_lo12", 0,
2785    0,				/* adr_type */
2786    0,
2787    0,
2788    BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2789    0,
2790    0},
2791 
2792   /* Lower 16 bits of address/value: MOVK.  */
2793   {"tlsgd_g0_nc", 0,
2794    0,				/* adr_type */
2795    0,
2796    BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2797    0,
2798    0,
2799    0},
2800 
2801   /* Most significant bits 16-31 of address/value: MOVZ.  */
2802   {"tlsgd_g1", 0,
2803    0,				/* adr_type */
2804    0,
2805    BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2806    0,
2807    0,
2808    0},
2809 
2810   /* Get to the page containing GOT TLS entry for a symbol */
2811   {"tlsdesc", 0,
2812    BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2813    BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2814    0,
2815    0,
2816    0,
2817    BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2818 
2819   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2820   {"tlsdesc_lo12", 0,
2821    0,				/* adr_type */
2822    0,
2823    0,
2824    BFD_RELOC_AARCH64_TLSDESC_ADD_LO12,
2825    BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2826    0},
2827 
2828   /* Get to the page containing GOT TLS entry for a symbol.
2829      As with GD, we allocate two consecutive GOT slots for the
2830      module index and module offset; the only difference from GD
2831      is that the module offset should be initialized to zero
2832      without any outstanding runtime relocation.  */
2833   {"tlsldm", 0,
2834    BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2835    BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2836    0,
2837    0,
2838    0,
2839    0},
2840 
2841   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2842   {"tlsldm_lo12_nc", 0,
2843    0,				/* adr_type */
2844    0,
2845    0,
2846    BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2847    0,
2848    0},
2849 
2850   /* 12 bit offset into the module TLS base address.  */
2851   {"dtprel_lo12", 0,
2852    0,				/* adr_type */
2853    0,
2854    0,
2855    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2856    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2857    0},
2858 
2859   /* Same as dtprel_lo12, no overflow check.  */
2860   {"dtprel_lo12_nc", 0,
2861    0,				/* adr_type */
2862    0,
2863    0,
2864    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2865    BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2866    0},
2867 
2868   /* bits[23:12] of offset to the module TLS base address.  */
2869   {"dtprel_hi12", 0,
2870    0,				/* adr_type */
2871    0,
2872    0,
2873    BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2874    0,
2875    0},
2876 
2877   /* bits[15:0] of offset to the module TLS base address.  */
2878   {"dtprel_g0", 0,
2879    0,				/* adr_type */
2880    0,
2881    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2882    0,
2883    0,
2884    0},
2885 
2886   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
2887   {"dtprel_g0_nc", 0,
2888    0,				/* adr_type */
2889    0,
2890    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2891    0,
2892    0,
2893    0},
2894 
2895   /* bits[31:16] of offset to the module TLS base address.  */
2896   {"dtprel_g1", 0,
2897    0,				/* adr_type */
2898    0,
2899    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2900    0,
2901    0,
2902    0},
2903 
2904   /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
2905   {"dtprel_g1_nc", 0,
2906    0,				/* adr_type */
2907    0,
2908    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2909    0,
2910    0,
2911    0},
2912 
2913   /* bits[47:32] of offset to the module TLS base address.  */
2914   {"dtprel_g2", 0,
2915    0,				/* adr_type */
2916    0,
2917    BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2918    0,
2919    0,
2920    0},
2921 
2922   /* Lower 16 bit offset into GOT entry for a symbol */
2923   {"tlsdesc_off_g0_nc", 0,
2924    0,				/* adr_type */
2925    0,
2926    BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2927    0,
2928    0,
2929    0},
2930 
2931   /* Higher 16 bit offset into GOT entry for a symbol */
2932   {"tlsdesc_off_g1", 0,
2933    0,				/* adr_type */
2934    0,
2935    BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2936    0,
2937    0,
2938    0},
2939 
2940   /* Get to the page containing GOT TLS entry for a symbol */
2941   {"gottprel", 0,
2942    0,				/* adr_type */
2943    BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2944    0,
2945    0,
2946    0,
2947    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2948 
2949   /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2950   {"gottprel_lo12", 0,
2951    0,				/* adr_type */
2952    0,
2953    0,
2954    0,
2955    BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2956    0},
2957 
2958   /* Get tp offset for a symbol.  */
2959   {"tprel", 0,
2960    0,				/* adr_type */
2961    0,
2962    0,
2963    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2964    0,
2965    0},
2966 
2967   /* Get tp offset for a symbol.  */
2968   {"tprel_lo12", 0,
2969    0,				/* adr_type */
2970    0,
2971    0,
2972    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2973    BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12,
2974    0},
2975 
2976   /* Get tp offset for a symbol.  */
2977   {"tprel_hi12", 0,
2978    0,				/* adr_type */
2979    0,
2980    0,
2981    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2982    0,
2983    0},
2984 
2985   /* Get tp offset for a symbol.  */
2986   {"tprel_lo12_nc", 0,
2987    0,				/* adr_type */
2988    0,
2989    0,
2990    BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2991    BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC,
2992    0},
2993 
2994   /* Most significant bits 32-47 of address/value: MOVZ.  */
2995   {"tprel_g2", 0,
2996    0,				/* adr_type */
2997    0,
2998    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2999    0,
3000    0,
3001    0},
3002 
3003   /* Most significant bits 16-31 of address/value: MOVZ.  */
3004   {"tprel_g1", 0,
3005    0,				/* adr_type */
3006    0,
3007    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
3008    0,
3009    0,
3010    0},
3011 
3012   /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
3013   {"tprel_g1_nc", 0,
3014    0,				/* adr_type */
3015    0,
3016    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
3017    0,
3018    0,
3019    0},
3020 
3021   /* Most significant bits 0-15 of address/value: MOVZ.  */
3022   {"tprel_g0", 0,
3023    0,				/* adr_type */
3024    0,
3025    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
3026    0,
3027    0,
3028    0},
3029 
3030   /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
3031   {"tprel_g0_nc", 0,
3032    0,				/* adr_type */
3033    0,
3034    BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
3035    0,
3036    0,
3037    0},
3038 
3039   /* 15-bit offset from GOT entry to base address of GOT table.  */
3040   {"gotpage_lo15", 0,
3041    0,
3042    0,
3043    0,
3044    0,
3045    BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
3046    0},
3047 
3048   /* 14-bit offset from GOT entry to base address of GOT table.  */
3049   {"gotpage_lo14", 0,
3050    0,
3051    0,
3052    0,
3053    0,
3054    BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
3055    0},
3056 };
3057 
3058 /* Given the address of a pointer pointing to the textual name of a
3059    relocation as may appear in assembler source, attempt to find its
3060    details in reloc_table.  The pointer will be updated to the character
3061    after the trailing colon.  On failure, NULL will be returned;
3062    otherwise return the reloc_table_entry.  */
3063 
3064 static struct reloc_table_entry *
3065 find_reloc_table_entry (char **str)
3066 {
3067   unsigned int i;
3068   for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
3069     {
3070       int length = strlen (reloc_table[i].name);
3071 
3072       if (strncasecmp (reloc_table[i].name, *str, length) == 0
3073 	  && (*str)[length] == ':')
3074 	{
3075 	  *str += (length + 1);
3076 	  return &reloc_table[i];
3077 	}
3078     }
3079 
3080   return NULL;
3081 }
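
/* For example, when parsing "ldr x0, [x1, #:lo12:sym]", *STR points at
   "lo12:sym" on entry to find_reloc_table_entry; on return it points at
   "sym" and the "lo12" table entry is returned.  */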
3082 
3083 /* Mode argument to parse_shift and parse_shifter_operand.  */
3084 enum parse_shift_mode
3085 {
3086   SHIFTED_NONE,			/* no shifter allowed  */
3087   SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
3088 				   "#imm{,lsl #n}"  */
3089   SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
3090 				   "#imm"  */
3091   SHIFTED_LSL,			/* bare "lsl #n"  */
3092   SHIFTED_MUL,			/* bare "mul #n"  */
3093   SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
3094   SHIFTED_MUL_VL,		/* "mul vl"  */
3095   SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
3096 };
3097 
3098 /* Parse a <shift> operator on an AArch64 data processing instruction.
3099    Return TRUE on success; otherwise return FALSE.  */
3100 static bfd_boolean
3101 parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
3102 {
3103   const struct aarch64_name_value_pair *shift_op;
3104   enum aarch64_modifier_kind kind;
3105   expressionS exp;
3106   int exp_has_prefix;
3107   char *s = *str;
3108   char *p = s;
3109 
3110   for (p = *str; ISALPHA (*p); p++)
3111     ;
3112 
3113   if (p == *str)
3114     {
3115       set_syntax_error (_("shift expression expected"));
3116       return FALSE;
3117     }
3118 
3119   shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
3120 
3121   if (shift_op == NULL)
3122     {
3123       set_syntax_error (_("shift operator expected"));
3124       return FALSE;
3125     }
3126 
3127   kind = aarch64_get_operand_modifier (shift_op);
3128 
3129   if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
3130     {
3131       set_syntax_error (_("invalid use of 'MSL'"));
3132       return FALSE;
3133     }
3134 
3135   if (kind == AARCH64_MOD_MUL
3136       && mode != SHIFTED_MUL
3137       && mode != SHIFTED_MUL_VL)
3138     {
3139       set_syntax_error (_("invalid use of 'MUL'"));
3140       return FALSE;
3141     }
3142 
3143   switch (mode)
3144     {
3145     case SHIFTED_LOGIC_IMM:
3146       if (aarch64_extend_operator_p (kind))
3147 	{
3148 	  set_syntax_error (_("extending shift is not permitted"));
3149 	  return FALSE;
3150 	}
3151       break;
3152 
3153     case SHIFTED_ARITH_IMM:
3154       if (kind == AARCH64_MOD_ROR)
3155 	{
3156 	  set_syntax_error (_("'ROR' shift is not permitted"));
3157 	  return FALSE;
3158 	}
3159       break;
3160 
3161     case SHIFTED_LSL:
3162       if (kind != AARCH64_MOD_LSL)
3163 	{
3164 	  set_syntax_error (_("only 'LSL' shift is permitted"));
3165 	  return FALSE;
3166 	}
3167       break;
3168 
3169     case SHIFTED_MUL:
3170       if (kind != AARCH64_MOD_MUL)
3171 	{
3172 	  set_syntax_error (_("only 'MUL' is permitted"));
3173 	  return FALSE;
3174 	}
3175       break;
3176 
3177     case SHIFTED_MUL_VL:
3178       /* "MUL VL" consists of two separate tokens.  Require the first
3179 	 token to be "MUL" and look for a following "VL".  */
3180       if (kind == AARCH64_MOD_MUL)
3181 	{
3182 	  skip_whitespace (p);
3183 	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3184 	    {
3185 	      p += 2;
3186 	      kind = AARCH64_MOD_MUL_VL;
3187 	      break;
3188 	    }
3189 	}
3190       set_syntax_error (_("only 'MUL VL' is permitted"));
3191       return FALSE;
3192 
3193     case SHIFTED_REG_OFFSET:
3194       if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3195 	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3196 	{
3197 	  set_fatal_syntax_error
3198 	    (_("invalid shift for the register offset addressing mode"));
3199 	  return FALSE;
3200 	}
3201       break;
3202 
3203     case SHIFTED_LSL_MSL:
3204       if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3205 	{
3206 	  set_syntax_error (_("invalid shift operator"));
3207 	  return FALSE;
3208 	}
3209       break;
3210 
3211     default:
3212       abort ();
3213     }
3214 
3215   /* Whitespace can appear here if the next thing is a bare digit.  */
3216   skip_whitespace (p);
3217 
3218   /* Parse shift amount.  */
3219   exp_has_prefix = 0;
3220   if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3221     exp.X_op = O_absent;
3222   else
3223     {
3224       if (is_immediate_prefix (*p))
3225 	{
3226 	  p++;
3227 	  exp_has_prefix = 1;
3228 	}
3229       my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3230     }
3231   if (kind == AARCH64_MOD_MUL_VL)
3232     /* For consistency, give MUL VL the same shift amount as an implicit
3233        MUL #1.  */
3234     operand->shifter.amount = 1;
3235   else if (exp.X_op == O_absent)
3236     {
3237       if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3238 	{
3239 	  set_syntax_error (_("missing shift amount"));
3240 	  return FALSE;
3241 	}
3242       operand->shifter.amount = 0;
3243     }
3244   else if (exp.X_op != O_constant)
3245     {
3246       set_syntax_error (_("constant shift amount required"));
3247       return FALSE;
3248     }
3249   /* For parsing purposes, MUL #n has no inherent range.  The range
3250      depends on the operand and will be checked by operand-specific
3251      routines.  */
3252   else if (kind != AARCH64_MOD_MUL
3253 	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
3254     {
3255       set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3256       return FALSE;
3257     }
3258   else
3259     {
3260       operand->shifter.amount = exp.X_add_number;
3261       operand->shifter.amount_present = 1;
3262     }
3263 
3264   operand->shifter.operator_present = 1;
3265   operand->shifter.kind = kind;
3266 
3267   *str = p;
3268   return TRUE;
3269 }
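
/* Examples of shift/extend operators accepted by parse_shift, shown in
   context for reference:

	add	x0, x1, x2, lsl #4	// SHIFTED_ARITH_IMM
	and	x0, x1, x2, ror #8	// SHIFTED_LOGIC_IMM
	ldr	x0, [x1, w2, sxtw #3]	// SHIFTED_REG_OFFSET
	ldr	z0, [x0, #2, mul vl]	// SHIFTED_MUL_VL (SVE)
	movi	v0.4s, #255, msl #8	// SHIFTED_LSL_MSL  */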
3270 
3271 /* Parse a <shifter_operand> for a data processing instruction:
3272 
3273       #<immediate>
3274       #<immediate>, LSL #imm
3275 
3276    Validation of immediate operands is deferred to md_apply_fix.
3277 
3278    Return TRUE on success; otherwise return FALSE.  */
3279 
3280 static bfd_boolean
3281 parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3282 			   enum parse_shift_mode mode)
3283 {
3284   char *p;
3285 
3286   if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3287     return FALSE;
3288 
3289   p = *str;
3290 
3291   /* Accept an immediate expression.  */
3292   if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3293     return FALSE;
3294 
3295   /* Accept optional LSL for arithmetic immediate values.  */
3296   if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3297     if (! parse_shift (&p, operand, SHIFTED_LSL))
3298       return FALSE;
3299 
3300   /* Do not accept any shifter for logical immediate values.  */
3301   if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3302       && parse_shift (&p, operand, mode))
3303     {
3304       set_syntax_error (_("unexpected shift operator"));
3305       return FALSE;
3306     }
3307 
3308   *str = p;
3309   return TRUE;
3310 }
3311 
3312 /* Parse a <shifter_operand> for a data processing instruction:
3313 
3314       <Rm>
3315       <Rm>, <shift>
3316       #<immediate>
3317       #<immediate>, LSL #imm
3318 
3319    where <shift> is handled by parse_shift above, and the last two
3320    cases are handled by the function above.
3321 
3322    Validation of immediate operands is deferred to md_apply_fix.
3323 
3324    Return TRUE on success; otherwise return FALSE.  */
3325 
3326 static bfd_boolean
3327 parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3328 		       enum parse_shift_mode mode)
3329 {
3330   const reg_entry *reg;
3331   aarch64_opnd_qualifier_t qualifier;
3332   enum aarch64_operand_class opd_class
3333     = aarch64_get_operand_class (operand->type);
3334 
3335   reg = aarch64_reg_parse_32_64 (str, &qualifier);
3336   if (reg)
3337     {
3338       if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3339 	{
3340 	  set_syntax_error (_("unexpected register in the immediate operand"));
3341 	  return FALSE;
3342 	}
3343 
3344       if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3345 	{
3346 	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3347 	  return FALSE;
3348 	}
3349 
3350       operand->reg.regno = reg->number;
3351       operand->qualifier = qualifier;
3352 
3353       /* Accept optional shift operation on register.  */
3354       if (! skip_past_comma (str))
3355 	return TRUE;
3356 
3357       if (! parse_shift (str, operand, mode))
3358 	return FALSE;
3359 
3360       return TRUE;
3361     }
3362   else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3363     {
3364       set_syntax_error
3365 	(_("integer register expected in the extended/shifted operand "
3366 	   "register"));
3367       return FALSE;
3368     }
3369 
3370   /* We have a shifted immediate variable.  */
3371   return parse_shifter_operand_imm (str, operand, mode);
3372 }
3373 
3374 /* Return TRUE on success; return FALSE otherwise.  */
3375 
3376 static bfd_boolean
3377 parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3378 			     enum parse_shift_mode mode)
3379 {
3380   char *p = *str;
3381 
3382   /* Determine if we have the sequence of characters #: or just :
3383      coming next.  If we do, then we check for a :rello: relocation
3384      modifier.  If we don't, punt the whole lot to
3385      parse_shifter_operand.  */
3386 
3387   if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3388     {
3389       struct reloc_table_entry *entry;
3390 
3391       if (p[0] == '#')
3392 	p += 2;
3393       else
3394 	p++;
3395       *str = p;
3396 
3397       /* Try to parse a relocation.  Anything else is an error.  */
3398       if (!(entry = find_reloc_table_entry (str)))
3399 	{
3400 	  set_syntax_error (_("unknown relocation modifier"));
3401 	  return FALSE;
3402 	}
3403 
3404       if (entry->add_type == 0)
3405 	{
3406 	  set_syntax_error
3407 	    (_("this relocation modifier is not allowed on this instruction"));
3408 	  return FALSE;
3409 	}
3410 
3411       /* Save str before we decompose it.  */
3412       p = *str;
3413 
3414       /* Next, we parse the expression.  */
3415       if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3416 	return FALSE;
3417 
3418       /* Record the relocation type (use the ADD variant here).  */
3419       inst.reloc.type = entry->add_type;
3420       inst.reloc.pc_rel = entry->pc_rel;
3421 
3422       /* If str is empty, we've reached the end, stop here.  */
3423       if (**str == '\0')
3424 	return TRUE;
3425 
3426       /* Otherwise, we have a shifted reloc modifier, so rewind to
3427          recover the variable name and continue parsing for the shifter.  */
3428       *str = p;
3429       return parse_shifter_operand_imm (str, operand, mode);
3430     }
3431 
3432   return parse_shifter_operand (str, operand, mode);
3433 }
3434 
3435 /* Parse all forms of an address expression.  Information is written
3436    to *OPERAND and/or inst.reloc.
3437 
3438    The A64 instruction set has the following addressing modes:
3439 
3440    Offset
3441      [base]			 // in SIMD ld/st structure
3442      [base{,#0}]		 // in ld/st exclusive
3443      [base{,#imm}]
3444      [base,Xm{,LSL #imm}]
3445      [base,Xm,SXTX {#imm}]
3446      [base,Wm,(S|U)XTW {#imm}]
3447    Pre-indexed
3448      [base]!                    // in ldraa/ldrab exclusive
3449      [base,#imm]!
3450    Post-indexed
3451      [base],#imm
3452      [base],Xm			 // in SIMD ld/st structure
3453    PC-relative (literal)
3454      label
3455    SVE:
3456      [base,#imm,MUL VL]
3457      [base,Zm.D{,LSL #imm}]
3458      [base,Zm.S,(S|U)XTW {#imm}]
3459      [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3460      [Zn.S,#imm]
3461      [Zn.D,#imm]
3462      [Zn.S{, Xm}]
3463      [Zn.S,Zm.S{,LSL #imm}]      // in ADR
3464      [Zn.D,Zm.D{,LSL #imm}]      // in ADR
3465      [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3466 
3467    (As a convenience, the notation "=immediate" is permitted in conjunction
3468    with the pc-relative literal load instructions to automatically place an
3469    immediate value or symbolic address in a nearby literal pool and generate
3470    a hidden label which references it.)
3471 
3472    Upon a successful parsing, the address structure in *OPERAND will be
3473    filled in the following way:
3474 
3475      .base_regno = <base>
3476      .offset.is_reg	// 1 if the offset is a register
3477      .offset.imm = <imm>
3478      .offset.regno = <Rm>
3479 
3480    For different addressing modes defined in the A64 ISA:
3481 
3482    Offset
3483      .pcrel=0; .preind=1; .postind=0; .writeback=0
3484    Pre-indexed
3485      .pcrel=0; .preind=1; .postind=0; .writeback=1
3486    Post-indexed
3487      .pcrel=0; .preind=0; .postind=1; .writeback=1
3488    PC-relative (literal)
3489      .pcrel=1; .preind=1; .postind=0; .writeback=0
3490 
3491    The shift/extension information, if any, will be stored in .shifter.
3492    The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3493    *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3494    corresponding register.
3495 
3496    BASE_TYPE says which types of base register should be accepted and
3497    OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
3498    is the type of shifter that is allowed for immediate offsets,
3499    or SHIFTED_NONE if none.
3500 
3501    In all other respects, it is the caller's responsibility to check
3502    for addressing modes not supported by the instruction, and to set
3503    inst.reloc.type.  */
3504 
3505 static bfd_boolean
3506 parse_address_main (char **str, aarch64_opnd_info *operand,
3507 		    aarch64_opnd_qualifier_t *base_qualifier,
3508 		    aarch64_opnd_qualifier_t *offset_qualifier,
3509 		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
3510 		    enum parse_shift_mode imm_shift_mode)
3511 {
3512   char *p = *str;
3513   const reg_entry *reg;
3514   expressionS *exp = &inst.reloc.exp;
3515 
3516   *base_qualifier = AARCH64_OPND_QLF_NIL;
3517   *offset_qualifier = AARCH64_OPND_QLF_NIL;
3518   if (! skip_past_char (&p, '['))
3519     {
3520       /* =immediate or label.  */
3521       operand->addr.pcrel = 1;
3522       operand->addr.preind = 1;
3523 
3524       /* #:<reloc_op>:<symbol>  */
3525       skip_past_char (&p, '#');
3526       if (skip_past_char (&p, ':'))
3527 	{
3528 	  bfd_reloc_code_real_type ty;
3529 	  struct reloc_table_entry *entry;
3530 
3531 	  /* Try to parse a relocation modifier.  Anything else is
3532 	     an error.  */
3533 	  entry = find_reloc_table_entry (&p);
3534 	  if (! entry)
3535 	    {
3536 	      set_syntax_error (_("unknown relocation modifier"));
3537 	      return FALSE;
3538 	    }
3539 
3540 	  switch (operand->type)
3541 	    {
3542 	    case AARCH64_OPND_ADDR_PCREL21:
3543 	      /* adr */
3544 	      ty = entry->adr_type;
3545 	      break;
3546 
3547 	    default:
3548 	      ty = entry->ld_literal_type;
3549 	      break;
3550 	    }
3551 
3552 	  if (ty == 0)
3553 	    {
3554 	      set_syntax_error
3555 		(_("this relocation modifier is not allowed on this "
3556 		   "instruction"));
3557 	      return FALSE;
3558 	    }
3559 
3560 	  /* #:<reloc_op>:  */
3561 	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3562 	    {
3563 	      set_syntax_error (_("invalid relocation expression"));
3564 	      return FALSE;
3565 	    }
3566 
3567 	  /* #:<reloc_op>:<expr>  */
3568 	  /* Record the relocation type.  */
3569 	  inst.reloc.type = ty;
3570 	  inst.reloc.pc_rel = entry->pc_rel;
3571 	}
3572       else
3573 	{
3574 
3575 	  if (skip_past_char (&p, '='))
3576 	    /* =immediate; need to generate the literal in the literal pool. */
3577 	    inst.gen_lit_pool = 1;
3578 
3579 	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3580 	    {
3581 	      set_syntax_error (_("invalid address"));
3582 	      return FALSE;
3583 	    }
3584 	}
3585 
3586       *str = p;
3587       return TRUE;
3588     }
3589 
3590   /* [ */
3591 
3592   reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3593   if (!reg || !aarch64_check_reg_type (reg, base_type))
3594     {
3595       set_syntax_error (_(get_reg_expected_msg (base_type)));
3596       return FALSE;
3597     }
3598   operand->addr.base_regno = reg->number;
3599 
3600   /* [Xn */
3601   if (skip_past_comma (&p))
3602     {
3603       /* [Xn, */
3604       operand->addr.preind = 1;
3605 
3606       reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3607       if (reg)
3608 	{
3609 	  if (!aarch64_check_reg_type (reg, offset_type))
3610 	    {
3611 	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
3612 	      return FALSE;
3613 	    }
3614 
3615 	  /* [Xn,Rm  */
3616 	  operand->addr.offset.regno = reg->number;
3617 	  operand->addr.offset.is_reg = 1;
3618 	  /* Shifted index.  */
3619 	  if (skip_past_comma (&p))
3620 	    {
3621 	      /* [Xn,Rm,  */
3622 	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
3623 		/* Use the diagnostics set in parse_shift, so do not set a
3624 		   new error message here.  */
3625 		return FALSE;
3626 	    }
3627 	  /* We only accept:
3628 	     [base,Xm]  # For vector plus scalar SVE2 indexing.
3629 	     [base,Xm{,LSL #imm}]
3630 	     [base,Xm,SXTX {#imm}]
3631 	     [base,Wm,(S|U)XTW {#imm}]  */
3632 	  if (operand->shifter.kind == AARCH64_MOD_NONE
3633 	      || operand->shifter.kind == AARCH64_MOD_LSL
3634 	      || operand->shifter.kind == AARCH64_MOD_SXTX)
3635 	    {
3636 	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
3637 		{
3638 		  set_syntax_error (_("invalid use of 32-bit register offset"));
3639 		  return FALSE;
3640 		}
3641 	      if (aarch64_get_qualifier_esize (*base_qualifier)
3642 		  != aarch64_get_qualifier_esize (*offset_qualifier)
3643 		  && (operand->type != AARCH64_OPND_SVE_ADDR_ZX
3644 		      || *base_qualifier != AARCH64_OPND_QLF_S_S
3645 		      || *offset_qualifier != AARCH64_OPND_QLF_X))
3646 		{
3647 		  set_syntax_error (_("offset has different size from base"));
3648 		  return FALSE;
3649 		}
3650 	    }
3651 	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3652 	    {
3653 	      set_syntax_error (_("invalid use of 64-bit register offset"));
3654 	      return FALSE;
3655 	    }
3656 	}
3657       else
3658 	{
3659 	  /* [Xn,#:<reloc_op>:<symbol>  */
3660 	  skip_past_char (&p, '#');
3661 	  if (skip_past_char (&p, ':'))
3662 	    {
3663 	      struct reloc_table_entry *entry;
3664 
3665 	      /* Try to parse a relocation modifier.  Anything else is
3666 		 an error.  */
3667 	      if (!(entry = find_reloc_table_entry (&p)))
3668 		{
3669 		  set_syntax_error (_("unknown relocation modifier"));
3670 		  return FALSE;
3671 		}
3672 
3673 	      if (entry->ldst_type == 0)
3674 		{
3675 		  set_syntax_error
3676 		    (_("this relocation modifier is not allowed on this "
3677 		       "instruction"));
3678 		  return FALSE;
3679 		}
3680 
3681 	      /* [Xn,#:<reloc_op>:  */
3682 	      /* We now have the group relocation table entry corresponding to
3683 	         the name in the assembler source.  Next, we parse the
3684 	         expression.  */
3685 	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3686 		{
3687 		  set_syntax_error (_("invalid relocation expression"));
3688 		  return FALSE;
3689 		}
3690 
3691 	      /* [Xn,#:<reloc_op>:<expr>  */
3692 	      /* Record the load/store relocation type.  */
3693 	      inst.reloc.type = entry->ldst_type;
3694 	      inst.reloc.pc_rel = entry->pc_rel;
3695 	    }
3696 	  else
3697 	    {
3698 	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3699 		{
3700 		  set_syntax_error (_("invalid expression in the address"));
3701 		  return FALSE;
3702 		}
3703 	      /* [Xn,<expr>  */
3704 	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3705 		/* [Xn,<expr>,<shifter>  */
3706 		if (! parse_shift (&p, operand, imm_shift_mode))
3707 		  return FALSE;
3708 	    }
3709 	}
3710     }
3711 
3712   if (! skip_past_char (&p, ']'))
3713     {
3714       set_syntax_error (_("']' expected"));
3715       return FALSE;
3716     }
3717 
3718   if (skip_past_char (&p, '!'))
3719     {
3720       if (operand->addr.preind && operand->addr.offset.is_reg)
3721 	{
3722 	  set_syntax_error (_("register offset not allowed in pre-indexed "
3723 			      "addressing mode"));
3724 	  return FALSE;
3725 	}
3726       /* [Xn]! */
3727       operand->addr.writeback = 1;
3728     }
3729   else if (skip_past_comma (&p))
3730     {
3731       /* [Xn], */
3732       operand->addr.postind = 1;
3733       operand->addr.writeback = 1;
3734 
3735       if (operand->addr.preind)
3736 	{
3737 	  set_syntax_error (_("cannot combine pre- and post-indexing"));
3738 	  return FALSE;
3739 	}
3740 
3741       reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3742       if (reg)
3743 	{
3744 	  /* [Xn],Xm */
3745 	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3746 	    {
3747 	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3748 	      return FALSE;
3749 	    }
3750 
3751 	  operand->addr.offset.regno = reg->number;
3752 	  operand->addr.offset.is_reg = 1;
3753 	}
3754       else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3755 	{
3756 	  /* [Xn],#expr */
3757 	  set_syntax_error (_("invalid expression in the address"));
3758 	  return FALSE;
3759 	}
3760     }
3761 
3762   /* If at this point neither .preind nor .postind is set, we have a
3763      bare [Rn]{!}; only accept [Rn]! as a shorthand for [Rn,#0]! for ldraa and
3764      ldrab, accept [Rn] as a shorthand for [Rn,#0].
3765      For SVE2 vector plus scalar offsets, allow [Zn.<T>] as shorthand for
3766      [Zn.<T>, xzr].  */
3767   if (operand->addr.preind == 0 && operand->addr.postind == 0)
3768     {
3769       if (operand->addr.writeback)
3770 	{
3771 	  if (operand->type == AARCH64_OPND_ADDR_SIMM10)
3772             {
3773               /* Accept [Rn]! as a shorthand for [Rn,#0]!   */
3774               operand->addr.offset.is_reg = 0;
3775               operand->addr.offset.imm = 0;
3776               operand->addr.preind = 1;
3777             }
3778           else
3779            {
3780 	     /* Reject [Rn]!   */
3781 	     set_syntax_error (_("missing offset in the pre-indexed address"));
3782 	     return FALSE;
3783 	   }
3784 	}
3785        else
3786 	{
3787           operand->addr.preind = 1;
3788           if (operand->type == AARCH64_OPND_SVE_ADDR_ZX)
3789 	   {
3790 	     operand->addr.offset.is_reg = 1;
3791 	     operand->addr.offset.regno = REG_ZR;
3792 	     *offset_qualifier = AARCH64_OPND_QLF_X;
3793  	   }
3794           else
3795 	   {
3796 	     inst.reloc.exp.X_op = O_constant;
3797 	     inst.reloc.exp.X_add_number = 0;
3798 	   }
3799 	}
3800     }
3801 
3802   *str = p;
3803   return TRUE;
3804 }
3805 
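/* Illustrative examples only (not part of the assembler): some of the
   addressing forms parse_address_main can accept; which subset is actually
   legal depends on the operand type and is checked by the caller:

     ldr x0, =0x1234abcd       // =immediate, placed in the literal pool
     ldr x0, label             // PC-relative literal
     ldr x0, [x1]              // base register only
     ldr x0, [x1, #8]          // base plus immediate offset
     ldr x0, [x1, x2, lsl #3]  // base plus shifted register offset
     ldr x0, [x1, #8]!         // pre-indexed with writeback
     ldr x0, [x1], #8          // post-indexed  */
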
3806 /* Parse a base AArch64 address (as opposed to an SVE one).  Return TRUE
3807    on success.  */
3808 static bfd_boolean
3809 parse_address (char **str, aarch64_opnd_info *operand)
3810 {
3811   aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3812   return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3813 			     REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3814 }
3815 
3816 /* Parse an address in which SVE vector registers and MUL VL are allowed.
3817    The arguments have the same meaning as for parse_address_main.
3818    Return TRUE on success.  */
3819 static bfd_boolean
3820 parse_sve_address (char **str, aarch64_opnd_info *operand,
3821 		   aarch64_opnd_qualifier_t *base_qualifier,
3822 		   aarch64_opnd_qualifier_t *offset_qualifier)
3823 {
3824   return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3825 			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3826 			     SHIFTED_MUL_VL);
3827 }
3828 
3829 /* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3830    Return TRUE on success; otherwise return FALSE.  */
3831 static bfd_boolean
3832 parse_half (char **str, int *internal_fixup_p)
3833 {
3834   char *p = *str;
3835 
3836   skip_past_char (&p, '#');
3837 
3838   gas_assert (internal_fixup_p);
3839   *internal_fixup_p = 0;
3840 
3841   if (*p == ':')
3842     {
3843       struct reloc_table_entry *entry;
3844 
3845       /* Try to parse a relocation.  Anything else is an error.  */
3846       ++p;
3847       if (!(entry = find_reloc_table_entry (&p)))
3848 	{
3849 	  set_syntax_error (_("unknown relocation modifier"));
3850 	  return FALSE;
3851 	}
3852 
3853       if (entry->movw_type == 0)
3854 	{
3855 	  set_syntax_error
3856 	    (_("this relocation modifier is not allowed on this instruction"));
3857 	  return FALSE;
3858 	}
3859 
3860       inst.reloc.type = entry->movw_type;
3861     }
3862   else
3863     *internal_fixup_p = 1;
3864 
3865   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3866     return FALSE;
3867 
3868   *str = p;
3869   return TRUE;
3870 }
3871 
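/* For reference (a sketch, not an exhaustive list): parse_half handles both
   plain immediates and relocation-modifier forms, e.g.

     movz x0, #0x1234          // plain immediate, internal fixup
     movz x0, #:abs_g1:sym     // movw_type relocation from the table
     movk x0, #:abs_g0_nc:sym  // likewise for MOVK  */
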
3872 /* Parse an operand for an ADRP instruction:
3873      ADRP <Xd>, <label>
3874    Return TRUE on success; otherwise return FALSE.  */
3875 
3876 static bfd_boolean
3877 parse_adrp (char **str)
3878 {
3879   char *p;
3880 
3881   p = *str;
3882   if (*p == ':')
3883     {
3884       struct reloc_table_entry *entry;
3885 
3886       /* Try to parse a relocation.  Anything else is an error.  */
3887       ++p;
3888       if (!(entry = find_reloc_table_entry (&p)))
3889 	{
3890 	  set_syntax_error (_("unknown relocation modifier"));
3891 	  return FALSE;
3892 	}
3893 
3894       if (entry->adrp_type == 0)
3895 	{
3896 	  set_syntax_error
3897 	    (_("this relocation modifier is not allowed on this instruction"));
3898 	  return FALSE;
3899 	}
3900 
3901       inst.reloc.type = entry->adrp_type;
3902     }
3903   else
3904     inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3905 
3906   inst.reloc.pc_rel = 1;
3907 
3908   if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3909     return FALSE;
3910 
3911   *str = p;
3912   return TRUE;
3913 }
3914 
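/* For example (illustrative only):

     adrp x0, sym        // defaults to BFD_RELOC_AARCH64_ADR_HI21_PCREL
     adrp x0, :got:sym   // uses entry->adrp_type, here a GOT-page reloc  */
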
3915 /* Miscellaneous. */
3916 
3917 /* Parse a symbolic operand such as "pow2" at *STR.  ARRAY is an array
3918    of SIZE tokens in which index I gives the token for field value I,
3919    or is null if field value I is invalid.  REG_TYPE says which register
3920    names should be treated as registers rather than as symbolic immediates.
3921 
3922    Return true on success, moving *STR past the operand and storing the
3923    field value in *VAL.  */
3924 
3925 static int
3926 parse_enum_string (char **str, int64_t *val, const char *const *array,
3927 		   size_t size, aarch64_reg_type reg_type)
3928 {
3929   expressionS exp;
3930   char *p, *q;
3931   size_t i;
3932 
3933   /* Match C-like tokens.  */
3934   p = q = *str;
3935   while (ISALNUM (*q))
3936     q++;
3937 
3938   for (i = 0; i < size; ++i)
3939     if (array[i]
3940 	&& strncasecmp (array[i], p, q - p) == 0
3941 	&& array[i][q - p] == 0)
3942       {
3943 	*val = i;
3944 	*str = q;
3945 	return TRUE;
3946       }
3947 
3948   if (!parse_immediate_expression (&p, &exp, reg_type))
3949     return FALSE;
3950 
3951   if (exp.X_op == O_constant
3952       && (uint64_t) exp.X_add_number < size)
3953     {
3954       *val = exp.X_add_number;
3955       *str = p;
3956       return TRUE;
3957     }
3958 
3959   /* Use the default error for this operand.  */
3960   return FALSE;
3961 }
3962 
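/* A hypothetical example of the above: with ARRAY = { "pow2", "vl1", "vl2" }
   (the real token arrays live in the opcode tables), both "vl1" and "#1"
   would be accepted and store 1 in *VAL.  */
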
3963 /* Parse an option for a preload instruction.  Returns the encoding for the
3964    option, or PARSE_FAIL.  */
3965 
3966 static int
3967 parse_pldop (char **str)
3968 {
3969   char *p, *q;
3970   const struct aarch64_name_value_pair *o;
3971 
3972   p = q = *str;
3973   while (ISALNUM (*q))
3974     q++;
3975 
3976   o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3977   if (!o)
3978     return PARSE_FAIL;
3979 
3980   *str = q;
3981   return o->value;
3982 }
3983 
3984 /* Parse an option for a barrier instruction.  Returns the encoding for the
3985    option, or PARSE_FAIL.  */
3986 
3987 static int
3988 parse_barrier (char **str)
3989 {
3990   char *p, *q;
3991   const struct aarch64_name_value_pair *o;
3992 
3993   p = q = *str;
3994   while (ISALPHA (*q))
3995     q++;
3996 
3997   o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3998   if (!o)
3999     return PARSE_FAIL;
4000 
4001   *str = q;
4002   return o->value;
4003 }
4004 
4005 /* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option record
4006    and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4007 
4008 static int
4009 parse_barrier_psb (char **str,
4010 		   const struct aarch64_name_value_pair ** hint_opt)
4011 {
4012   char *p, *q;
4013   const struct aarch64_name_value_pair *o;
4014 
4015   p = q = *str;
4016   while (ISALPHA (*q))
4017     q++;
4018 
4019   o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4020   if (!o)
4021     {
4022       set_fatal_syntax_error
4023 	( _("unknown or missing option to PSB"));
4024       return PARSE_FAIL;
4025     }
4026 
4027   if (o->value != 0x11)
4028     {
4029       /* PSB only accepts option name 'CSYNC'.  */
4030       set_syntax_error
4031 	(_("the specified option is not accepted for PSB"));
4032       return PARSE_FAIL;
4033     }
4034 
4035   *str = q;
4036   *hint_opt = o;
4037   return 0;
4038 }
4039 
4040 /* Parse an operand for BTI.  Set *HINT_OPT to the hint-option record
4041    and return 0 if successful.  Otherwise return PARSE_FAIL.  */
4042 
4043 static int
4044 parse_bti_operand (char **str,
4045 		   const struct aarch64_name_value_pair ** hint_opt)
4046 {
4047   char *p, *q;
4048   const struct aarch64_name_value_pair *o;
4049 
4050   p = q = *str;
4051   while (ISALPHA (*q))
4052     q++;
4053 
4054   o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
4055   if (!o)
4056     {
4057       set_fatal_syntax_error
4058 	( _("unknown option to BTI"));
4059       return PARSE_FAIL;
4060     }
4061 
4062   switch (o->value)
4063     {
4064     /* Valid BTI operands.  */
4065     case HINT_OPD_C:
4066     case HINT_OPD_J:
4067     case HINT_OPD_JC:
4068       break;
4069 
4070     default:
4071       set_syntax_error
4072 	(_("unknown option to BTI"));
4073       return PARSE_FAIL;
4074     }
4075 
4076   *str = q;
4077   *hint_opt = o;
4078   return 0;
4079 }
4080 
4081 /* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
4082    Returns the encoding for the option, or PARSE_FAIL.
4083 
4084    If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
4085    implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
4086 
4087    If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
4088    field, otherwise as a system register.
4089 */
4090 
4091 static int
4092 parse_sys_reg (char **str, struct hash_control *sys_regs,
4093 	       int imple_defined_p, int pstatefield_p,
4094 	       uint32_t* flags)
4095 {
4096   char *p, *q;
4097   char buf[32];
4098   const aarch64_sys_reg *o;
4099   int value;
4100 
4101   p = buf;
4102   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4103     if (p < buf + 31)
4104       *p++ = TOLOWER (*q);
4105   *p = '\0';
4106   /* Assert that BUF be large enough.  */
4107   gas_assert (p - buf == q - *str);
4108 
4109   o = hash_find (sys_regs, buf);
4110   if (!o)
4111     {
4112       if (!imple_defined_p)
4113 	return PARSE_FAIL;
4114       else
4115 	{
4116 	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
4117 	  unsigned int op0, op1, cn, cm, op2;
4118 
4119 	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
4120 	      != 5)
4121 	    return PARSE_FAIL;
4122 	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
4123 	    return PARSE_FAIL;
4124 	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
4125 	  if (flags)
4126 	    *flags = 0;
4127 	}
4128     }
4129   else
4130     {
4131       if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
4132 	as_bad (_("selected processor does not support PSTATE field "
4133 		  "name '%s'"), buf);
4134       if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
4135 	as_bad (_("selected processor does not support system register "
4136 		  "name '%s'"), buf);
4137       if (aarch64_sys_reg_deprecated_p (o))
4138 	as_warn (_("system register name '%s' is deprecated and may be "
4139 		   "removed in a future release"), buf);
4140       value = o->value;
4141       if (flags)
4142 	*flags = o->flags;
4143     }
4144 
4145   *str = q;
4146   return value;
4147 }
4148 
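/* Worked example (illustrative): for the implementation-defined name
   "s3_0_c15_c2_0" the fields are op0=3, op1=0, Cn=15, Cm=2, op2=0, so the
   value computed above is
   (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0 == 0xc000 | 0x780 | 0x10
   == 0xc790.  */
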
4149 /* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
4150    for the option, or NULL.  */
4151 
4152 static const aarch64_sys_ins_reg *
4153 parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
4154 {
4155   char *p, *q;
4156   char buf[32];
4157   const aarch64_sys_ins_reg *o;
4158 
4159   p = buf;
4160   for (q = *str; ISALNUM (*q) || *q == '_'; q++)
4161     if (p < buf + 31)
4162       *p++ = TOLOWER (*q);
4163   *p = '\0';
4164 
4165   o = hash_find (sys_ins_regs, buf);
4166   if (!o)
4167     return NULL;
4168 
4169   if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
4170     as_bad (_("selected processor does not support system register "
4171 	      "name '%s'"), buf);
4172 
4173   *str = q;
4174   return o;
4175 }
4176 
4177 #define po_char_or_fail(chr) do {				\
4178     if (! skip_past_char (&str, chr))				\
4179       goto failure;						\
4180 } while (0)
4181 
4182 #define po_reg_or_fail(regtype) do {				\
4183     val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
4184     if (val == PARSE_FAIL)					\
4185       {								\
4186 	set_default_error ();					\
4187 	goto failure;						\
4188       }								\
4189   } while (0)
4190 
4191 #define po_int_reg_or_fail(reg_type) do {			\
4192     reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
4193     if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
4194       {								\
4195 	set_default_error ();					\
4196 	goto failure;						\
4197       }								\
4198     info->reg.regno = reg->number;				\
4199     info->qualifier = qualifier;				\
4200   } while (0)
4201 
4202 #define po_imm_nc_or_fail() do {				\
4203     if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
4204       goto failure;						\
4205   } while (0)
4206 
4207 #define po_imm_or_fail(min, max) do {				\
4208     if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
4209       goto failure;						\
4210     if (val < min || val > max)					\
4211       {								\
4212 	set_fatal_syntax_error (_("immediate value out of range "\
4213 #min " to "#max));						\
4214 	goto failure;						\
4215       }								\
4216   } while (0)
4217 
4218 #define po_enum_or_fail(array) do {				\
4219     if (!parse_enum_string (&str, &val, array,			\
4220 			    ARRAY_SIZE (array), imm_reg_type))	\
4221       goto failure;						\
4222   } while (0)
4223 
4224 #define po_misc_or_fail(expr) do {				\
4225     if (!expr)							\
4226       goto failure;						\
4227   } while (0)
4228 
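/* A sketch of how parse_operands (defined later in this file) typically uses
   the po_* helpers above; the particular arguments here are only examples:

     po_char_or_fail ('[');                // consume a mandatory '['
     po_int_reg_or_fail (REG_TYPE_R_Z);    // parse Xn/Wn into *info
     po_imm_or_fail (0, 63);               // immediate restricted to 0..63  */
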
4229 /* encode the 12-bit imm field of Add/sub immediate */
4230 static inline uint32_t
4231 encode_addsub_imm (uint32_t imm)
4232 {
4233   return imm << 10;
4234 }
4235 
4236 /* encode the shift amount field of Add/sub immediate */
4237 static inline uint32_t
4238 encode_addsub_imm_shift_amount (uint32_t cnt)
4239 {
4240   return cnt << 22;
4241 }
4242 
4243 
4244 /* encode the imm field of Adr instruction */
4245 static inline uint32_t
4246 encode_adr_imm (uint32_t imm)
4247 {
4248   return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
4249 	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
4250 }
4251 
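/* Worked example (illustrative): for imm = 0x1ffff the low two bits (0x3)
   land in bits [30:29] and bits [20:2] of the immediate (0x7fff) land in
   bits [23:5], i.e. encode_adr_imm (0x1ffff) == (0x3 << 29) | (0x7fff << 5).  */
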
4252 /* encode the immediate field of Move wide immediate */
4253 static inline uint32_t
4254 encode_movw_imm (uint32_t imm)
4255 {
4256   return imm << 5;
4257 }
4258 
4259 /* encode the 26-bit offset of unconditional branch */
4260 static inline uint32_t
4261 encode_branch_ofs_26 (uint32_t ofs)
4262 {
4263   return ofs & ((1 << 26) - 1);
4264 }
4265 
4266 /* encode the 19-bit offset of conditional branch and compare & branch */
4267 static inline uint32_t
4268 encode_cond_branch_ofs_19 (uint32_t ofs)
4269 {
4270   return (ofs & ((1 << 19) - 1)) << 5;
4271 }
4272 
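/* For instance (assuming the caller has already converted the byte offset
   into an instruction-word count, as md_apply_fix does): a conditional
   branch to a target 0x40 bytes ahead passes ofs = 0x10, and the imm19
   field becomes (0x10 & 0x7ffff) << 5 == 0x200.  */
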
4273 /* encode the 19-bit offset of ld literal */
4274 static inline uint32_t
4275 encode_ld_lit_ofs_19 (uint32_t ofs)
4276 {
4277   return (ofs & ((1 << 19) - 1)) << 5;
4278 }
4279 
4280 /* Encode the 14-bit offset of test & branch.  */
4281 static inline uint32_t
4282 encode_tst_branch_ofs_14 (uint32_t ofs)
4283 {
4284   return (ofs & ((1 << 14) - 1)) << 5;
4285 }
4286 
4287 /* Encode the 16-bit imm field of svc/hvc/smc.  */
4288 static inline uint32_t
4289 encode_svc_imm (uint32_t imm)
4290 {
4291   return imm << 5;
4292 }
4293 
4294 /* Reencode add(s) to sub(s), or sub(s) to add(s).  */
4295 static inline uint32_t
4296 reencode_addsub_switch_add_sub (uint32_t opcode)
4297 {
4298   return opcode ^ (1 << 30);
4299 }
4300 
4301 static inline uint32_t
4302 reencode_movzn_to_movz (uint32_t opcode)
4303 {
4304   return opcode | (1 << 30);
4305 }
4306 
4307 static inline uint32_t
4308 reencode_movzn_to_movn (uint32_t opcode)
4309 {
4310   return opcode & ~(1 << 30);
4311 }
4312 
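/* Bit 30 distinguishes MOVZ (set) from MOVN (clear), and likewise selects
   between ADD and SUB for reencode_addsub_switch_add_sub.  For example, the
   64-bit MOVN encoding 0x92800000 ("movn x0, #0") becomes 0xd2800000
   ("movz x0, #0") once bit 30 is set.  */
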
4313 /* Overall per-instruction processing.	*/
4314 
4315 /* We need to be able to fix up arbitrary expressions in some statements.
4316    This is so that we can handle symbols that are an arbitrary distance from
4317    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4318    which returns part of an address in a form which will be valid for
4319    a data instruction.	We do this by pushing the expression into a symbol
4320    in the expr_section, and creating a fix for that.  */
4321 
4322 static fixS *
4323 fix_new_aarch64 (fragS * frag,
4324 		 int where,
4325 		 short int size, expressionS * exp, int pc_rel, int reloc)
4326 {
4327   fixS *new_fix;
4328 
4329   switch (exp->X_op)
4330     {
4331     case O_constant:
4332     case O_symbol:
4333     case O_add:
4334     case O_subtract:
4335       new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4336       break;
4337 
4338     default:
4339       new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4340 			 pc_rel, reloc);
4341       break;
4342     }
4343   return new_fix;
4344 }
4345 
4346 /* Diagnostics on operands errors.  */
4347 
4348 /* By default, output a verbose error message.
4349    Disable verbose error messages with -mno-verbose-error.  */
4350 static int verbose_error_p = 1;
4351 
4352 #ifdef DEBUG_AARCH64
4353 /* N.B. this is only for the purpose of debugging.  */
4354 const char* operand_mismatch_kind_names[] =
4355 {
4356   "AARCH64_OPDE_NIL",
4357   "AARCH64_OPDE_RECOVERABLE",
4358   "AARCH64_OPDE_SYNTAX_ERROR",
4359   "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4360   "AARCH64_OPDE_INVALID_VARIANT",
4361   "AARCH64_OPDE_OUT_OF_RANGE",
4362   "AARCH64_OPDE_UNALIGNED",
4363   "AARCH64_OPDE_REG_LIST",
4364   "AARCH64_OPDE_OTHER_ERROR",
4365 };
4366 #endif /* DEBUG_AARCH64 */
4367 
4368 /* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4369 
4370    When multiple errors of different kinds are found in the same assembly
4371    line, only the error of the highest severity will be picked up for
4372    issuing the diagnostics.  */
4373 
4374 static inline bfd_boolean
4375 operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4376 				 enum aarch64_operand_error_kind rhs)
4377 {
4378   gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4379   gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4380   gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4381   gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4382   gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4383   gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4384   gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4385   gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4386   return lhs > rhs;
4387 }
4388 
4389 /* Helper routine to get the mnemonic name from the assembly instruction
4390    line; should only be called for diagnostic purposes, as a string copy
4391    operation is involved, which may affect the runtime performance if
4392    used elsewhere.  */
4393 
4394 static const char*
4395 get_mnemonic_name (const char *str)
4396 {
4397   static char mnemonic[32];
4398   char *ptr;
4399 
4400   /* Get the first 31 bytes and assume that the full name is included.  */
4401   strncpy (mnemonic, str, 31);
4402   mnemonic[31] = '\0';
4403 
4404   /* Scan up to the end of the mnemonic, which must end in white space,
4405      '.', or end of string.  */
4406   for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4407     ;
4408 
4409   *ptr = '\0';
4410 
4411   /* Append '...' to the truncated long name.  */
4412   if (ptr - mnemonic == 31)
4413     mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4414 
4415   return mnemonic;
4416 }
4417 
4418 static void
4419 reset_aarch64_instruction (aarch64_instruction *instruction)
4420 {
4421   memset (instruction, '\0', sizeof (aarch64_instruction));
4422   instruction->reloc.type = BFD_RELOC_UNUSED;
4423 }
4424 
4425 /* Data structures storing one user error in the assembly code related to
4426    operands.  */
4427 
4428 struct operand_error_record
4429 {
4430   const aarch64_opcode *opcode;
4431   aarch64_operand_error detail;
4432   struct operand_error_record *next;
4433 };
4434 
4435 typedef struct operand_error_record operand_error_record;
4436 
4437 struct operand_errors
4438 {
4439   operand_error_record *head;
4440   operand_error_record *tail;
4441 };
4442 
4443 typedef struct operand_errors operand_errors;
4444 
4445 /* Top-level data structure reporting user errors for the current line of
4446    the assembly code.
4447    The way md_assemble works is that all opcodes sharing the same mnemonic
4448    name are iterated to find a match to the assembly line.  In this data
4449    structure, each such opcode will have one operand_error_record
4450    allocated and inserted.  In other words, excess errors related to
4451    a single opcode are disregarded.  */
4452 operand_errors operand_error_report;
4453 
4454 /* Free record nodes.  */
4455 static operand_error_record *free_opnd_error_record_nodes = NULL;
4456 
4457 /* Initialize the data structure that stores the operand mismatch
4458    information on assembling one line of the assembly code.  */
4459 static void
4460 init_operand_error_report (void)
4461 {
4462   if (operand_error_report.head != NULL)
4463     {
4464       gas_assert (operand_error_report.tail != NULL);
4465       operand_error_report.tail->next = free_opnd_error_record_nodes;
4466       free_opnd_error_record_nodes = operand_error_report.head;
4467       operand_error_report.head = NULL;
4468       operand_error_report.tail = NULL;
4469       return;
4470     }
4471   gas_assert (operand_error_report.tail == NULL);
4472 }
4473 
4474 /* Return TRUE if some operand error has been recorded during the
4475    parsing of the current assembly line using the opcode *OPCODE;
4476    otherwise return FALSE.  */
4477 static inline bfd_boolean
4478 opcode_has_operand_error_p (const aarch64_opcode *opcode)
4479 {
4480   operand_error_record *record = operand_error_report.head;
4481   return record && record->opcode == opcode;
4482 }
4483 
4484 /* Add the error record *NEW_RECORD to operand_error_report.  The record's
4485    OPCODE field is initialized with OPCODE.
4486    N.B. only one record for each opcode, i.e. at most one error is
4487    recorded for each instruction template.  */
4488 
4489 static void
4490 add_operand_error_record (const operand_error_record* new_record)
4491 {
4492   const aarch64_opcode *opcode = new_record->opcode;
4493   operand_error_record* record = operand_error_report.head;
4494 
4495   /* The record may have been created for this opcode.  If not, we need
4496      to prepare one.  */
4497   if (! opcode_has_operand_error_p (opcode))
4498     {
4499       /* Get one empty record.  */
4500       if (free_opnd_error_record_nodes == NULL)
4501 	{
4502 	  record = XNEW (operand_error_record);
4503 	}
4504       else
4505 	{
4506 	  record = free_opnd_error_record_nodes;
4507 	  free_opnd_error_record_nodes = record->next;
4508 	}
4509       record->opcode = opcode;
4510       /* Insert at the head.  */
4511       record->next = operand_error_report.head;
4512       operand_error_report.head = record;
4513       if (operand_error_report.tail == NULL)
4514 	operand_error_report.tail = record;
4515     }
4516   else if (record->detail.kind != AARCH64_OPDE_NIL
4517 	   && record->detail.index <= new_record->detail.index
4518 	   && operand_error_higher_severity_p (record->detail.kind,
4519 					       new_record->detail.kind))
4520     {
4521       /* In the case of multiple errors found on operands related with a
4522 	 single opcode, only record the error of the leftmost operand and
4523 	 only if the error is of higher severity.  */
4524       DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4525 		   " the existing error %s on operand %d",
4526 		   operand_mismatch_kind_names[new_record->detail.kind],
4527 		   new_record->detail.index,
4528 		   operand_mismatch_kind_names[record->detail.kind],
4529 		   record->detail.index);
4530       return;
4531     }
4532 
4533   record->detail = new_record->detail;
4534 }
4535 
4536 static inline void
4537 record_operand_error_info (const aarch64_opcode *opcode,
4538 			   aarch64_operand_error *error_info)
4539 {
4540   operand_error_record record;
4541   record.opcode = opcode;
4542   record.detail = *error_info;
4543   add_operand_error_record (&record);
4544 }
4545 
4546 /* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4547    error message *ERROR, for operand IDX (count from 0).  */
4548 
4549 static void
4550 record_operand_error (const aarch64_opcode *opcode, int idx,
4551 		      enum aarch64_operand_error_kind kind,
4552 		      const char* error)
4553 {
4554   aarch64_operand_error info;
4555   memset(&info, 0, sizeof (info));
4556   info.index = idx;
4557   info.kind = kind;
4558   info.error = error;
4559   info.non_fatal = FALSE;
4560   record_operand_error_info (opcode, &info);
4561 }
4562 
4563 static void
4564 record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4565 				enum aarch64_operand_error_kind kind,
4566 				const char* error, const int *extra_data)
4567 {
4568   aarch64_operand_error info;
4569   info.index = idx;
4570   info.kind = kind;
4571   info.error = error;
4572   info.data[0] = extra_data[0];
4573   info.data[1] = extra_data[1];
4574   info.data[2] = extra_data[2];
4575   info.non_fatal = FALSE;
4576   record_operand_error_info (opcode, &info);
4577 }
4578 
4579 static void
4580 record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4581 				   const char* error, int lower_bound,
4582 				   int upper_bound)
4583 {
4584   int data[3] = {lower_bound, upper_bound, 0};
4585   record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4586 				  error, data);
4587 }
4588 
4589 /* Remove the operand error record for *OPCODE.  */
4590 static void ATTRIBUTE_UNUSED
4591 remove_operand_error_record (const aarch64_opcode *opcode)
4592 {
4593   if (opcode_has_operand_error_p (opcode))
4594     {
4595       operand_error_record* record = operand_error_report.head;
4596       gas_assert (record != NULL && operand_error_report.tail != NULL);
4597       operand_error_report.head = record->next;
4598       record->next = free_opnd_error_record_nodes;
4599       free_opnd_error_record_nodes = record;
4600       if (operand_error_report.head == NULL)
4601 	{
4602 	  gas_assert (operand_error_report.tail == record);
4603 	  operand_error_report.tail = NULL;
4604 	}
4605     }
4606 }
4607 
4608 /* Given the instruction in *INSTR, return the index of the best matched
4609    qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4610 
4611    Return -1 if there is no qualifier sequence; return the first match
4612    if multiple matches are found.  */
4613 
4614 static int
4615 find_best_match (const aarch64_inst *instr,
4616 		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4617 {
4618   int i, num_opnds, max_num_matched, idx;
4619 
4620   num_opnds = aarch64_num_of_operands (instr->opcode);
4621   if (num_opnds == 0)
4622     {
4623       DEBUG_TRACE ("no operand");
4624       return -1;
4625     }
4626 
4627   max_num_matched = 0;
4628   idx = 0;
4629 
4630   /* For each pattern.  */
4631   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4632     {
4633       int j, num_matched;
4634       const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4635 
4636       /* Most opcodes have far fewer patterns in the list.  */
4637       if (empty_qualifier_sequence_p (qualifiers))
4638 	{
4639 	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4640 	  break;
4641 	}
4642 
4643       for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4644 	if (*qualifiers == instr->operands[j].qualifier)
4645 	  ++num_matched;
4646 
4647       if (num_matched > max_num_matched)
4648 	{
4649 	  max_num_matched = num_matched;
4650 	  idx = i;
4651 	}
4652     }
4653 
4654   DEBUG_TRACE ("return with %d", idx);
4655   return idx;
4656 }
4657 
4658 /* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4659    corresponding operands in *INSTR.  */
4660 
4661 static inline void
4662 assign_qualifier_sequence (aarch64_inst *instr,
4663 			   const aarch64_opnd_qualifier_t *qualifiers)
4664 {
4665   int i = 0;
4666   int num_opnds = aarch64_num_of_operands (instr->opcode);
4667   gas_assert (num_opnds);
4668   for (i = 0; i < num_opnds; ++i, ++qualifiers)
4669     instr->operands[i].qualifier = *qualifiers;
4670 }
4671 
4672 /* Print operands for the diagnosis purpose.  */
4673 
4674 static void
4675 print_operands (char *buf, const aarch64_opcode *opcode,
4676 		const aarch64_opnd_info *opnds)
4677 {
4678   int i;
4679 
4680   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4681     {
4682       char str[128];
4683 
4684       /* We rely mainly on the opcode's operand info; however, we also look
4685 	 into inst->operands to support printing of the optional
4686 	 operand.
4687 	 The two operand codes should be the same in all cases, apart from
4688 	 when the operand can be optional.  */
4689       if (opcode->operands[i] == AARCH64_OPND_NIL
4690 	  || opnds[i].type == AARCH64_OPND_NIL)
4691 	break;
4692 
4693       /* Generate the operand string in STR.  */
4694       aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL,
4695 			     NULL);
4696 
4697       /* Delimiter.  */
4698       if (str[0] != '\0')
4699 	strcat (buf, i == 0 ? " " : ", ");
4700 
4701       /* Append the operand string.  */
4702       strcat (buf, str);
4703     }
4704 }
4705 
4706 /* Send to stderr a string as information.  */
4707 
4708 static void
4709 output_info (const char *format, ...)
4710 {
4711   const char *file;
4712   unsigned int line;
4713   va_list args;
4714 
4715   file = as_where (&line);
4716   if (file)
4717     {
4718       if (line != 0)
4719 	fprintf (stderr, "%s:%u: ", file, line);
4720       else
4721 	fprintf (stderr, "%s: ", file);
4722     }
4723   fprintf (stderr, _("Info: "));
4724   va_start (args, format);
4725   vfprintf (stderr, format, args);
4726   va_end (args);
4727   (void) putc ('\n', stderr);
4728 }
4729 
4730 /* Output one operand error record.  */
4731 
4732 static void
4733 output_operand_error_record (const operand_error_record *record, char *str)
4734 {
4735   const aarch64_operand_error *detail = &record->detail;
4736   int idx = detail->index;
4737   const aarch64_opcode *opcode = record->opcode;
4738   enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4739 				: AARCH64_OPND_NIL);
4740 
4741   typedef void (*handler_t)(const char *format, ...);
4742   handler_t handler = detail->non_fatal ? as_warn : as_bad;
4743 
4744   switch (detail->kind)
4745     {
4746     case AARCH64_OPDE_NIL:
4747       gas_assert (0);
4748       break;
4749     case AARCH64_OPDE_SYNTAX_ERROR:
4750     case AARCH64_OPDE_RECOVERABLE:
4751     case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4752     case AARCH64_OPDE_OTHER_ERROR:
4753       /* Use the prepared error message if there is one; otherwise use the
4754 	 operand description string to describe the error.  */
4755       if (detail->error != NULL)
4756 	{
4757 	  if (idx < 0)
4758 	    handler (_("%s -- `%s'"), detail->error, str);
4759 	  else
4760 	    handler (_("%s at operand %d -- `%s'"),
4761 		     detail->error, idx + 1, str);
4762 	}
4763       else
4764 	{
4765 	  gas_assert (idx >= 0);
4766 	  handler (_("operand %d must be %s -- `%s'"), idx + 1,
4767 		   aarch64_get_operand_desc (opd_code), str);
4768 	}
4769       break;
4770 
4771     case AARCH64_OPDE_INVALID_VARIANT:
4772       handler (_("operand mismatch -- `%s'"), str);
4773       if (verbose_error_p)
4774 	{
4775 	  /* We will try to correct the erroneous instruction and also provide
4776 	     more information e.g. all other valid variants.
4777 
4778 	     The string representation of the corrected instruction and other
4779 	     valid variants are generated by
4780 
4781 	     1) obtaining the intermediate representation of the erroneous
4782 	     instruction;
4783 	     2) manipulating the IR, e.g. replacing the operand qualifier;
4784 	     3) printing out the instruction by calling the printer functions
4785 	     shared with the disassembler.
4786 
4787 	     The limitation of this method is that the exact input assembly
4788 	     line cannot be accurately reproduced in some cases, for example an
4789 	     optional operand present in the actual assembly line will be
4790 	     omitted in the output; likewise for the optional syntax rules,
4791 	     e.g. the # before the immediate.  Another limitation is that the
4792 	     assembly symbols and relocation operations in the assembly line
4793 	     currently cannot be printed out in the error report.  Last but not
4794 	     least, when other errors co-exist with this error, the
4795 	     'corrected' instruction may still be incorrect, e.g. given
4796 	       'ldnp h0,h1,[x0,#6]!'
4797 	     this diagnosis will provide the version:
4798 	       'ldnp s0,s1,[x0,#6]!'
4799 	     which is still not right.  */
4800 	  size_t len = strlen (get_mnemonic_name (str));
4801 	  int i, qlf_idx;
4802 	  bfd_boolean result;
4803 	  char buf[2048];
4804 	  aarch64_inst *inst_base = &inst.base;
4805 	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4806 
4807 	  /* Init inst.  */
4808 	  reset_aarch64_instruction (&inst);
4809 	  inst_base->opcode = opcode;
4810 
4811 	  /* Reset the error report so that there is no side effect on the
4812 	     following operand parsing.  */
4813 	  init_operand_error_report ();
4814 
4815 	  /* Fill inst.  */
4816 	  result = parse_operands (str + len, opcode)
4817 	    && programmer_friendly_fixup (&inst);
4818 	  gas_assert (result);
4819 	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4820 					  NULL, NULL, insn_sequence);
4821 	  gas_assert (!result);
4822 
4823 	  /* Find the most matched qualifier sequence.  */
4824 	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4825 	  gas_assert (qlf_idx > -1);
4826 
4827 	  /* Assign the qualifiers.  */
4828 	  assign_qualifier_sequence (inst_base,
4829 				     opcode->qualifiers_list[qlf_idx]);
4830 
4831 	  /* Print the hint.  */
4832 	  output_info (_("   did you mean this?"));
4833 	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4834 	  print_operands (buf, opcode, inst_base->operands);
4835 	  output_info (_("   %s"), buf);
4836 
4837 	  /* Print out other variant(s) if there are any.  */
4838 	  if (qlf_idx != 0 ||
4839 	      !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4840 	    output_info (_("   other valid variant(s):"));
4841 
4842 	  /* For each pattern.  */
4843 	  qualifiers_list = opcode->qualifiers_list;
4844 	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4845 	    {
4846 	      /* Most opcodes have far fewer patterns in the list.
4847 		 The first NIL qualifier indicates the end of the list.  */
4848 	      if (empty_qualifier_sequence_p (*qualifiers_list))
4849 		break;
4850 
4851 	      if (i != qlf_idx)
4852 		{
4853 		  /* Mnemonics name.  */
4854 		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4855 
4856 		  /* Assign the qualifiers.  */
4857 		  assign_qualifier_sequence (inst_base, *qualifiers_list);
4858 
4859 		  /* Print instruction.  */
4860 		  print_operands (buf, opcode, inst_base->operands);
4861 
4862 		  output_info (_("   %s"), buf);
4863 		}
4864 	    }
4865 	}
4866       break;
4867 
4868     case AARCH64_OPDE_UNTIED_OPERAND:
4869       handler (_("operand %d must be the same register as operand 1 -- `%s'"),
4870 	       detail->index + 1, str);
4871       break;
4872 
4873     case AARCH64_OPDE_OUT_OF_RANGE:
4874       if (detail->data[0] != detail->data[1])
4875 	handler (_("%s out of range %d to %d at operand %d -- `%s'"),
4876 		 detail->error ? detail->error : _("immediate value"),
4877 		 detail->data[0], detail->data[1], idx + 1, str);
4878       else
4879 	handler (_("%s must be %d at operand %d -- `%s'"),
4880 		 detail->error ? detail->error : _("immediate value"),
4881 		 detail->data[0], idx + 1, str);
4882       break;
4883 
4884     case AARCH64_OPDE_REG_LIST:
4885       if (detail->data[0] == 1)
4886 	handler (_("invalid number of registers in the list; "
4887 		   "only 1 register is expected at operand %d -- `%s'"),
4888 		 idx + 1, str);
4889       else
4890 	handler (_("invalid number of registers in the list; "
4891 		   "%d registers are expected at operand %d -- `%s'"),
4892 	       detail->data[0], idx + 1, str);
4893       break;
4894 
4895     case AARCH64_OPDE_UNALIGNED:
4896       handler (_("immediate value must be a multiple of "
4897 		 "%d at operand %d -- `%s'"),
4898 	       detail->data[0], idx + 1, str);
4899       break;
4900 
4901     default:
4902       gas_assert (0);
4903       break;
4904     }
4905 }
4906 
4907 /* Process and output the error message about the operand mismatching.
4908 
4909    When this function is called, the operand error information has
4910    been collected for an assembly line and there will be multiple
4911    errors in the case of multiple instruction templates; output the
4912    error message that most closely describes the problem.
4913 
4914    The errors to be printed can be filtered on printing all errors
4915    or only non-fatal errors.  This distinction has to be made because
4916    the error buffer may already be filled with fatal errors we don't want to
4917    print due to the different instruction templates.  */
4918 
4919 static void
4920 output_operand_error_report (char *str, bfd_boolean non_fatal_only)
4921 {
4922   int largest_error_pos;
4923   const char *msg = NULL;
4924   enum aarch64_operand_error_kind kind;
4925   operand_error_record *curr;
4926   operand_error_record *head = operand_error_report.head;
4927   operand_error_record *record = NULL;
4928 
4929   /* No error to report.  */
4930   if (head == NULL)
4931     return;
4932 
4933   gas_assert (head != NULL && operand_error_report.tail != NULL);
4934 
4935   /* Only one error.  */
4936   if (head == operand_error_report.tail)
4937     {
4938       /* If the only error is a non-fatal one and we don't want to print it,
4939 	 just exit.  */
4940       if (!non_fatal_only || head->detail.non_fatal)
4941 	{
4942 	  DEBUG_TRACE ("single opcode entry with error kind: %s",
4943 		       operand_mismatch_kind_names[head->detail.kind]);
4944 	  output_operand_error_record (head, str);
4945 	}
4946       return;
4947     }
4948 
4949   /* Find the error kind of the highest severity.  */
4950   DEBUG_TRACE ("multiple opcode entries with error kind");
4951   kind = AARCH64_OPDE_NIL;
4952   for (curr = head; curr != NULL; curr = curr->next)
4953     {
4954       gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4955       DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4956       if (operand_error_higher_severity_p (curr->detail.kind, kind)
4957 	  && (!non_fatal_only || (non_fatal_only && curr->detail.non_fatal)))
4958 	kind = curr->detail.kind;
4959     }
4960 
4961   gas_assert (kind != AARCH64_OPDE_NIL || non_fatal_only);
4962 
4963   /* Pick up one of errors of KIND to report.  */
4964   largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
4965   for (curr = head; curr != NULL; curr = curr->next)
4966     {
4967       /* If we don't want to print non-fatal errors then don't consider them
4968 	 at all.  */
4969       if (curr->detail.kind != kind
4970 	  || (non_fatal_only && !curr->detail.non_fatal))
4971 	continue;
4972       /* If there are multiple errors, pick up the one with the highest
4973 	 mismatching operand index.  In the case of multiple errors with
4974 	 the equally highest operand index, pick up the first one or the
4975 	 first one with non-NULL error message.  */
4976       if (curr->detail.index > largest_error_pos
4977 	  || (curr->detail.index == largest_error_pos && msg == NULL
4978 	      && curr->detail.error != NULL))
4979 	{
4980 	  largest_error_pos = curr->detail.index;
4981 	  record = curr;
4982 	  msg = record->detail.error;
4983 	}
4984     }
4985 
4986   /* The way errors are collected in the back-end is a bit non-intuitive.  But
4987      essentially, because each operand template is tried recursively you may
4988      always have errors collected from the previously tried OPND.  These are
4989      usually skipped if there is one successful match.  However now with the
4990      non-fatal errors we have to ignore those previously collected hard errors
4991      when we're only interested in printing the non-fatal ones.  This condition
4992      prevents us from printing errors that are not appropriate, since we did
4993      match a condition, but it also has warnings that it wants to print.  */
4994   if (non_fatal_only && !record)
4995     return;
4996 
4997   gas_assert (largest_error_pos != -2 && record != NULL);
4998   DEBUG_TRACE ("Pick up error kind %s to report",
4999 	       operand_mismatch_kind_names[record->detail.kind]);
5000 
5001   /* Output.  */
5002   output_operand_error_record (record, str);
5003 }
5004 
5005 /* Write an AARCH64 instruction to buf - always little-endian.  */
5006 static void
5007 put_aarch64_insn (char *buf, uint32_t insn)
5008 {
5009   unsigned char *where = (unsigned char *) buf;
5010   where[0] = insn;
5011   where[1] = insn >> 8;
5012   where[2] = insn >> 16;
5013   where[3] = insn >> 24;
5014 }
5015 
5016 static uint32_t
5017 get_aarch64_insn (char *buf)
5018 {
5019   unsigned char *where = (unsigned char *) buf;
5020   uint32_t result;
5021   result = ((where[0] | (where[1] << 8) | (where[2] << 16)
5022 	     | ((uint32_t) where[3] << 24)));
5023   return result;
5024 }
5025 
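/* Example: the NOP encoding 0xd503201f is written by put_aarch64_insn as the
   byte sequence 1f 20 03 d5 (always little-endian, as noted above), and
   get_aarch64_insn reassembles those bytes back into 0xd503201f.  */
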
5026 static void
5027 output_inst (struct aarch64_inst *new_inst)
5028 {
5029   char *to = NULL;
5030 
5031   to = frag_more (INSN_SIZE);
5032 
5033   frag_now->tc_frag_data.recorded = 1;
5034 
5035   put_aarch64_insn (to, inst.base.value);
5036 
5037   if (inst.reloc.type != BFD_RELOC_UNUSED)
5038     {
5039       fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
5040 				    INSN_SIZE, &inst.reloc.exp,
5041 				    inst.reloc.pc_rel,
5042 				    inst.reloc.type);
5043       DEBUG_TRACE ("Prepared relocation fix up");
5044       /* Don't check the addend value against the instruction size,
5045          that's the job of our code in md_apply_fix(). */
5046       fixp->fx_no_overflow = 1;
5047       if (new_inst != NULL)
5048 	fixp->tc_fix_data.inst = new_inst;
5049       if (aarch64_gas_internal_fixup_p ())
5050 	{
5051 	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
5052 	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
5053 	  fixp->fx_addnumber = inst.reloc.flags;
5054 	}
5055     }
5056 
5057   dwarf2_emit_insn (INSN_SIZE);
5058 }
5059 
5060 /* Link together opcodes of the same name.  */
5061 
5062 struct templates
5063 {
5064   aarch64_opcode *opcode;
5065   struct templates *next;
5066 };
5067 
5068 typedef struct templates templates;
5069 
5070 static templates *
5071 lookup_mnemonic (const char *start, int len)
5072 {
5073   templates *templ = NULL;
5074 
5075   templ = hash_find_n (aarch64_ops_hsh, start, len);
5076   return templ;
5077 }
5078 
5079 /* Subroutine of md_assemble, responsible for looking up the primary
5080    opcode from the mnemonic the user wrote.  STR points to the
5081    beginning of the mnemonic. */
5082 
5083 static templates *
5084 opcode_lookup (char **str)
5085 {
5086   char *end, *base, *dot;
5087   const aarch64_cond *cond;
5088   char condname[16];
5089   int len;
5090 
5091   /* Scan up to the end of the mnemonic, which must end in white space,
5092      '.', or end of string.  */
5093   dot = 0;
5094   for (base = end = *str; is_part_of_name(*end); end++)
5095     if (*end == '.' && !dot)
5096       dot = end;
5097 
5098   if (end == base || dot == base)
5099     return 0;
5100 
5101   inst.cond = COND_ALWAYS;
5102 
5103   /* Handle a possible condition.  */
5104   if (dot)
5105     {
5106       cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
5107       if (cond)
5108 	{
5109 	  inst.cond = cond->value;
5110 	  *str = end;
5111 	}
5112       else
5113 	{
5114 	  *str = dot;
5115 	  return 0;
5116 	}
5117       len = dot - base;
5118     }
5119   else
5120     {
5121       *str = end;
5122       len = end - base;
5123     }
5124 
5125   if (inst.cond == COND_ALWAYS)
5126     {
5127       /* Look for unaffixed mnemonic.  */
5128       return lookup_mnemonic (base, len);
5129     }
5130   else if (len <= 13)
5131     {
5132       /* append ".c" to mnemonic if conditional */
5133       memcpy (condname, base, len);
5134       memcpy (condname + len, ".c", 2);
5135       base = condname;
5136       len += 2;
5137       return lookup_mnemonic (base, len);
5138     }
5139 
5140   return NULL;
5141 }
5142 
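/* For example (a sketch of the lookup above): "b.ne" splits into the base
   "b" plus the condition "ne"; since a condition is present, the table is
   searched for the ".c"-suffixed template "b.c", with inst.cond set from the
   "ne" entry of the condition hash.  */
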
5143 /* Internal helper routine converting a vector_type_el structure *VECTYPE
5144    to a corresponding operand qualifier.  */
5145 
5146 static inline aarch64_opnd_qualifier_t
5147 vectype_to_qualifier (const struct vector_type_el *vectype)
5148 {
5149   /* Element size in bytes indexed by vector_el_type.  */
5150   const unsigned char ele_size[5]
5151     = {1, 2, 4, 8, 16};
5152   const unsigned int ele_base [5] =
5153     {
5154       AARCH64_OPND_QLF_V_4B,
5155       AARCH64_OPND_QLF_V_2H,
5156       AARCH64_OPND_QLF_V_2S,
5157       AARCH64_OPND_QLF_V_1D,
5158       AARCH64_OPND_QLF_V_1Q
5159   };
5160 
5161   if (!vectype->defined || vectype->type == NT_invtype)
5162     goto vectype_conversion_fail;
5163 
5164   if (vectype->type == NT_zero)
5165     return AARCH64_OPND_QLF_P_Z;
5166   if (vectype->type == NT_merge)
5167     return AARCH64_OPND_QLF_P_M;
5168 
5169   gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
5170 
5171   if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
5172     {
5173       /* Special case S_4B.  */
5174       if (vectype->type == NT_b && vectype->width == 4)
5175 	return AARCH64_OPND_QLF_S_4B;
5176 
5177       /* Special case S_2H.  */
5178       if (vectype->type == NT_h && vectype->width == 2)
5179 	return AARCH64_OPND_QLF_S_2H;
5180 
5181       /* Vector element register.  */
5182       return AARCH64_OPND_QLF_S_B + vectype->type;
5183     }
5184   else
5185     {
5186       /* Vector register.  */
5187       int reg_size = ele_size[vectype->type] * vectype->width;
5188       unsigned offset;
5189       unsigned shift;
5190       if (reg_size != 16 && reg_size != 8 && reg_size != 4)
5191 	goto vectype_conversion_fail;
5192 
5193       /* The conversion is by calculating the offset from the base operand
5194 	 qualifier for the vector type.  The operand qualifiers are regular
5195 	 enough that the offset can be established by shifting the vector width by
5196 	 a vector-type dependent amount.  */
5197       shift = 0;
5198       if (vectype->type == NT_b)
5199 	shift = 3;
5200       else if (vectype->type == NT_h || vectype->type == NT_s)
5201 	shift = 2;
5202       else if (vectype->type >= NT_d)
5203 	shift = 1;
5204       else
5205 	gas_assert (0);
5206 
5207       offset = ele_base [vectype->type] + (vectype->width >> shift);
5208       gas_assert (AARCH64_OPND_QLF_V_4B <= offset
5209 		  && offset <= AARCH64_OPND_QLF_V_1Q);
5210       return offset;
5211     }
5212 
5213 vectype_conversion_fail:
5214   first_error (_("bad vector arrangement type"));
5215   return AARCH64_OPND_QLF_NIL;
5216 }
5217 
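/* Worked example (illustrative): for the arrangement "4s" the parser yields
   type == NT_s and width == 4, so reg_size = 4 * 4 = 16, shift = 2, and the
   result is ele_base[NT_s] + (4 >> 2), i.e. one step above
   AARCH64_OPND_QLF_V_2S, which is AARCH64_OPND_QLF_V_4S.  */
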
5218 /* Process an optional operand that has been omitted from the assembly line.
5219    Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
5220    instruction's opcode entry while IDX is the index of this omitted operand.
5221    */
5222 
5223 static void
5224 process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
5225 			 int idx, aarch64_opnd_info *operand)
5226 {
5227   aarch64_insn default_value = get_optional_operand_default_value (opcode);
5228   gas_assert (optional_operand_p (opcode, idx));
5229   gas_assert (!operand->present);
5230 
5231   switch (type)
5232     {
5233     case AARCH64_OPND_Rd:
5234     case AARCH64_OPND_Rn:
5235     case AARCH64_OPND_Rm:
5236     case AARCH64_OPND_Rt:
5237     case AARCH64_OPND_Rt2:
5238     case AARCH64_OPND_Rt_SP:
5239     case AARCH64_OPND_Rs:
5240     case AARCH64_OPND_Ra:
5241     case AARCH64_OPND_Rt_SYS:
5242     case AARCH64_OPND_Rd_SP:
5243     case AARCH64_OPND_Rn_SP:
5244     case AARCH64_OPND_Rm_SP:
5245     case AARCH64_OPND_Fd:
5246     case AARCH64_OPND_Fn:
5247     case AARCH64_OPND_Fm:
5248     case AARCH64_OPND_Fa:
5249     case AARCH64_OPND_Ft:
5250     case AARCH64_OPND_Ft2:
5251     case AARCH64_OPND_Sd:
5252     case AARCH64_OPND_Sn:
5253     case AARCH64_OPND_Sm:
5254     case AARCH64_OPND_Va:
5255     case AARCH64_OPND_Vd:
5256     case AARCH64_OPND_Vn:
5257     case AARCH64_OPND_Vm:
5258     case AARCH64_OPND_VdD1:
5259     case AARCH64_OPND_VnD1:
5260       operand->reg.regno = default_value;
5261       break;
5262 
5263     case AARCH64_OPND_Ed:
5264     case AARCH64_OPND_En:
5265     case AARCH64_OPND_Em:
5266     case AARCH64_OPND_Em16:
5267     case AARCH64_OPND_SM3_IMM2:
5268       operand->reglane.regno = default_value;
5269       break;
5270 
5271     case AARCH64_OPND_IDX:
5272     case AARCH64_OPND_BIT_NUM:
5273     case AARCH64_OPND_IMMR:
5274     case AARCH64_OPND_IMMS:
5275     case AARCH64_OPND_SHLL_IMM:
5276     case AARCH64_OPND_IMM_VLSL:
5277     case AARCH64_OPND_IMM_VLSR:
5278     case AARCH64_OPND_CCMP_IMM:
5279     case AARCH64_OPND_FBITS:
5280     case AARCH64_OPND_UIMM4:
5281     case AARCH64_OPND_UIMM3_OP1:
5282     case AARCH64_OPND_UIMM3_OP2:
5283     case AARCH64_OPND_IMM:
5284     case AARCH64_OPND_IMM_2:
5285     case AARCH64_OPND_WIDTH:
5286     case AARCH64_OPND_UIMM7:
5287     case AARCH64_OPND_NZCV:
5288     case AARCH64_OPND_SVE_PATTERN:
5289     case AARCH64_OPND_SVE_PRFOP:
5290       operand->imm.value = default_value;
5291       break;
5292 
5293     case AARCH64_OPND_SVE_PATTERN_SCALED:
5294       operand->imm.value = default_value;
5295       operand->shifter.kind = AARCH64_MOD_MUL;
5296       operand->shifter.amount = 1;
5297       break;
5298 
5299     case AARCH64_OPND_EXCEPTION:
5300       inst.reloc.type = BFD_RELOC_UNUSED;
5301       break;
5302 
5303     case AARCH64_OPND_BARRIER_ISB:
5304       operand->barrier = aarch64_barrier_options + default_value;
5305       break;
5306 
5307     case AARCH64_OPND_BTI_TARGET:
5308       operand->hint_option = aarch64_hint_options + default_value;
5309       break;
5310 
5311     default:
5312       break;
5313     }
5314 }
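
/* Example (a sketch, not compiled): in "sys #0, c0, c0, #0" the trailing Xt
   operand is omitted.  Assuming the opcode table declares XZR (31) as the
   default for that operand, the omitted operand is filled in as if "x31" had
   been written; OPCODE below stands for the hypothetical SYS opcode entry.  */
#if 0
aarch64_opnd_info op = {0};
process_omitted_operand (AARCH64_OPND_Rt_SYS, opcode, 4, &op);
gas_assert (op.reg.regno == 31);
#endif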
5315 
5316 /* Process the relocation type for move wide instructions.
5317    Return TRUE on success; otherwise return FALSE.  */
5318 
5319 static bfd_boolean
5320 process_movw_reloc_info (void)
5321 {
5322   int is32;
5323   unsigned shift;
5324 
5325   is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5326 
5327   if (inst.base.opcode->op == OP_MOVK)
5328     switch (inst.reloc.type)
5329       {
5330       case BFD_RELOC_AARCH64_MOVW_G0_S:
5331       case BFD_RELOC_AARCH64_MOVW_G1_S:
5332       case BFD_RELOC_AARCH64_MOVW_G2_S:
5333       case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5334       case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5335       case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5336       case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5337       case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5338       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5339       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5340       case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5341 	set_syntax_error
5342 	  (_("the specified relocation type is not allowed for MOVK"));
5343 	return FALSE;
5344       default:
5345 	break;
5346       }
5347 
5348   switch (inst.reloc.type)
5349     {
5350     case BFD_RELOC_AARCH64_MOVW_G0:
5351     case BFD_RELOC_AARCH64_MOVW_G0_NC:
5352     case BFD_RELOC_AARCH64_MOVW_G0_S:
5353     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5354     case BFD_RELOC_AARCH64_MOVW_PREL_G0:
5355     case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
5356     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5357     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5358     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5359     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5360     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5361     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5362     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5363       shift = 0;
5364       break;
5365     case BFD_RELOC_AARCH64_MOVW_G1:
5366     case BFD_RELOC_AARCH64_MOVW_G1_NC:
5367     case BFD_RELOC_AARCH64_MOVW_G1_S:
5368     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5369     case BFD_RELOC_AARCH64_MOVW_PREL_G1:
5370     case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
5371     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5372     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5373     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5374     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5375     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5376     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5377     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5378       shift = 16;
5379       break;
5380     case BFD_RELOC_AARCH64_MOVW_G2:
5381     case BFD_RELOC_AARCH64_MOVW_G2_NC:
5382     case BFD_RELOC_AARCH64_MOVW_G2_S:
5383     case BFD_RELOC_AARCH64_MOVW_PREL_G2:
5384     case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
5385     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5386     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5387       if (is32)
5388 	{
5389 	  set_fatal_syntax_error
5390 	    (_("the specified relocation type is not allowed for 32-bit "
5391 	       "register"));
5392 	  return FALSE;
5393 	}
5394       shift = 32;
5395       break;
5396     case BFD_RELOC_AARCH64_MOVW_G3:
5397     case BFD_RELOC_AARCH64_MOVW_PREL_G3:
5398       if (is32)
5399 	{
5400 	  set_fatal_syntax_error
5401 	    (_("the specified relocation type is not allowed for 32-bit "
5402 	       "register"));
5403 	  return FALSE;
5404 	}
5405       shift = 48;
5406       break;
5407     default:
5408       /* More cases should be added when more MOVW-related relocation types
5409          are supported in GAS.  */
5410       gas_assert (aarch64_gas_internal_fixup_p ());
5411       /* The shift amount should have already been set by the parser.  */
5412       return TRUE;
5413     }
5414   inst.base.operands[1].shifter.amount = shift;
5415   return TRUE;
5416 }
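
/* Worked example (a sketch, not compiled; it assumes the global "inst" has
   already been populated by the operand parser for "movz x0, #:abs_g2:sym",
   i.e. a 64-bit destination and BFD_RELOC_AARCH64_MOVW_G2 in inst.reloc).  */
#if 0
if (process_movw_reloc_info ())
  /* G2 selects the third 16-bit chunk, so operand 1 gets an LSL #32.  */
  gas_assert (inst.base.operands[1].shifter.amount == 32);
/* The same relocation on "movz w0, ..." is rejected above, since a 32-bit
   register only has two 16-bit chunks.  */
#endif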
5417 
5418 /* A primitive base-2 log calculator.  */
5419 
5420 static inline unsigned int
5421 get_logsz (unsigned int size)
5422 {
5423   const unsigned char ls[16] =
5424     {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5425   if (size > 16)
5426     {
5427       gas_assert (0);
5428       return -1;
5429     }
5430   gas_assert (ls[size - 1] != (unsigned char)-1);
5431   return ls[size - 1];
5432 }
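
/* The mapping implemented by the table above, spelled out (a sketch, not
   compiled): only the access sizes used by the LDST_LO12 relocations are
   valid inputs.  */
#if 0
gas_assert (get_logsz (1) == 0);
gas_assert (get_logsz (2) == 1);
gas_assert (get_logsz (4) == 2);
gas_assert (get_logsz (8) == 3);
gas_assert (get_logsz (16) == 4);
#endif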
5433 
5434 /* Determine and return the real reloc type code for an instruction
5435    with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */
5436 
5437 static inline bfd_reloc_code_real_type
5438 ldst_lo12_determine_real_reloc_type (void)
5439 {
5440   unsigned logsz;
5441   enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5442   enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5443 
5444   const bfd_reloc_code_real_type reloc_ldst_lo12[5][5] = {
5445     {
5446       BFD_RELOC_AARCH64_LDST8_LO12,
5447       BFD_RELOC_AARCH64_LDST16_LO12,
5448       BFD_RELOC_AARCH64_LDST32_LO12,
5449       BFD_RELOC_AARCH64_LDST64_LO12,
5450       BFD_RELOC_AARCH64_LDST128_LO12
5451     },
5452     {
5453       BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5454       BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5455       BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5456       BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5457       BFD_RELOC_AARCH64_NONE
5458     },
5459     {
5460       BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5461       BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5462       BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5463       BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5464       BFD_RELOC_AARCH64_NONE
5465     },
5466     {
5467       BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12,
5468       BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12,
5469       BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12,
5470       BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12,
5471       BFD_RELOC_AARCH64_NONE
5472     },
5473     {
5474       BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC,
5475       BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC,
5476       BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC,
5477       BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC,
5478       BFD_RELOC_AARCH64_NONE
5479     }
5480   };
5481 
5482   gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5483 	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5484 	      || (inst.reloc.type
5485 		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5486 	      || (inst.reloc.type
5487 		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
5488 	      || (inst.reloc.type
5489 		  == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC));
5490   gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5491 
5492   if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5493     opd1_qlf =
5494       aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5495 				      1, opd0_qlf, 0);
5496   gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5497 
5498   logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5499   if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5500       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC
5501       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12
5502       || inst.reloc.type == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC)
5503     gas_assert (logsz <= 3);
5504   else
5505     gas_assert (logsz <= 4);
5506 
5507   /* In reloc.c, these pseudo relocation types should be defined in the same
5508      order as the reloc_ldst_lo12 array above, because the array index
5509      calculation below relies on this.  */
5510   return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5511 }
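
/* Worked example (a sketch, not compiled): for "ldr x0, [x1, #:lo12:sym]" the
   pseudo reloc is the plain BFD_RELOC_AARCH64_LDST_LO12, so the row index is
   0; the D-sized address qualifier implies an 8-byte access, so the column is
   3 and the real reloc is BFD_RELOC_AARCH64_LDST64_LO12.  */
#if 0
gas_assert (get_logsz (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_S_D))
	    == 3);
#endif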
5512 
5513 /* Check whether a register list REGINFO is valid.  The registers must be
5514    numbered in increasing order (modulo 32), in increments of one or two.
5515 
5516    If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5517    increments of two.
5518 
5519    Return FALSE if such a register list is invalid, otherwise return TRUE.  */
5520 
5521 static bfd_boolean
5522 reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5523 {
5524   uint32_t i, nb_regs, prev_regno, incr;
5525 
5526   nb_regs = 1 + (reginfo & 0x3);
5527   reginfo >>= 2;
5528   prev_regno = reginfo & 0x1f;
5529   incr = accept_alternate ? 2 : 1;
5530 
5531   for (i = 1; i < nb_regs; ++i)
5532     {
5533       uint32_t curr_regno;
5534       reginfo >>= 5;
5535       curr_regno = reginfo & 0x1f;
5536       if (curr_regno != ((prev_regno + incr) & 0x1f))
5537 	return FALSE;
5538       prev_regno = curr_regno;
5539     }
5540 
5541   return TRUE;
5542 }
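
/* REGINFO packs the list with the number of registers minus one in bits
   [1:0] and each register number in a five-bit field starting at bit 2, as
   consumed by the reglist handling in parse_operands below.  A sketch (not
   compiled) of a list that wraps modulo 32, {v30, v31, v0, v1}, which is
   still accepted:  */
#if 0
uint32_t reginfo = 3 | (30u << 2) | (31u << 7) | (0u << 12) | (1u << 17);
gas_assert (reg_list_valid_p (reginfo, /* accept_alternate */ 0));
#endif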
5543 
5544 /* Generic instruction operand parser.	This does no encoding and no
5545    semantic validation; it merely squirrels values away in the inst
5546    structure.  Returns TRUE or FALSE depending on whether the
5547    specified grammar matched.  */
5548 
5549 static bfd_boolean
5550 parse_operands (char *str, const aarch64_opcode *opcode)
5551 {
5552   int i;
5553   char *backtrack_pos = 0;
5554   const enum aarch64_opnd *operands = opcode->operands;
5555   aarch64_reg_type imm_reg_type;
5556 
5557   clear_error ();
5558   skip_whitespace (str);
5559 
5560   if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5561     imm_reg_type = REG_TYPE_R_Z_SP_BHSDQ_VZP;
5562   else
5563     imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5564 
5565   for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5566     {
5567       int64_t val;
5568       const reg_entry *reg;
5569       int comma_skipped_p = 0;
5570       aarch64_reg_type rtype;
5571       struct vector_type_el vectype;
5572       aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5573       aarch64_opnd_info *info = &inst.base.operands[i];
5574       aarch64_reg_type reg_type;
5575 
5576       DEBUG_TRACE ("parse operand %d", i);
5577 
5578       /* Assign the operand code.  */
5579       info->type = operands[i];
5580 
5581       if (optional_operand_p (opcode, i))
5582 	{
5583 	  /* Remember where we are in case we need to backtrack.  */
5584 	  gas_assert (!backtrack_pos);
5585 	  backtrack_pos = str;
5586 	}
5587 
5588       /* Expect comma between operands; the backtrack mechanism will take
5589 	 care of cases of omitted optional operand.  */
5590       if (i > 0 && ! skip_past_char (&str, ','))
5591 	{
5592 	  set_syntax_error (_("comma expected between operands"));
5593 	  goto failure;
5594 	}
5595       else
5596 	comma_skipped_p = 1;
5597 
5598       switch (operands[i])
5599 	{
5600 	case AARCH64_OPND_Rd:
5601 	case AARCH64_OPND_Rn:
5602 	case AARCH64_OPND_Rm:
5603 	case AARCH64_OPND_Rt:
5604 	case AARCH64_OPND_Rt2:
5605 	case AARCH64_OPND_Rs:
5606 	case AARCH64_OPND_Ra:
5607 	case AARCH64_OPND_Rt_SYS:
5608 	case AARCH64_OPND_PAIRREG:
5609 	case AARCH64_OPND_SVE_Rm:
5610 	  po_int_reg_or_fail (REG_TYPE_R_Z);
5611 	  break;
5612 
5613 	case AARCH64_OPND_Rd_SP:
5614 	case AARCH64_OPND_Rn_SP:
5615 	case AARCH64_OPND_Rt_SP:
5616 	case AARCH64_OPND_SVE_Rn_SP:
5617 	case AARCH64_OPND_Rm_SP:
5618 	  po_int_reg_or_fail (REG_TYPE_R_SP);
5619 	  break;
5620 
5621 	case AARCH64_OPND_Rm_EXT:
5622 	case AARCH64_OPND_Rm_SFT:
5623 	  po_misc_or_fail (parse_shifter_operand
5624 			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5625 					 ? SHIFTED_ARITH_IMM
5626 					 : SHIFTED_LOGIC_IMM)));
5627 	  if (!info->shifter.operator_present)
5628 	    {
5629 	      /* Default to LSL if not present.  Libopcodes prefers shifter
5630 		 kind to be explicit.  */
5631 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5632 	      info->shifter.kind = AARCH64_MOD_LSL;
5633 	      /* For Rm_EXT, libopcodes will carry out further check on whether
5634 		 or not stack pointer is used in the instruction (Recall that
5635 		 "the extend operator is not optional unless at least one of
5636 		 "Rd" or "Rn" is '11111' (i.e. WSP)").  */
5637 	    }
5638 	  break;
5639 
5640 	case AARCH64_OPND_Fd:
5641 	case AARCH64_OPND_Fn:
5642 	case AARCH64_OPND_Fm:
5643 	case AARCH64_OPND_Fa:
5644 	case AARCH64_OPND_Ft:
5645 	case AARCH64_OPND_Ft2:
5646 	case AARCH64_OPND_Sd:
5647 	case AARCH64_OPND_Sn:
5648 	case AARCH64_OPND_Sm:
5649 	case AARCH64_OPND_SVE_VZn:
5650 	case AARCH64_OPND_SVE_Vd:
5651 	case AARCH64_OPND_SVE_Vm:
5652 	case AARCH64_OPND_SVE_Vn:
5653 	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5654 	  if (val == PARSE_FAIL)
5655 	    {
5656 	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5657 	      goto failure;
5658 	    }
5659 	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5660 
5661 	  info->reg.regno = val;
5662 	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5663 	  break;
5664 
5665 	case AARCH64_OPND_SVE_Pd:
5666 	case AARCH64_OPND_SVE_Pg3:
5667 	case AARCH64_OPND_SVE_Pg4_5:
5668 	case AARCH64_OPND_SVE_Pg4_10:
5669 	case AARCH64_OPND_SVE_Pg4_16:
5670 	case AARCH64_OPND_SVE_Pm:
5671 	case AARCH64_OPND_SVE_Pn:
5672 	case AARCH64_OPND_SVE_Pt:
5673 	  reg_type = REG_TYPE_PN;
5674 	  goto vector_reg;
5675 
5676 	case AARCH64_OPND_SVE_Za_5:
5677 	case AARCH64_OPND_SVE_Za_16:
5678 	case AARCH64_OPND_SVE_Zd:
5679 	case AARCH64_OPND_SVE_Zm_5:
5680 	case AARCH64_OPND_SVE_Zm_16:
5681 	case AARCH64_OPND_SVE_Zn:
5682 	case AARCH64_OPND_SVE_Zt:
5683 	  reg_type = REG_TYPE_ZN;
5684 	  goto vector_reg;
5685 
5686 	case AARCH64_OPND_Va:
5687 	case AARCH64_OPND_Vd:
5688 	case AARCH64_OPND_Vn:
5689 	case AARCH64_OPND_Vm:
5690 	  reg_type = REG_TYPE_VN;
5691 	vector_reg:
5692 	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5693 	  if (val == PARSE_FAIL)
5694 	    {
5695 	      first_error (_(get_reg_expected_msg (reg_type)));
5696 	      goto failure;
5697 	    }
5698 	  if (vectype.defined & NTA_HASINDEX)
5699 	    goto failure;
5700 
5701 	  info->reg.regno = val;
5702 	  if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5703 	      && vectype.type == NT_invtype)
5704 	    /* Unqualified Pn and Zn registers are allowed in certain
5705 	       contexts.  Rely on F_STRICT qualifier checking to catch
5706 	       invalid uses.  */
5707 	    info->qualifier = AARCH64_OPND_QLF_NIL;
5708 	  else
5709 	    {
5710 	      info->qualifier = vectype_to_qualifier (&vectype);
5711 	      if (info->qualifier == AARCH64_OPND_QLF_NIL)
5712 		goto failure;
5713 	    }
5714 	  break;
5715 
5716 	case AARCH64_OPND_VdD1:
5717 	case AARCH64_OPND_VnD1:
5718 	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5719 	  if (val == PARSE_FAIL)
5720 	    {
5721 	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5722 	      goto failure;
5723 	    }
5724 	  if (vectype.type != NT_d || vectype.index != 1)
5725 	    {
5726 	      set_fatal_syntax_error
5727 		(_("the top half of a 128-bit FP/SIMD register is expected"));
5728 	      goto failure;
5729 	    }
5730 	  info->reg.regno = val;
5731 	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar register
5732 	     here; this is correct for the purpose of encoding/decoding, since
5733 	     only the register number is explicitly encoded in the related
5734 	     instructions, although it appears a bit hacky.  */
5735 	  info->qualifier = AARCH64_OPND_QLF_S_D;
5736 	  break;
5737 
5738 	case AARCH64_OPND_SVE_Zm3_INDEX:
5739 	case AARCH64_OPND_SVE_Zm3_22_INDEX:
5740 	case AARCH64_OPND_SVE_Zm3_11_INDEX:
5741 	case AARCH64_OPND_SVE_Zm4_11_INDEX:
5742 	case AARCH64_OPND_SVE_Zm4_INDEX:
5743 	case AARCH64_OPND_SVE_Zn_INDEX:
5744 	  reg_type = REG_TYPE_ZN;
5745 	  goto vector_reg_index;
5746 
5747 	case AARCH64_OPND_Ed:
5748 	case AARCH64_OPND_En:
5749 	case AARCH64_OPND_Em:
5750 	case AARCH64_OPND_Em16:
5751 	case AARCH64_OPND_SM3_IMM2:
5752 	  reg_type = REG_TYPE_VN;
5753 	vector_reg_index:
5754 	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5755 	  if (val == PARSE_FAIL)
5756 	    {
5757 	      first_error (_(get_reg_expected_msg (reg_type)));
5758 	      goto failure;
5759 	    }
5760 	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5761 	    goto failure;
5762 
5763 	  info->reglane.regno = val;
5764 	  info->reglane.index = vectype.index;
5765 	  info->qualifier = vectype_to_qualifier (&vectype);
5766 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5767 	    goto failure;
5768 	  break;
5769 
5770 	case AARCH64_OPND_SVE_ZnxN:
5771 	case AARCH64_OPND_SVE_ZtxN:
5772 	  reg_type = REG_TYPE_ZN;
5773 	  goto vector_reg_list;
5774 
5775 	case AARCH64_OPND_LVn:
5776 	case AARCH64_OPND_LVt:
5777 	case AARCH64_OPND_LVt_AL:
5778 	case AARCH64_OPND_LEt:
5779 	  reg_type = REG_TYPE_VN;
5780 	vector_reg_list:
5781 	  if (reg_type == REG_TYPE_ZN
5782 	      && get_opcode_dependent_value (opcode) == 1
5783 	      && *str != '{')
5784 	    {
5785 	      val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5786 	      if (val == PARSE_FAIL)
5787 		{
5788 		  first_error (_(get_reg_expected_msg (reg_type)));
5789 		  goto failure;
5790 		}
5791 	      info->reglist.first_regno = val;
5792 	      info->reglist.num_regs = 1;
5793 	    }
5794 	  else
5795 	    {
5796 	      val = parse_vector_reg_list (&str, reg_type, &vectype);
5797 	      if (val == PARSE_FAIL)
5798 		goto failure;
5799 
5800 	      if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5801 		{
5802 		  set_fatal_syntax_error (_("invalid register list"));
5803 		  goto failure;
5804 		}
5805 
5806 	      if (vectype.width != 0 && *str != ',')
5807 		{
5808 		  set_fatal_syntax_error
5809 		    (_("expected element type rather than vector type"));
5810 		  goto failure;
5811 		}
5812 
5813 	      info->reglist.first_regno = (val >> 2) & 0x1f;
5814 	      info->reglist.num_regs = (val & 0x3) + 1;
5815 	    }
5816 	  if (operands[i] == AARCH64_OPND_LEt)
5817 	    {
5818 	      if (!(vectype.defined & NTA_HASINDEX))
5819 		goto failure;
5820 	      info->reglist.has_index = 1;
5821 	      info->reglist.index = vectype.index;
5822 	    }
5823 	  else
5824 	    {
5825 	      if (vectype.defined & NTA_HASINDEX)
5826 		goto failure;
5827 	      if (!(vectype.defined & NTA_HASTYPE))
5828 		{
5829 		  if (reg_type == REG_TYPE_ZN)
5830 		    set_fatal_syntax_error (_("missing type suffix"));
5831 		  goto failure;
5832 		}
5833 	    }
5834 	  info->qualifier = vectype_to_qualifier (&vectype);
5835 	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5836 	    goto failure;
5837 	  break;
5838 
5839 	case AARCH64_OPND_CRn:
5840 	case AARCH64_OPND_CRm:
5841 	    {
5842 	      char prefix = *(str++);
5843 	      if (prefix != 'c' && prefix != 'C')
5844 		goto failure;
5845 
5846 	      po_imm_nc_or_fail ();
5847 	      if (val > 15)
5848 		{
5849 		  set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5850 		  goto failure;
5851 		}
5852 	      info->qualifier = AARCH64_OPND_QLF_CR;
5853 	      info->imm.value = val;
5854 	      break;
5855 	    }
5856 
5857 	case AARCH64_OPND_SHLL_IMM:
5858 	case AARCH64_OPND_IMM_VLSR:
5859 	  po_imm_or_fail (1, 64);
5860 	  info->imm.value = val;
5861 	  break;
5862 
5863 	case AARCH64_OPND_CCMP_IMM:
5864 	case AARCH64_OPND_SIMM5:
5865 	case AARCH64_OPND_FBITS:
5866 	case AARCH64_OPND_TME_UIMM16:
5867 	case AARCH64_OPND_UIMM4:
5868 	case AARCH64_OPND_UIMM4_ADDG:
5869 	case AARCH64_OPND_UIMM10:
5870 	case AARCH64_OPND_UIMM3_OP1:
5871 	case AARCH64_OPND_UIMM3_OP2:
5872 	case AARCH64_OPND_IMM_VLSL:
5873 	case AARCH64_OPND_IMM:
5874 	case AARCH64_OPND_IMM_2:
5875 	case AARCH64_OPND_WIDTH:
5876 	case AARCH64_OPND_SVE_INV_LIMM:
5877 	case AARCH64_OPND_SVE_LIMM:
5878 	case AARCH64_OPND_SVE_LIMM_MOV:
5879 	case AARCH64_OPND_SVE_SHLIMM_PRED:
5880 	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5881 	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
5882 	case AARCH64_OPND_SVE_SHRIMM_PRED:
5883 	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5884 	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
5885 	case AARCH64_OPND_SVE_SIMM5:
5886 	case AARCH64_OPND_SVE_SIMM5B:
5887 	case AARCH64_OPND_SVE_SIMM6:
5888 	case AARCH64_OPND_SVE_SIMM8:
5889 	case AARCH64_OPND_SVE_UIMM3:
5890 	case AARCH64_OPND_SVE_UIMM7:
5891 	case AARCH64_OPND_SVE_UIMM8:
5892 	case AARCH64_OPND_SVE_UIMM8_53:
5893 	case AARCH64_OPND_IMM_ROT1:
5894 	case AARCH64_OPND_IMM_ROT2:
5895 	case AARCH64_OPND_IMM_ROT3:
5896 	case AARCH64_OPND_SVE_IMM_ROT1:
5897 	case AARCH64_OPND_SVE_IMM_ROT2:
5898 	case AARCH64_OPND_SVE_IMM_ROT3:
5899 	  po_imm_nc_or_fail ();
5900 	  info->imm.value = val;
5901 	  break;
5902 
5903 	case AARCH64_OPND_SVE_AIMM:
5904 	case AARCH64_OPND_SVE_ASIMM:
5905 	  po_imm_nc_or_fail ();
5906 	  info->imm.value = val;
5907 	  skip_whitespace (str);
5908 	  if (skip_past_comma (&str))
5909 	    po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5910 	  else
5911 	    inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5912 	  break;
5913 
5914 	case AARCH64_OPND_SVE_PATTERN:
5915 	  po_enum_or_fail (aarch64_sve_pattern_array);
5916 	  info->imm.value = val;
5917 	  break;
5918 
5919 	case AARCH64_OPND_SVE_PATTERN_SCALED:
5920 	  po_enum_or_fail (aarch64_sve_pattern_array);
5921 	  info->imm.value = val;
5922 	  if (skip_past_comma (&str)
5923 	      && !parse_shift (&str, info, SHIFTED_MUL))
5924 	    goto failure;
5925 	  if (!info->shifter.operator_present)
5926 	    {
5927 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5928 	      info->shifter.kind = AARCH64_MOD_MUL;
5929 	      info->shifter.amount = 1;
5930 	    }
5931 	  break;
5932 
5933 	case AARCH64_OPND_SVE_PRFOP:
5934 	  po_enum_or_fail (aarch64_sve_prfop_array);
5935 	  info->imm.value = val;
5936 	  break;
5937 
5938 	case AARCH64_OPND_UIMM7:
5939 	  po_imm_or_fail (0, 127);
5940 	  info->imm.value = val;
5941 	  break;
5942 
5943 	case AARCH64_OPND_IDX:
5944 	case AARCH64_OPND_MASK:
5945 	case AARCH64_OPND_BIT_NUM:
5946 	case AARCH64_OPND_IMMR:
5947 	case AARCH64_OPND_IMMS:
5948 	  po_imm_or_fail (0, 63);
5949 	  info->imm.value = val;
5950 	  break;
5951 
5952 	case AARCH64_OPND_IMM0:
5953 	  po_imm_nc_or_fail ();
5954 	  if (val != 0)
5955 	    {
5956 	      set_fatal_syntax_error (_("immediate zero expected"));
5957 	      goto failure;
5958 	    }
5959 	  info->imm.value = 0;
5960 	  break;
5961 
5962 	case AARCH64_OPND_FPIMM0:
5963 	  {
5964 	    int qfloat;
5965 	    bfd_boolean res1 = FALSE, res2 = FALSE;
5966 	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5967 	       it is probably not worth the effort to support it.  */
5968 	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5969 						  imm_reg_type))
5970 		&& (error_p ()
5971 		    || !(res2 = parse_constant_immediate (&str, &val,
5972 							  imm_reg_type))))
5973 	      goto failure;
5974 	    if ((res1 && qfloat == 0) || (res2 && val == 0))
5975 	      {
5976 		info->imm.value = 0;
5977 		info->imm.is_fp = 1;
5978 		break;
5979 	      }
5980 	    set_fatal_syntax_error (_("immediate zero expected"));
5981 	    goto failure;
5982 	  }
5983 
5984 	case AARCH64_OPND_IMM_MOV:
5985 	  {
5986 	    char *saved = str;
5987 	    if (reg_name_p (str, REG_TYPE_R_Z_SP) ||
5988 		reg_name_p (str, REG_TYPE_VN))
5989 	      goto failure;
5990 	    str = saved;
5991 	    po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5992 						GE_OPT_PREFIX, 1));
5993 	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5994 	       later.  fix_mov_imm_insn will try to determine a machine
5995 	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
5996 	       message if the immediate cannot be moved by a single
5997 	       instruction.  */
5998 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5999 	    inst.base.operands[i].skip = 1;
6000 	  }
6001 	  break;
6002 
6003 	case AARCH64_OPND_SIMD_IMM:
6004 	case AARCH64_OPND_SIMD_IMM_SFT:
6005 	  if (! parse_big_immediate (&str, &val, imm_reg_type))
6006 	    goto failure;
6007 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6008 					      /* addr_off_p */ 0,
6009 					      /* need_libopcodes_p */ 1,
6010 					      /* skip_p */ 1);
6011 	  /* Parse shift.
6012 	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
6013 	     shift, we don't check it here; we leave the checking to
6014 	     the libopcodes (operand_general_constraint_met_p).  By
6015 	     doing this, we achieve better diagnostics.  */
6016 	  if (skip_past_comma (&str)
6017 	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
6018 	    goto failure;
6019 	  if (!info->shifter.operator_present
6020 	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
6021 	    {
6022 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6023 		 kind to be explicit.  */
6024 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6025 	      info->shifter.kind = AARCH64_MOD_LSL;
6026 	    }
6027 	  break;
6028 
6029 	case AARCH64_OPND_FPIMM:
6030 	case AARCH64_OPND_SIMD_FPIMM:
6031 	case AARCH64_OPND_SVE_FPIMM8:
6032 	  {
6033 	    int qfloat;
6034 	    bfd_boolean dp_p;
6035 
6036 	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
6037 	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
6038 		|| !aarch64_imm_float_p (qfloat))
6039 	      {
6040 		if (!error_p ())
6041 		  set_fatal_syntax_error (_("invalid floating-point"
6042 					    " constant"));
6043 		goto failure;
6044 	      }
6045 	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
6046 	    inst.base.operands[i].imm.is_fp = 1;
6047 	  }
6048 	  break;
6049 
6050 	case AARCH64_OPND_SVE_I1_HALF_ONE:
6051 	case AARCH64_OPND_SVE_I1_HALF_TWO:
6052 	case AARCH64_OPND_SVE_I1_ZERO_ONE:
6053 	  {
6054 	    int qfloat;
6055 	    bfd_boolean dp_p;
6056 
6057 	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
6058 	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
6059 	      {
6060 		if (!error_p ())
6061 		  set_fatal_syntax_error (_("invalid floating-point"
6062 					    " constant"));
6063 		goto failure;
6064 	      }
6065 	    inst.base.operands[i].imm.value = qfloat;
6066 	    inst.base.operands[i].imm.is_fp = 1;
6067 	  }
6068 	  break;
6069 
6070 	case AARCH64_OPND_LIMM:
6071 	  po_misc_or_fail (parse_shifter_operand (&str, info,
6072 						  SHIFTED_LOGIC_IMM));
6073 	  if (info->shifter.operator_present)
6074 	    {
6075 	      set_fatal_syntax_error
6076 		(_("shift not allowed for bitmask immediate"));
6077 	      goto failure;
6078 	    }
6079 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6080 					      /* addr_off_p */ 0,
6081 					      /* need_libopcodes_p */ 1,
6082 					      /* skip_p */ 1);
6083 	  break;
6084 
6085 	case AARCH64_OPND_AIMM:
6086 	  if (opcode->op == OP_ADD)
6087 	    /* ADD may have relocation types.  */
6088 	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
6089 							  SHIFTED_ARITH_IMM));
6090 	  else
6091 	    po_misc_or_fail (parse_shifter_operand (&str, info,
6092 						    SHIFTED_ARITH_IMM));
6093 	  switch (inst.reloc.type)
6094 	    {
6095 	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
6096 	      info->shifter.amount = 12;
6097 	      break;
6098 	    case BFD_RELOC_UNUSED:
6099 	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6100 	      if (info->shifter.kind != AARCH64_MOD_NONE)
6101 		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
6102 	      inst.reloc.pc_rel = 0;
6103 	      break;
6104 	    default:
6105 	      break;
6106 	    }
6107 	  info->imm.value = 0;
6108 	  if (!info->shifter.operator_present)
6109 	    {
6110 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6111 		 kind to be explicit.  */
6112 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6113 	      info->shifter.kind = AARCH64_MOD_LSL;
6114 	    }
6115 	  break;
6116 
6117 	case AARCH64_OPND_HALF:
6118 	    {
6119 	      /* #<imm16> or relocation.  */
6120 	      int internal_fixup_p;
6121 	      po_misc_or_fail (parse_half (&str, &internal_fixup_p));
6122 	      if (internal_fixup_p)
6123 		aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
6124 	      skip_whitespace (str);
6125 	      if (skip_past_comma (&str))
6126 		{
6127 		  /* {, LSL #<shift>}  */
6128 		  if (! aarch64_gas_internal_fixup_p ())
6129 		    {
6130 		      set_fatal_syntax_error (_("can't mix relocation modifier "
6131 						"with explicit shift"));
6132 		      goto failure;
6133 		    }
6134 		  po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
6135 		}
6136 	      else
6137 		inst.base.operands[i].shifter.amount = 0;
6138 	      inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
6139 	      inst.base.operands[i].imm.value = 0;
6140 	      if (! process_movw_reloc_info ())
6141 		goto failure;
6142 	    }
6143 	  break;
6144 
6145 	case AARCH64_OPND_EXCEPTION:
6146 	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
6147 						       imm_reg_type));
6148 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6149 					      /* addr_off_p */ 0,
6150 					      /* need_libopcodes_p */ 0,
6151 					      /* skip_p */ 1);
6152 	  break;
6153 
6154 	case AARCH64_OPND_NZCV:
6155 	  {
6156 	    const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
6157 	    if (nzcv != NULL)
6158 	      {
6159 		str += 4;
6160 		info->imm.value = nzcv->value;
6161 		break;
6162 	      }
6163 	    po_imm_or_fail (0, 15);
6164 	    info->imm.value = val;
6165 	  }
6166 	  break;
6167 
6168 	case AARCH64_OPND_COND:
6169 	case AARCH64_OPND_COND1:
6170 	  {
6171 	    char *start = str;
6172 	    do
6173 	      str++;
6174 	    while (ISALPHA (*str));
6175 	    info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
6176 	    if (info->cond == NULL)
6177 	      {
6178 		set_syntax_error (_("invalid condition"));
6179 		goto failure;
6180 	      }
6181 	    else if (operands[i] == AARCH64_OPND_COND1
6182 		     && (info->cond->value & 0xe) == 0xe)
6183 	      {
6184 		/* Do not allow AL or NV.  */
6185 		set_default_error ();
6186 		goto failure;
6187 	      }
6188 	  }
6189 	  break;
6190 
6191 	case AARCH64_OPND_ADDR_ADRP:
6192 	  po_misc_or_fail (parse_adrp (&str));
6193 	  /* Clear the value as operand needs to be relocated.  */
6194 	  info->imm.value = 0;
6195 	  break;
6196 
6197 	case AARCH64_OPND_ADDR_PCREL14:
6198 	case AARCH64_OPND_ADDR_PCREL19:
6199 	case AARCH64_OPND_ADDR_PCREL21:
6200 	case AARCH64_OPND_ADDR_PCREL26:
6201 	  po_misc_or_fail (parse_address (&str, info));
6202 	  if (!info->addr.pcrel)
6203 	    {
6204 	      set_syntax_error (_("invalid pc-relative address"));
6205 	      goto failure;
6206 	    }
6207 	  if (inst.gen_lit_pool
6208 	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
6209 	    {
6210 	      /* Only permit "=value" in the literal load instructions.
6211 		 The literal will be generated by programmer_friendly_fixup.  */
6212 	      set_syntax_error (_("invalid use of \"=immediate\""));
6213 	      goto failure;
6214 	    }
6215 	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
6216 	    {
6217 	      set_syntax_error (_("unrecognized relocation suffix"));
6218 	      goto failure;
6219 	    }
6220 	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
6221 	    {
6222 	      info->imm.value = inst.reloc.exp.X_add_number;
6223 	      inst.reloc.type = BFD_RELOC_UNUSED;
6224 	    }
6225 	  else
6226 	    {
6227 	      info->imm.value = 0;
6228 	      if (inst.reloc.type == BFD_RELOC_UNUSED)
6229 		switch (opcode->iclass)
6230 		  {
6231 		  case compbranch:
6232 		  case condbranch:
6233 		    /* e.g. CBZ or B.COND  */
6234 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6235 		    inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
6236 		    break;
6237 		  case testbranch:
6238 		    /* e.g. TBZ  */
6239 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
6240 		    inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
6241 		    break;
6242 		  case branch_imm:
6243 		    /* e.g. B or BL  */
6244 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
6245 		    inst.reloc.type =
6246 		      (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
6247 			 : BFD_RELOC_AARCH64_JUMP26;
6248 		    break;
6249 		  case loadlit:
6250 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
6251 		    inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
6252 		    break;
6253 		  case pcreladdr:
6254 		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
6255 		    inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
6256 		    break;
6257 		  default:
6258 		    gas_assert (0);
6259 		    abort ();
6260 		  }
6261 	      inst.reloc.pc_rel = 1;
6262 	    }
6263 	  break;
6264 
6265 	case AARCH64_OPND_ADDR_SIMPLE:
6266 	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
6267 	  {
6268 	    /* [<Xn|SP>{, #<simm>}]  */
6269 	    char *start = str;
6270 	    /* First use the normal address-parsing routines, to get
6271 	       the usual syntax errors.  */
6272 	    po_misc_or_fail (parse_address (&str, info));
6273 	    if (info->addr.pcrel || info->addr.offset.is_reg
6274 		|| !info->addr.preind || info->addr.postind
6275 		|| info->addr.writeback)
6276 	      {
6277 		set_syntax_error (_("invalid addressing mode"));
6278 		goto failure;
6279 	      }
6280 
6281 	    /* Then retry, matching the specific syntax of these addresses.  */
6282 	    str = start;
6283 	    po_char_or_fail ('[');
6284 	    po_reg_or_fail (REG_TYPE_R64_SP);
6285 	    /* Accept optional ", #0".  */
6286 	    if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
6287 		&& skip_past_char (&str, ','))
6288 	      {
6289 		skip_past_char (&str, '#');
6290 		if (! skip_past_char (&str, '0'))
6291 		  {
6292 		    set_fatal_syntax_error
6293 		      (_("the optional immediate offset can only be 0"));
6294 		    goto failure;
6295 		  }
6296 	      }
6297 	    po_char_or_fail (']');
6298 	    break;
6299 	  }
6300 
6301 	case AARCH64_OPND_ADDR_REGOFF:
6302 	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
6303 	  po_misc_or_fail (parse_address (&str, info));
6304 	regoff_addr:
6305 	  if (info->addr.pcrel || !info->addr.offset.is_reg
6306 	      || !info->addr.preind || info->addr.postind
6307 	      || info->addr.writeback)
6308 	    {
6309 	      set_syntax_error (_("invalid addressing mode"));
6310 	      goto failure;
6311 	    }
6312 	  if (!info->shifter.operator_present)
6313 	    {
6314 	      /* Default to LSL if not present.  Libopcodes prefers shifter
6315 		 kind to be explicit.  */
6316 	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
6317 	      info->shifter.kind = AARCH64_MOD_LSL;
6318 	    }
6319 	  /* Qualifier to be deduced by libopcodes.  */
6320 	  break;
6321 
6322 	case AARCH64_OPND_ADDR_SIMM7:
6323 	  po_misc_or_fail (parse_address (&str, info));
6324 	  if (info->addr.pcrel || info->addr.offset.is_reg
6325 	      || (!info->addr.preind && !info->addr.postind))
6326 	    {
6327 	      set_syntax_error (_("invalid addressing mode"));
6328 	      goto failure;
6329 	    }
6330 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6331 	    {
6332 	      set_syntax_error (_("relocation not allowed"));
6333 	      goto failure;
6334 	    }
6335 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6336 					      /* addr_off_p */ 1,
6337 					      /* need_libopcodes_p */ 1,
6338 					      /* skip_p */ 0);
6339 	  break;
6340 
6341 	case AARCH64_OPND_ADDR_SIMM9:
6342 	case AARCH64_OPND_ADDR_SIMM9_2:
6343 	case AARCH64_OPND_ADDR_SIMM11:
6344 	case AARCH64_OPND_ADDR_SIMM13:
6345 	  po_misc_or_fail (parse_address (&str, info));
6346 	  if (info->addr.pcrel || info->addr.offset.is_reg
6347 	      || (!info->addr.preind && !info->addr.postind)
6348 	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6349 		  && info->addr.writeback))
6350 	    {
6351 	      set_syntax_error (_("invalid addressing mode"));
6352 	      goto failure;
6353 	    }
6354 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6355 	    {
6356 	      set_syntax_error (_("relocation not allowed"));
6357 	      goto failure;
6358 	    }
6359 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6360 					      /* addr_off_p */ 1,
6361 					      /* need_libopcodes_p */ 1,
6362 					      /* skip_p */ 0);
6363 	  break;
6364 
6365 	case AARCH64_OPND_ADDR_SIMM10:
6366 	case AARCH64_OPND_ADDR_OFFSET:
6367 	  po_misc_or_fail (parse_address (&str, info));
6368 	  if (info->addr.pcrel || info->addr.offset.is_reg
6369 	      || !info->addr.preind || info->addr.postind)
6370 	    {
6371 	      set_syntax_error (_("invalid addressing mode"));
6372 	      goto failure;
6373 	    }
6374 	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6375 	    {
6376 	      set_syntax_error (_("relocation not allowed"));
6377 	      goto failure;
6378 	    }
6379 	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6380 					      /* addr_off_p */ 1,
6381 					      /* need_libopcodes_p */ 1,
6382 					      /* skip_p */ 0);
6383 	  break;
6384 
6385 	case AARCH64_OPND_ADDR_UIMM12:
6386 	  po_misc_or_fail (parse_address (&str, info));
6387 	  if (info->addr.pcrel || info->addr.offset.is_reg
6388 	      || !info->addr.preind || info->addr.writeback)
6389 	    {
6390 	      set_syntax_error (_("invalid addressing mode"));
6391 	      goto failure;
6392 	    }
6393 	  if (inst.reloc.type == BFD_RELOC_UNUSED)
6394 	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6395 	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6396 		   || (inst.reloc.type
6397 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6398 		   || (inst.reloc.type
6399 		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
6400 		   || (inst.reloc.type
6401 		       == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12)
6402 		   || (inst.reloc.type
6403 		       == BFD_RELOC_AARCH64_TLSLE_LDST_TPREL_LO12_NC))
6404 	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6405 	  /* Leave qualifier to be determined by libopcodes.  */
6406 	  break;
6407 
6408 	case AARCH64_OPND_SIMD_ADDR_POST:
6409 	  /* [<Xn|SP>], <Xm|#<amount>>  */
6410 	  po_misc_or_fail (parse_address (&str, info));
6411 	  if (!info->addr.postind || !info->addr.writeback)
6412 	    {
6413 	      set_syntax_error (_("invalid addressing mode"));
6414 	      goto failure;
6415 	    }
6416 	  if (!info->addr.offset.is_reg)
6417 	    {
6418 	      if (inst.reloc.exp.X_op == O_constant)
6419 		info->addr.offset.imm = inst.reloc.exp.X_add_number;
6420 	      else
6421 		{
6422 		  set_fatal_syntax_error
6423 		    (_("writeback value must be an immediate constant"));
6424 		  goto failure;
6425 		}
6426 	    }
6427 	  /* No qualifier.  */
6428 	  break;
6429 
6430 	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6431 	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
6432 	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6433 	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6434 	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6435 	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6436 	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6437 	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6438 	case AARCH64_OPND_SVE_ADDR_RI_U6:
6439 	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6440 	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6441 	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6442 	  /* [X<n>{, #imm, MUL VL}]
6443 	     [X<n>{, #imm}]
6444 	     but recognizing SVE registers.  */
6445 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6446 					      &offset_qualifier));
6447 	  if (base_qualifier != AARCH64_OPND_QLF_X)
6448 	    {
6449 	      set_syntax_error (_("invalid addressing mode"));
6450 	      goto failure;
6451 	    }
6452 	sve_regimm:
6453 	  if (info->addr.pcrel || info->addr.offset.is_reg
6454 	      || !info->addr.preind || info->addr.writeback)
6455 	    {
6456 	      set_syntax_error (_("invalid addressing mode"));
6457 	      goto failure;
6458 	    }
6459 	  if (inst.reloc.type != BFD_RELOC_UNUSED
6460 	      || inst.reloc.exp.X_op != O_constant)
6461 	    {
6462 	      /* Make sure this has priority over
6463 		 "invalid addressing mode".  */
6464 	      set_fatal_syntax_error (_("constant offset required"));
6465 	      goto failure;
6466 	    }
6467 	  info->addr.offset.imm = inst.reloc.exp.X_add_number;
6468 	  break;
6469 
6470 	case AARCH64_OPND_SVE_ADDR_R:
6471 	  /* [<Xn|SP>{, <R><m>}]
6472 	     but recognizing SVE registers.  */
6473 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6474 					      &offset_qualifier));
6475 	  if (offset_qualifier == AARCH64_OPND_QLF_NIL)
6476 	    {
6477 	      offset_qualifier = AARCH64_OPND_QLF_X;
6478 	      info->addr.offset.is_reg = 1;
6479 	      info->addr.offset.regno = 31;
6480 	    }
6481 	  else if (base_qualifier != AARCH64_OPND_QLF_X
6482 	      || offset_qualifier != AARCH64_OPND_QLF_X)
6483 	    {
6484 	      set_syntax_error (_("invalid addressing mode"));
6485 	      goto failure;
6486 	    }
6487 	  goto regoff_addr;
6488 
6489 	case AARCH64_OPND_SVE_ADDR_RR:
6490 	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6491 	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6492 	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6493 	case AARCH64_OPND_SVE_ADDR_RX:
6494 	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6495 	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6496 	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6497 	  /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6498 	     but recognizing SVE registers.  */
6499 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6500 					      &offset_qualifier));
6501 	  if (base_qualifier != AARCH64_OPND_QLF_X
6502 	      || offset_qualifier != AARCH64_OPND_QLF_X)
6503 	    {
6504 	      set_syntax_error (_("invalid addressing mode"));
6505 	      goto failure;
6506 	    }
6507 	  goto regoff_addr;
6508 
6509 	case AARCH64_OPND_SVE_ADDR_RZ:
6510 	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6511 	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6512 	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6513 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6514 	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6515 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6516 	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6517 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6518 	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6519 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6520 	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6521 	  /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6522 	     [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}]  */
6523 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6524 					      &offset_qualifier));
6525 	  if (base_qualifier != AARCH64_OPND_QLF_X
6526 	      || (offset_qualifier != AARCH64_OPND_QLF_S_S
6527 		  && offset_qualifier != AARCH64_OPND_QLF_S_D))
6528 	    {
6529 	      set_syntax_error (_("invalid addressing mode"));
6530 	      goto failure;
6531 	    }
6532 	  info->qualifier = offset_qualifier;
6533 	  goto regoff_addr;
6534 
6535 	case AARCH64_OPND_SVE_ADDR_ZX:
6536 	  /* [Zn.<T>{, <Xm>}].  */
6537 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6538 					      &offset_qualifier));
6539 	  /* Things to check:
6540 	      base_qualifier either S_S or S_D
6541 	      offset_qualifier must be X
6542 	      */
6543 	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
6544 	       && base_qualifier != AARCH64_OPND_QLF_S_D)
6545 	      || offset_qualifier != AARCH64_OPND_QLF_X)
6546 	    {
6547 	      set_syntax_error (_("invalid addressing mode"));
6548 	      goto failure;
6549 	    }
6550 	  info->qualifier = base_qualifier;
6551 	  if (!info->addr.offset.is_reg || info->addr.pcrel
6552 	      || !info->addr.preind || info->addr.writeback
6553 	      || info->shifter.operator_present != 0)
6554 	    {
6555 	      set_syntax_error (_("invalid addressing mode"));
6556 	      goto failure;
6557 	    }
6558 	  info->shifter.kind = AARCH64_MOD_LSL;
6559 	  break;
6560 
6561 
6562 	case AARCH64_OPND_SVE_ADDR_ZI_U5:
6563 	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6564 	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6565 	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6566 	  /* [Z<n>.<T>{, #imm}]  */
6567 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6568 					      &offset_qualifier));
6569 	  if (base_qualifier != AARCH64_OPND_QLF_S_S
6570 	      && base_qualifier != AARCH64_OPND_QLF_S_D)
6571 	    {
6572 	      set_syntax_error (_("invalid addressing mode"));
6573 	      goto failure;
6574 	    }
6575 	  info->qualifier = base_qualifier;
6576 	  goto sve_regimm;
6577 
6578 	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6579 	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6580 	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6581 	  /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6582 	     [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6583 
6584 	     We don't reject:
6585 
6586 	     [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6587 
6588 	     here since we get better error messages by leaving it to
6589 	     the qualifier checking routines.  */
6590 	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6591 					      &offset_qualifier));
6592 	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
6593 	       && base_qualifier != AARCH64_OPND_QLF_S_D)
6594 	      || offset_qualifier != base_qualifier)
6595 	    {
6596 	      set_syntax_error (_("invalid addressing mode"));
6597 	      goto failure;
6598 	    }
6599 	  info->qualifier = base_qualifier;
6600 	  goto regoff_addr;
6601 
6602 	case AARCH64_OPND_SYSREG:
6603 	  {
6604 	    uint32_t sysreg_flags;
6605 	    if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0,
6606 				      &sysreg_flags)) == PARSE_FAIL)
6607 	      {
6608 		set_syntax_error (_("unknown or missing system register name"));
6609 		goto failure;
6610 	      }
6611 	    inst.base.operands[i].sysreg.value = val;
6612 	    inst.base.operands[i].sysreg.flags = sysreg_flags;
6613 	    break;
6614 	  }
6615 
6616 	case AARCH64_OPND_PSTATEFIELD:
6617 	  if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1, NULL))
6618 	      == PARSE_FAIL)
6619 	    {
6620 	      set_syntax_error (_("unknown or missing PSTATE field name"));
6621 	      goto failure;
6622 	    }
6623 	  inst.base.operands[i].pstatefield = val;
6624 	  break;
6625 
6626 	case AARCH64_OPND_SYSREG_IC:
6627 	  inst.base.operands[i].sysins_op =
6628 	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6629 	  goto sys_reg_ins;
6630 
6631 	case AARCH64_OPND_SYSREG_DC:
6632 	  inst.base.operands[i].sysins_op =
6633 	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6634 	  goto sys_reg_ins;
6635 
6636 	case AARCH64_OPND_SYSREG_AT:
6637 	  inst.base.operands[i].sysins_op =
6638 	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6639 	  goto sys_reg_ins;
6640 
6641 	case AARCH64_OPND_SYSREG_SR:
6642 	  inst.base.operands[i].sysins_op =
6643 	    parse_sys_ins_reg (&str, aarch64_sys_regs_sr_hsh);
6644 	  goto sys_reg_ins;
6645 
6646 	case AARCH64_OPND_SYSREG_TLBI:
6647 	  inst.base.operands[i].sysins_op =
6648 	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6649 sys_reg_ins:
6650 	  if (inst.base.operands[i].sysins_op == NULL)
6651 	    {
6652 	      set_fatal_syntax_error ( _("unknown or missing operation name"));
6653 	      goto failure;
6654 	    }
6655 	  break;
6656 
6657 	case AARCH64_OPND_BARRIER:
6658 	case AARCH64_OPND_BARRIER_ISB:
6659 	  val = parse_barrier (&str);
6660 	  if (val != PARSE_FAIL
6661 	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6662 	    {
6663 	      /* ISB only accepts the option name 'sy'.  */
6664 	      set_syntax_error
6665 		(_("the specified option is not accepted in ISB"));
6666 	      /* Turn off backtrack as this optional operand is present.  */
6667 	      backtrack_pos = 0;
6668 	      goto failure;
6669 	    }
6670 	  /* This is an extension to accept a 0..15 immediate.  */
6671 	  if (val == PARSE_FAIL)
6672 	    po_imm_or_fail (0, 15);
6673 	  info->barrier = aarch64_barrier_options + val;
6674 	  break;
6675 
6676 	case AARCH64_OPND_PRFOP:
6677 	  val = parse_pldop (&str);
6678 	  /* This is an extension to accept a 0..31 immediate.  */
6679 	  if (val == PARSE_FAIL)
6680 	    po_imm_or_fail (0, 31);
6681 	  inst.base.operands[i].prfop = aarch64_prfops + val;
6682 	  break;
6683 
6684 	case AARCH64_OPND_BARRIER_PSB:
6685 	  val = parse_barrier_psb (&str, &(info->hint_option));
6686 	  if (val == PARSE_FAIL)
6687 	    goto failure;
6688 	  break;
6689 
6690 	case AARCH64_OPND_BTI_TARGET:
6691 	  val = parse_bti_operand (&str, &(info->hint_option));
6692 	  if (val == PARSE_FAIL)
6693 	    goto failure;
6694 	  break;
6695 
6696 	default:
6697 	  as_fatal (_("unhandled operand code %d"), operands[i]);
6698 	}
6699 
6700       /* If we get here, this operand was successfully parsed.  */
6701       inst.base.operands[i].present = 1;
6702       continue;
6703 
6704 failure:
6705       /* The parse routine should already have set the error, but in case
6706 	 not, set a default one here.  */
6707       if (! error_p ())
6708 	set_default_error ();
6709 
6710       if (! backtrack_pos)
6711 	goto parse_operands_return;
6712 
6713       {
6714 	/* We reach here because this operand is marked as optional, and
6715 	   either no operand was supplied or the operand was supplied but it
6716 	   was syntactically incorrect.  In the latter case we report an
6717 	   error.  In the former case we perform a few more checks before
6718 	   dropping through to the code to insert the default operand.  */
6719 
6720 	char *tmp = backtrack_pos;
6721 	char endchar = END_OF_INSN;
6722 
6723 	if (i != (aarch64_num_of_operands (opcode) - 1))
6724 	  endchar = ',';
6725 	skip_past_char (&tmp, ',');
6726 
6727 	if (*tmp != endchar)
6728 	  /* The user has supplied an operand in the wrong format.  */
6729 	  goto parse_operands_return;
6730 
6731 	/* Make sure there is not a comma before the optional operand.
6732 	   For example the fifth operand of 'sys' is optional:
6733 
6734 	     sys #0,c0,c0,#0,  <--- wrong
6735 	     sys #0,c0,c0,#0   <--- correct.  */
6736 	if (comma_skipped_p && i && endchar == END_OF_INSN)
6737 	  {
6738 	    set_fatal_syntax_error
6739 	      (_("unexpected comma before the omitted optional operand"));
6740 	    goto parse_operands_return;
6741 	  }
6742       }
6743 
6744       /* Reaching here means we are dealing with an optional operand that is
6745 	 omitted from the assembly line.  */
6746       gas_assert (optional_operand_p (opcode, i));
6747       info->present = 0;
6748       process_omitted_operand (operands[i], opcode, i, info);
6749 
6750       /* Try again, skipping the optional operand at backtrack_pos.  */
6751       str = backtrack_pos;
6752       backtrack_pos = 0;
6753 
6754       /* Clear any error record after the omitted optional operand has been
6755 	 successfully handled.  */
6756       clear_error ();
6757     }
6758 
6759   /* Check if we have parsed all the operands.  */
6760   if (*str != '\0' && ! error_p ())
6761     {
6762       /* Set I to the index of the last present operand; this is
6763 	 for the purpose of diagnostics.  */
6764       for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6765 	;
6766       set_fatal_syntax_error
6767 	(_("unexpected characters following instruction"));
6768     }
6769 
6770 parse_operands_return:
6771 
6772   if (error_p ())
6773     {
6774       DEBUG_TRACE ("parsing FAIL: %s - %s",
6775 		   operand_mismatch_kind_names[get_error_kind ()],
6776 		   get_error_message ());
6777       /* Record the operand error properly; this is useful when there
6778 	 are multiple instruction templates for a mnemonic name, so that
6779 	 later on, we can select the error that most closely describes
6780 	 the problem.  */
6781       record_operand_error (opcode, i, get_error_kind (),
6782 			    get_error_message ());
6783       return FALSE;
6784     }
6785   else
6786     {
6787       DEBUG_TRACE ("parsing SUCCESS");
6788       return TRUE;
6789     }
6790 }
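
/* Typical use of the parser above (a sketch, not compiled): the assembly
   path tries each opcode template for a mnemonic in turn, and on failure the
   operand error recorded above is used to pick the most relevant diagnostic.
   "p" stands for the hypothetical remainder of the assembly line after the
   mnemonic has been consumed.  */
#if 0
if (parse_operands (p, opcode)
    && programmer_friendly_fixup (&inst))
  {
    /* ... continue with encoding via libopcodes ...  */
  }
#endif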
6791 
6792 /* Carry out some fix-ups to provide programmer-friendly features while
6793    keeping libopcodes happy, i.e. libopcodes only accepts
6794    the preferred architectural syntax.
6795    Return FALSE if there is any failure; otherwise return TRUE.  */
6796 
6797 static bfd_boolean
6798 programmer_friendly_fixup (aarch64_instruction *instr)
6799 {
6800   aarch64_inst *base = &instr->base;
6801   const aarch64_opcode *opcode = base->opcode;
6802   enum aarch64_op op = opcode->op;
6803   aarch64_opnd_info *operands = base->operands;
6804 
6805   DEBUG_TRACE ("enter");
6806 
6807   switch (opcode->iclass)
6808     {
6809     case testbranch:
6810       /* TBNZ Xn|Wn, #uimm6, label
6811 	 Test and Branch Not Zero: conditionally jumps to label if bit number
6812 	 uimm6 in register Xn is not zero.  The bit number implies the width of
6813 	 the register, which may be written and should be disassembled as Wn if
6814 	 uimm is less than 32.  */
6815       if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6816 	{
6817 	  if (operands[1].imm.value >= 32)
6818 	    {
6819 	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6820 						 0, 31);
6821 	      return FALSE;
6822 	    }
6823 	  operands[0].qualifier = AARCH64_OPND_QLF_X;
6824 	}
6825       break;
6826     case loadlit:
6827       /* LDR Wt, label | =value
6828 	 As a convenience, assemblers will typically permit the notation
6829 	 "=value" in conjunction with the pc-relative literal load instructions
6830 	 to automatically place an immediate value or symbolic address in a
6831 	 nearby literal pool and generate a hidden label which references it.
6832 	 ISREG has been set to 0 in the case of =value.  */
6833       if (instr->gen_lit_pool
6834 	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6835 	{
6836 	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6837 	  if (op == OP_LDRSW_LIT)
6838 	    size = 4;
6839 	  if (instr->reloc.exp.X_op != O_constant
6840 	      && instr->reloc.exp.X_op != O_big
6841 	      && instr->reloc.exp.X_op != O_symbol)
6842 	    {
6843 	      record_operand_error (opcode, 1,
6844 				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6845 				    _("constant expression expected"));
6846 	      return FALSE;
6847 	    }
6848 	  if (! add_to_lit_pool (&instr->reloc.exp, size))
6849 	    {
6850 	      record_operand_error (opcode, 1,
6851 				    AARCH64_OPDE_OTHER_ERROR,
6852 				    _("literal pool insertion failed"));
6853 	      return FALSE;
6854 	    }
6855 	}
6856       break;
6857     case log_shift:
6858     case bitfield:
6859       /* UXT[BHW] Wd, Wn
6860 	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
6861 	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo instruction which is
6862 	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6863 	 A programmer-friendly assembler should accept a destination Xd in
6864 	 place of Wd, however that is not the preferred form for disassembly.
6865 	 */
6866       if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6867 	  && operands[1].qualifier == AARCH64_OPND_QLF_W
6868 	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
6869 	operands[0].qualifier = AARCH64_OPND_QLF_W;
6870       break;
6871 
6872     case addsub_ext:
6873 	{
6874 	  /* In the 64-bit form, the final register operand is written as Wm
6875 	     for all but the (possibly omitted) UXTX/LSL and SXTX
6876 	     operators.
6877 	     As a programmer-friendly assembler, we accept e.g.
6878 	     ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6879 	     ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
6880 	  int idx = aarch64_operand_index (opcode->operands,
6881 					   AARCH64_OPND_Rm_EXT);
6882 	  gas_assert (idx == 1 || idx == 2);
6883 	  if (operands[0].qualifier == AARCH64_OPND_QLF_X
6884 	      && operands[idx].qualifier == AARCH64_OPND_QLF_X
6885 	      && operands[idx].shifter.kind != AARCH64_MOD_LSL
6886 	      && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6887 	      && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6888 	    operands[idx].qualifier = AARCH64_OPND_QLF_W;
6889 	}
6890       break;
6891 
6892     default:
6893       break;
6894     }
6895 
6896   DEBUG_TRACE ("exit with SUCCESS");
6897   return TRUE;
6898 }
6899 
6900 /* Check for loads and stores that will cause unpredictable behavior.  */
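/* Illustrative examples (not from the original sources): "ldr x0, [x0], #8"
   writes back into the transfer register, and "ldp x0, x0, [x1]" loads the
   same register twice; both provoke the warnings issued below.  */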
6901 
6902 static void
6903 warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6904 {
6905   aarch64_inst *base = &instr->base;
6906   const aarch64_opcode *opcode = base->opcode;
6907   const aarch64_opnd_info *opnds = base->operands;
6908   switch (opcode->iclass)
6909     {
6910     case ldst_pos:
6911     case ldst_imm9:
6912     case ldst_imm10:
6913     case ldst_unscaled:
6914     case ldst_unpriv:
6915       /* Loading/storing the base register is unpredictable if writeback.  */
6916       if ((aarch64_get_operand_class (opnds[0].type)
6917 	   == AARCH64_OPND_CLASS_INT_REG)
6918 	  && opnds[0].reg.regno == opnds[1].addr.base_regno
6919 	  && opnds[1].addr.base_regno != REG_SP
6920 	  /* Exempt STG/STZG/ST2G/STZ2G.  */
6921 	  && !(opnds[1].type == AARCH64_OPND_ADDR_SIMM13)
6922 	  && opnds[1].addr.writeback)
6923 	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6924       break;
6925 
6926     case ldstpair_off:
6927     case ldstnapair_offs:
6928     case ldstpair_indexed:
6929       /* Loading/storing the base register is unpredictable if writeback.  */
6930       if ((aarch64_get_operand_class (opnds[0].type)
6931 	   == AARCH64_OPND_CLASS_INT_REG)
6932 	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
6933 	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
6934 	  && opnds[2].addr.base_regno != REG_SP
6935 	  /* Exempt STGP.  */
6936 	  && !(opnds[2].type == AARCH64_OPND_ADDR_SIMM11)
6937 	  && opnds[2].addr.writeback)
6938 	    as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6939       /* Load operations must load different registers.  */
6940       if ((opcode->opcode & (1 << 22))
6941 	  && opnds[0].reg.regno == opnds[1].reg.regno)
6942 	    as_warn (_("unpredictable load of register pair -- `%s'"), str);
6943       break;
6944 
6945     case ldstexcl:
6946       /* It is unpredictable if the destination and status registers are the
6947 	 same.  */
6948       if ((aarch64_get_operand_class (opnds[0].type)
6949 	   == AARCH64_OPND_CLASS_INT_REG)
6950 	  && (aarch64_get_operand_class (opnds[1].type)
6951 	      == AARCH64_OPND_CLASS_INT_REG)
6952 	  && (opnds[0].reg.regno == opnds[1].reg.regno
6953 	      || opnds[0].reg.regno == opnds[2].reg.regno))
6954 	as_warn (_("unpredictable: identical transfer and status registers"
6955 		   " -- `%s'"),
6956 		 str);
6957 
6958       break;
6959 
6960     default:
6961       break;
6962     }
6963 }
6964 
6965 static void
6966 force_automatic_sequence_close (void)
6967 {
6968   if (now_instr_sequence.instr)
6969     {
6970       as_warn (_("previous `%s' sequence has not been closed"),
6971 	       now_instr_sequence.instr->opcode->name);
6972       init_insn_sequence (NULL, &now_instr_sequence);
6973     }
6974 }
6975 
6976 /* A wrapper function to interface with libopcodes for encoding and to
6977    record the error message, if there is any.
6978 
6979    Return TRUE on success; otherwise return FALSE.  */
6980 
6981 static bfd_boolean
6982 do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6983 	   aarch64_insn *code)
6984 {
6985   aarch64_operand_error error_info;
6986   memset (&error_info, '\0', sizeof (error_info));
6987   error_info.kind = AARCH64_OPDE_NIL;
6988   if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info, insn_sequence)
6989       && !error_info.non_fatal)
6990     return TRUE;
6991 
6992   gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6993   record_operand_error_info (opcode, &error_info);
6994   return error_info.non_fatal;
6995 }
6996 
6997 #ifdef DEBUG_AARCH64
6998 static inline void
6999 dump_opcode_operands (const aarch64_opcode *opcode)
7000 {
7001   int i = 0;
7002   while (opcode->operands[i] != AARCH64_OPND_NIL)
7003     {
7004       aarch64_verbose ("\t\t opnd%d: %s", i,
7005 		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
7006 		       ? aarch64_get_operand_name (opcode->operands[i])
7007 		       : aarch64_get_operand_desc (opcode->operands[i]));
7008       ++i;
7009     }
7010 }
7011 #endif /* DEBUG_AARCH64 */
7012 
7013 /* This is the guts of the machine-dependent assembler.  STR points to a
7014    machine dependent instruction.  This function is supposed to emit
7015    the frags/bytes it assembles to.  */
7016 
7017 void
7018 md_assemble (char *str)
7019 {
7020   char *p = str;
7021   templates *template;
7022   aarch64_opcode *opcode;
7023   aarch64_inst *inst_base;
7024   unsigned saved_cond;
7025 
7026   /* Align the previous label if needed.  */
7027   if (last_label_seen != NULL)
7028     {
7029       symbol_set_frag (last_label_seen, frag_now);
7030       S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
7031       S_SET_SEGMENT (last_label_seen, now_seg);
7032     }
7033 
7034   /* Update the current insn_sequence from the segment.  */
7035   insn_sequence = &seg_info (now_seg)->tc_segment_info_data.insn_sequence;
7036 
7037   inst.reloc.type = BFD_RELOC_UNUSED;
7038 
7039   DEBUG_TRACE ("\n\n");
7040   DEBUG_TRACE ("==============================");
7041   DEBUG_TRACE ("Enter md_assemble with %s", str);
7042 
7043   template = opcode_lookup (&p);
7044   if (!template)
7045     {
7046       /* It wasn't an instruction, but it might be a register alias
7047          created by an "alias .req reg" directive.  */
7048       if (!create_register_alias (str, p))
7049 	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
7050 		str);
7051       return;
7052     }
7053 
7054   skip_whitespace (p);
7055   if (*p == ',')
7056     {
7057       as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
7058 	      get_mnemonic_name (str), str);
7059       return;
7060     }
7061 
7062   init_operand_error_report ();
7063 
7064   /* Sections are assumed to start aligned.  In an executable section, there
7065      is no MAP_DATA symbol pending, so we only align the address during the
7066      MAP_DATA --> MAP_INSN transition.
7067      For other sections, this is not guaranteed.  */
7068   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
7069   if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
7070     frag_align_code (2, 0);
7071 
7072   saved_cond = inst.cond;
7073   reset_aarch64_instruction (&inst);
7074   inst.cond = saved_cond;
7075 
7076   /* Iterate through all opcode entries with the same mnemonic name.  */
7077   do
7078     {
7079       opcode = template->opcode;
7080 
7081       DEBUG_TRACE ("opcode %s found", opcode->name);
7082 #ifdef DEBUG_AARCH64
7083       if (debug_dump)
7084 	dump_opcode_operands (opcode);
7085 #endif /* DEBUG_AARCH64 */
7086 
7087       mapping_state (MAP_INSN);
7088 
7089       inst_base = &inst.base;
7090       inst_base->opcode = opcode;
7091 
7092       /* Truly conditionally executed instructions, e.g. b.cond.  */
7093       if (opcode->flags & F_COND)
7094 	{
7095 	  gas_assert (inst.cond != COND_ALWAYS);
7096 	  inst_base->cond = get_cond_from_value (inst.cond);
7097 	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
7098 	}
7099       else if (inst.cond != COND_ALWAYS)
7100 	{
7101 	  /* We should not get here: the assembly looks like a conditional
7102 	     instruction but the opcode found is unconditional.  */
7103 	  gas_assert (0);
7104 	  continue;
7105 	}
7106 
7107       if (parse_operands (p, opcode)
7108 	  && programmer_friendly_fixup (&inst)
7109 	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
7110 	{
7111 	  /* Check that this instruction is supported for this CPU.  */
7112 	  if (!opcode->avariant
7113 	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
7114 	    {
7115 	      as_bad (_("selected processor does not support `%s'"), str);
7116 	      return;
7117 	    }
7118 
7119 	  warn_unpredictable_ldst (&inst, str);
7120 
7121 	  if (inst.reloc.type == BFD_RELOC_UNUSED
7122 	      || !inst.reloc.need_libopcodes_p)
7123 	    output_inst (NULL);
7124 	  else
7125 	    {
7126 	      /* If a relocation is generated for the instruction, store the
7127 	         instruction information for the future fix-up.  */
7128 	      struct aarch64_inst *copy;
7129 	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
7130 	      copy = XNEW (struct aarch64_inst);
7131 	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
7132 	      output_inst (copy);
7133 	    }
7134 
7135 	  /* Issue non-fatal messages if any.  */
7136 	  output_operand_error_report (str, TRUE);
7137 	  return;
7138 	}
7139 
7140       template = template->next;
7141       if (template != NULL)
7142 	{
7143 	  reset_aarch64_instruction (&inst);
7144 	  inst.cond = saved_cond;
7145 	}
7146     }
7147   while (template != NULL);
7148 
7149   /* Issue the error messages if any.  */
7150   output_operand_error_report (str, FALSE);
7151 }
7152 
7153 /* Various frobbings of labels and their addresses.  */
7154 
7155 void
7156 aarch64_start_line_hook (void)
7157 {
7158   last_label_seen = NULL;
7159 }
7160 
7161 void
7162 aarch64_frob_label (symbolS * sym)
7163 {
7164   last_label_seen = sym;
7165 
7166   dwarf2_emit_label (sym);
7167 }
7168 
7169 void
7170 aarch64_frob_section (asection *sec ATTRIBUTE_UNUSED)
7171 {
7172   /* Check to see if we have a block to close.  */
7173   force_automatic_sequence_close ();
7174 }
7175 
7176 int
7177 aarch64_data_in_code (void)
7178 {
7179   if (!strncmp (input_line_pointer + 1, "data:", 5))
7180     {
7181       *input_line_pointer = '/';
7182       input_line_pointer += 5;
7183       *input_line_pointer = 0;
7184       return 1;
7185     }
7186 
7187   return 0;
7188 }
7189 
7190 char *
7191 aarch64_canonicalize_symbol_name (char *name)
7192 {
7193   int len;
7194 
7195   if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
7196     *(name + len - 5) = 0;
7197 
7198   return name;
7199 }
7200 
7201 /* Table of all register names defined by default.  The user can
7202    define additional names with .req.  Note that all register names
7203    should appear in both upper and lowercase variants.	Some registers
7204    also have mixed-case names.	*/
7205 
7206 #define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
7207 #define REGDEF_ALIAS(s, n, t) { #s, n, REG_TYPE_##t, FALSE}
7208 #define REGNUM(p,n,t) REGDEF(p##n, n, t)
7209 #define REGSET16(p,t) \
7210   REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
7211   REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
7212   REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
7213   REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
7214 #define REGSET31(p,t) \
7215   REGSET16(p, t), \
7216   REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
7217   REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
7218   REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
7219   REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
7220 #define REGSET(p,t) \
7221   REGSET31(p,t), REGNUM(p,31,t)
7222 
7223 /* These go into aarch64_reg_hsh hash-table.  */
7224 static const reg_entry reg_names[] = {
7225   /* Integer registers.  */
7226   REGSET31 (x, R_64), REGSET31 (X, R_64),
7227   REGSET31 (w, R_32), REGSET31 (W, R_32),
7228 
7229   REGDEF_ALIAS (ip0, 16, R_64), REGDEF_ALIAS (IP0, 16, R_64),
7230   REGDEF_ALIAS (ip1, 17, R_64), REGDEF_ALIAS (IP1, 17, R_64),
7231   REGDEF_ALIAS (fp, 29, R_64), REGDEF_ALIAS (FP, 29, R_64),
7232   REGDEF_ALIAS (lr, 30, R_64), REGDEF_ALIAS (LR, 30, R_64),
7233   REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
7234   REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
7235 
7236   REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
7237   REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
7238 
7239   /* Floating-point single precision registers.  */
7240   REGSET (s, FP_S), REGSET (S, FP_S),
7241 
7242   /* Floating-point double precision registers.  */
7243   REGSET (d, FP_D), REGSET (D, FP_D),
7244 
7245   /* Floating-point half precision registers.  */
7246   REGSET (h, FP_H), REGSET (H, FP_H),
7247 
7248   /* Floating-point byte precision registers.  */
7249   REGSET (b, FP_B), REGSET (B, FP_B),
7250 
7251   /* Floating-point quad precision registers.  */
7252   REGSET (q, FP_Q), REGSET (Q, FP_Q),
7253 
7254   /* FP/SIMD registers.  */
7255   REGSET (v, VN), REGSET (V, VN),
7256 
7257   /* SVE vector registers.  */
7258   REGSET (z, ZN), REGSET (Z, ZN),
7259 
7260   /* SVE predicate registers.  */
7261   REGSET16 (p, PN), REGSET16 (P, PN)
7262 };
7263 
7264 #undef REGDEF
7265 #undef REGDEF_ALIAS
7266 #undef REGNUM
7267 #undef REGSET16
7268 #undef REGSET31
7269 #undef REGSET
7270 
7271 #define N 1
7272 #define n 0
7273 #define Z 1
7274 #define z 0
7275 #define C 1
7276 #define c 0
7277 #define V 1
7278 #define v 0
7279 #define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
7280 static const asm_nzcv nzcv_names[] = {
7281   {"nzcv", B (n, z, c, v)},
7282   {"nzcV", B (n, z, c, V)},
7283   {"nzCv", B (n, z, C, v)},
7284   {"nzCV", B (n, z, C, V)},
7285   {"nZcv", B (n, Z, c, v)},
7286   {"nZcV", B (n, Z, c, V)},
7287   {"nZCv", B (n, Z, C, v)},
7288   {"nZCV", B (n, Z, C, V)},
7289   {"Nzcv", B (N, z, c, v)},
7290   {"NzcV", B (N, z, c, V)},
7291   {"NzCv", B (N, z, C, v)},
7292   {"NzCV", B (N, z, C, V)},
7293   {"NZcv", B (N, Z, c, v)},
7294   {"NZcV", B (N, Z, c, V)},
7295   {"NZCv", B (N, Z, C, v)},
7296   {"NZCV", B (N, Z, C, V)}
7297 };
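/* Illustrative note (not from the original sources): the table above lets the
   NZCV flags operand of the conditional compare instructions be spelled by
   name, e.g. NzCv instead of the numeric value #10 (N and C set, Z and V
   clear).  */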
7298 
7299 #undef N
7300 #undef n
7301 #undef Z
7302 #undef z
7303 #undef C
7304 #undef c
7305 #undef V
7306 #undef v
7307 #undef B
7308 
7309 /* MD interface: bits in the object file.  */
7310 
7311 /* Turn an integer of n bytes (in val) into a stream of bytes appropriate
7312    for use in the a.out file, and store them in the array pointed to by buf.
7313    This knows about the endian-ness of the target machine and does
7314    THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
7315    2 (short) and 4 (long).  Floating numbers are put out as a series of
7316    LITTLENUMS (shorts, here at least).	*/
7317 
7318 void
7319 md_number_to_chars (char *buf, valueT val, int n)
7320 {
7321   if (target_big_endian)
7322     number_to_chars_bigendian (buf, val, n);
7323   else
7324     number_to_chars_littleendian (buf, val, n);
7325 }
7326 
7327 /* MD interface: Sections.  */
7328 
7329 /* Estimate the size of a frag before relaxing.  Assume everything fits in
7330    4 bytes.  */
7331 
7332 int
7333 md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
7334 {
7335   fragp->fr_var = 4;
7336   return 4;
7337 }
7338 
7339 /* Round up a section size to the appropriate boundary.	 */
7340 
7341 valueT
7342 md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
7343 {
7344   return size;
7345 }
7346 
7347 /* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
7348    of an rs_align_code fragment.
7349 
7350    Here we fill the frag with the appropriate info for padding the
7351    output stream.  The resulting frag will consist of a fixed (fr_fix)
7352    and of a repeating (fr_var) part.
7353 
7354    The fixed content is always emitted before the repeating content and
7355    these two parts are used as follows in constructing the output:
7356    - the fixed part will be used to align to a valid instruction word
7357      boundary, in case that we start at a misaligned address; as no
7358      executable instruction can live at the misaligned location, we
7359      simply fill with zeros;
7360    - the variable part will be used to cover the remaining padding and
7361      we fill using the AArch64 NOP instruction.
7362 
7363    Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
7364    enough storage space for up to 3 bytes of padding back to a valid
7365    instruction alignment and exactly 4 bytes to store the NOP pattern.  */
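/* Worked example (illustrative only): when aligning to 16 bytes from an
   offset of the form 16n + 6 there are 10 bytes to pad; the fixed part emits
   2 zero bytes to reach a 4-byte instruction boundary and the variable part
   then repeats the 4-byte NOP pattern to cover the remaining 8 bytes.  */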
7366 
7367 void
7368 aarch64_handle_align (fragS * fragP)
7369 {
7370   /* NOP = d503201f */
7371   /* AArch64 instructions are always little-endian.  */
7372   static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
7373 
7374   int bytes, fix, noop_size;
7375   char *p;
7376 
7377   if (fragP->fr_type != rs_align_code)
7378     return;
7379 
7380   bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
7381   p = fragP->fr_literal + fragP->fr_fix;
7382 
7383 #ifdef OBJ_ELF
7384   gas_assert (fragP->tc_frag_data.recorded);
7385 #endif
7386 
7387   noop_size = sizeof (aarch64_noop);
7388 
7389   fix = bytes & (noop_size - 1);
7390   if (fix)
7391     {
7392 #ifdef OBJ_ELF
7393       insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
7394 #endif
7395       memset (p, 0, fix);
7396       p += fix;
7397       fragP->fr_fix += fix;
7398     }
7399 
7400   if (noop_size)
7401     memcpy (p, aarch64_noop, noop_size);
7402   fragP->fr_var = noop_size;
7403 }
7404 
7405 /* Perform target specific initialisation of a frag.
7406    Note - despite the name this initialisation is not done when the frag
7407    is created, but only when its type is assigned.  A frag can be created
7408    and used a long time before its type is set, so beware of assuming that
7409    this initialisation is performed first.  */
7410 
7411 #ifndef OBJ_ELF
7412 void
7413 aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
7414 		   int max_chars ATTRIBUTE_UNUSED)
7415 {
7416 }
7417 
7418 #else /* OBJ_ELF is defined.  */
7419 void
7420 aarch64_init_frag (fragS * fragP, int max_chars)
7421 {
7422   /* Record a mapping symbol for alignment frags.  We will delete this
7423      later if the alignment ends up empty.  */
7424   if (!fragP->tc_frag_data.recorded)
7425     fragP->tc_frag_data.recorded = 1;
7426 
7427   /* PR 21809: Do not set a mapping state for debug sections
7428      - it just confuses other tools.  */
7429   if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
7430     return;
7431 
7432   switch (fragP->fr_type)
7433     {
7434     case rs_align_test:
7435     case rs_fill:
7436       mapping_state_2 (MAP_DATA, max_chars);
7437       break;
7438     case rs_align:
7439       /* PR 20364: We can get alignment frags in code sections,
7440 	 so do not just assume that we should use the MAP_DATA state.  */
7441       mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
7442       break;
7443     case rs_align_code:
7444       mapping_state_2 (MAP_INSN, max_chars);
7445       break;
7446     default:
7447       break;
7448     }
7449 }
7450 
7451 /* Initialize the DWARF-2 unwind information for this procedure.  */
7452 
7453 void
7454 tc_aarch64_frame_initial_instructions (void)
7455 {
7456   cfi_add_CFA_def_cfa (REG_SP, 0);
7457 }
7458 #endif /* OBJ_ELF */
7459 
7460 /* Convert REGNAME to a DWARF-2 register number.  */
7461 
7462 int
7463 tc_aarch64_regname_to_dw2regnum (char *regname)
7464 {
7465   const reg_entry *reg = parse_reg (&regname);
7466   if (reg == NULL)
7467     return -1;
7468 
7469   switch (reg->type)
7470     {
7471     case REG_TYPE_SP_32:
7472     case REG_TYPE_SP_64:
7473     case REG_TYPE_R_32:
7474     case REG_TYPE_R_64:
7475       return reg->number;
7476 
7477     case REG_TYPE_FP_B:
7478     case REG_TYPE_FP_H:
7479     case REG_TYPE_FP_S:
7480     case REG_TYPE_FP_D:
7481     case REG_TYPE_FP_Q:
7482       return reg->number + 64;
7483 
7484     default:
7485       break;
7486     }
7487   return -1;
7488 }
7489 
7490 /* Implement DWARF2_ADDR_SIZE.  */
7491 
7492 int
7493 aarch64_dwarf2_addr_size (void)
7494 {
7495 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7496   if (ilp32_p)
7497     return 4;
7498 #endif
7499   return bfd_arch_bits_per_address (stdoutput) / 8;
7500 }
7501 
7502 /* MD interface: Symbol and relocation handling.  */
7503 
7504 /* Return the address within the segment that a PC-relative fixup is
7505    relative to.  For AArch64, PC-relative fixups applied to instructions
7506    are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
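/* (For AArch64, AARCH64_PCREL_OFFSET is expected to be 0; there is no legacy
   pipeline compensation as on 32-bit ARM, so the fixup is simply relative to
   the address of the instruction itself.)  */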
7507 
7508 long
7509 md_pcrel_from_section (fixS * fixP, segT seg)
7510 {
7511   offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7512 
7513   /* If this is pc-relative and we are going to emit a relocation
7514      then we just want to put out any pipeline compensation that the linker
7515      will need.  Otherwise we want to use the calculated base.  */
7516   if (fixP->fx_pcrel
7517       && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7518 	  || aarch64_force_relocation (fixP)))
7519     base = 0;
7520 
7521   /* AArch64 should be consistent for all pc-relative relocations.  */
7522   return base + AARCH64_PCREL_OFFSET;
7523 }
7524 
7525 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
7526    Otherwise we have no need to provide default values for symbols.  */
7527 
7528 symbolS *
7529 md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7530 {
7531 #ifdef OBJ_ELF
7532   if (name[0] == '_' && name[1] == 'G'
7533       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7534     {
7535       if (!GOT_symbol)
7536 	{
7537 	  if (symbol_find (name))
7538 	    as_bad (_("GOT already in the symbol table"));
7539 
7540 	  GOT_symbol = symbol_new (name, undefined_section,
7541 				   (valueT) 0, &zero_address_frag);
7542 	}
7543 
7544       return GOT_symbol;
7545     }
7546 #endif
7547 
7548   return 0;
7549 }
7550 
7551 /* Return non-zero if the indicated VALUE has overflowed the maximum
7552    range expressible by an unsigned number with the indicated number of
7553    BITS.  */
7554 
7555 static bfd_boolean
7556 unsigned_overflow (valueT value, unsigned bits)
7557 {
7558   valueT lim;
7559   if (bits >= sizeof (valueT) * 8)
7560     return FALSE;
7561   lim = (valueT) 1 << bits;
7562   return (value >= lim);
7563 }
7564 
7565 
7566 /* Return non-zero if the indicated VALUE has overflowed the maximum
7567    range expressible by a signed number with the indicated number of
7568    BITS.  */
7569 
7570 static bfd_boolean
7571 signed_overflow (offsetT value, unsigned bits)
7572 {
7573   offsetT lim;
7574   if (bits >= sizeof (offsetT) * 8)
7575     return FALSE;
7576   lim = (offsetT) 1 << (bits - 1);
7577   return (value < -lim || value >= lim);
7578 }
7579 
7580 /* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7581    unsigned immediate offset load/store instruction, try to encode it as
7582    an unscaled, 9-bit, signed immediate offset load/store instruction.
7583    Return TRUE if it is successful; otherwise return FALSE.
7584 
7585    As a programmer-friendly assembler, we generate LDUR/STUR instructions
7586    in response to the standard LDR/STR mnemonics when the immediate offset
7587    is unambiguous, i.e. when it is negative or unaligned.  */
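/* Illustrative example (not from the original sources):
       ldr x0, [x1, #-8]        and        ldr w0, [x1, #1]
   cannot be encoded with a scaled, unsigned 12-bit offset, so they are
   assembled as if written
       ldur x0, [x1, #-8]       and        ldur w0, [x1, #1]  */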
7588 
7589 static bfd_boolean
7590 try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7591 {
7592   int idx;
7593   enum aarch64_op new_op;
7594   const aarch64_opcode *new_opcode;
7595 
7596   gas_assert (instr->opcode->iclass == ldst_pos);
7597 
7598   switch (instr->opcode->op)
7599     {
7600     case OP_LDRB_POS: new_op = OP_LDURB; break;
7601     case OP_STRB_POS: new_op = OP_STURB; break;
7602     case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7603     case OP_LDRH_POS: new_op = OP_LDURH; break;
7604     case OP_STRH_POS: new_op = OP_STURH; break;
7605     case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7606     case OP_LDR_POS: new_op = OP_LDUR; break;
7607     case OP_STR_POS: new_op = OP_STUR; break;
7608     case OP_LDRF_POS: new_op = OP_LDURV; break;
7609     case OP_STRF_POS: new_op = OP_STURV; break;
7610     case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7611     case OP_PRFM_POS: new_op = OP_PRFUM; break;
7612     default: new_op = OP_NIL; break;
7613     }
7614 
7615   if (new_op == OP_NIL)
7616     return FALSE;
7617 
7618   new_opcode = aarch64_get_opcode (new_op);
7619   gas_assert (new_opcode != NULL);
7620 
7621   DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7622 	       instr->opcode->op, new_opcode->op);
7623 
7624   aarch64_replace_opcode (instr, new_opcode);
7625 
7626   /* Clear up the ADDR_SIMM9's qualifier; otherwise the
7627      qualifier matching may fail because the out-of-date qualifier will
7628      prevent the operand being updated with a new and correct qualifier.  */
7629   idx = aarch64_operand_index (instr->opcode->operands,
7630 			       AARCH64_OPND_ADDR_SIMM9);
7631   gas_assert (idx == 1);
7632   instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7633 
7634   DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7635 
7636   if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL,
7637 			      insn_sequence))
7638     return FALSE;
7639 
7640   return TRUE;
7641 }
7642 
7643 /* Called by fix_insn to fix a MOV immediate alias instruction.
7644 
7645    Operand for a generic move immediate instruction, which is an alias
7646    instruction that generates a single MOVZ, MOVN or ORR instruction to load
7647    a 32-bit/64-bit immediate value into a general register.  An assembler
7648    error shall result if the immediate cannot be created by a single one of
7649    these instructions.  If there is a choice, then to ensure reversibility an
7650    assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
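/* Illustrative examples (not from the original sources): "mov x0, #0x10000"
   is encoded as MOVZ x0, #0x1, lsl #16; "mov x0, #-2" as MOVN x0, #0x1; and
   "mov x0, #0x5555555555555555" only fits the bitmask-immediate form, i.e.
   ORR x0, xzr, #0x5555555555555555.  */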
7651 
7652 static void
7653 fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7654 {
7655   const aarch64_opcode *opcode;
7656 
7657   /* Need to check if the destination is SP/ZR.  The check has to be done
7658      before any aarch64_replace_opcode.  */
7659   int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7660   int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7661 
7662   instr->operands[1].imm.value = value;
7663   instr->operands[1].skip = 0;
7664 
7665   if (try_mov_wide_p)
7666     {
7667       /* Try the MOVZ alias.  */
7668       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7669       aarch64_replace_opcode (instr, opcode);
7670       if (aarch64_opcode_encode (instr->opcode, instr,
7671 				 &instr->value, NULL, NULL, insn_sequence))
7672 	{
7673 	  put_aarch64_insn (buf, instr->value);
7674 	  return;
7675 	}
7676       /* Try the MOVK alias.  */
7677       opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7678       aarch64_replace_opcode (instr, opcode);
7679       if (aarch64_opcode_encode (instr->opcode, instr,
7680 				 &instr->value, NULL, NULL, insn_sequence))
7681 	{
7682 	  put_aarch64_insn (buf, instr->value);
7683 	  return;
7684 	}
7685     }
7686 
7687   if (try_mov_bitmask_p)
7688     {
7689       /* Try the ORR alias.  */
7690       opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7691       aarch64_replace_opcode (instr, opcode);
7692       if (aarch64_opcode_encode (instr->opcode, instr,
7693 				 &instr->value, NULL, NULL, insn_sequence))
7694 	{
7695 	  put_aarch64_insn (buf, instr->value);
7696 	  return;
7697 	}
7698     }
7699 
7700   as_bad_where (fixP->fx_file, fixP->fx_line,
7701 		_("immediate cannot be moved by a single instruction"));
7702 }
7703 
7704 /* An instruction operand which is immediate-related may have a symbol
7705    used in the assembly, e.g.
7706 
7707      mov     w0, u32
7708      .set    u32,    0x00ffff00
7709 
7710    At the time when the assembly instruction is parsed, a referenced symbol
7711    like 'u32' in the above example may not have been seen yet; a fixS is
7712    created in such a case and is handled here after symbols have been
7713    resolved.  The instruction is fixed up with VALUE using the information
7714    in *FIXP plus extra information in FLAGS.
7715 
7716    This function is called by md_apply_fix to fix up instructions that need
7717    a fix-up described above but does not involve any linker-time relocation.  */
7718 
7719 static void
7720 fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7721 {
7722   int idx;
7723   uint32_t insn;
7724   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7725   enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7726   aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7727 
7728   if (new_inst)
7729     {
7730       /* Now the instruction is about to be fixed-up, so the operand that
7731 	 was previously marked as 'ignored' needs to be unmarked in order
7732 	 to get the encoding done properly.  */
7733       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7734       new_inst->operands[idx].skip = 0;
7735     }
7736 
7737   gas_assert (opnd != AARCH64_OPND_NIL);
7738 
7739   switch (opnd)
7740     {
7741     case AARCH64_OPND_EXCEPTION:
7742       if (unsigned_overflow (value, 16))
7743 	as_bad_where (fixP->fx_file, fixP->fx_line,
7744 		      _("immediate out of range"));
7745       insn = get_aarch64_insn (buf);
7746       insn |= encode_svc_imm (value);
7747       put_aarch64_insn (buf, insn);
7748       break;
7749 
7750     case AARCH64_OPND_AIMM:
7751       /* ADD or SUB with immediate.
7752 	 NOTE this assumes we come here with an add/sub shifted reg encoding
7753 		  3  322|2222|2  2  2 21111 111111
7754 		  1  098|7654|3  2  1 09876 543210 98765 43210
7755 	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
7756 	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
7757 	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
7758 	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
7759 	 ->
7760 		  3  322|2222|2 2   221111111111
7761 		  1  098|7654|3 2   109876543210 98765 43210
7762 	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
7763 	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
7764 	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
7765 	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
7766 	 Fields sf Rn Rd are already set.  */
7767       insn = get_aarch64_insn (buf);
7768       if (value < 0)
7769 	{
7770 	  /* Add <-> sub.  */
7771 	  insn = reencode_addsub_switch_add_sub (insn);
7772 	  value = -value;
7773 	}
7774 
7775       if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7776 	  && unsigned_overflow (value, 12))
7777 	{
7778 	  /* Try to shift the value by 12 to make it fit.  */
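	  /* Illustrative example (not from the original sources): a resolved
	     value of 0x12000 does not fit in 12 bits directly, but it is
	     accepted here as imm12 = 0x12 with the shift-by-12 flag set.  */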
7779 	  if (((value >> 12) << 12) == value
7780 	      && ! unsigned_overflow (value, 12 + 12))
7781 	    {
7782 	      value >>= 12;
7783 	      insn |= encode_addsub_imm_shift_amount (1);
7784 	    }
7785 	}
7786 
7787       if (unsigned_overflow (value, 12))
7788 	as_bad_where (fixP->fx_file, fixP->fx_line,
7789 		      _("immediate out of range"));
7790 
7791       insn |= encode_addsub_imm (value);
7792 
7793       put_aarch64_insn (buf, insn);
7794       break;
7795 
7796     case AARCH64_OPND_SIMD_IMM:
7797     case AARCH64_OPND_SIMD_IMM_SFT:
7798     case AARCH64_OPND_LIMM:
7799       /* Bit mask immediate.  */
7800       gas_assert (new_inst != NULL);
7801       idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7802       new_inst->operands[idx].imm.value = value;
7803       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7804 				 &new_inst->value, NULL, NULL, insn_sequence))
7805 	put_aarch64_insn (buf, new_inst->value);
7806       else
7807 	as_bad_where (fixP->fx_file, fixP->fx_line,
7808 		      _("invalid immediate"));
7809       break;
7810 
7811     case AARCH64_OPND_HALF:
7812       /* 16-bit unsigned immediate.  */
7813       if (unsigned_overflow (value, 16))
7814 	as_bad_where (fixP->fx_file, fixP->fx_line,
7815 		      _("immediate out of range"));
7816       insn = get_aarch64_insn (buf);
7817       insn |= encode_movw_imm (value & 0xffff);
7818       put_aarch64_insn (buf, insn);
7819       break;
7820 
7821     case AARCH64_OPND_IMM_MOV:
7822       /* Operand for a generic move immediate instruction, which is
7823 	 an alias instruction that generates a single MOVZ, MOVN or ORR
7824 	 instruction to load a 32-bit/64-bit immediate value into a general
7825 	 register.  An assembler error shall result if the immediate cannot be
7826 	 created by a single one of these instructions.  If there is a choice,
7827 	 then to ensure reversibility an assembler must prefer a MOVZ to MOVN,
7828 	 and MOVZ or MOVN to ORR.  */
7829       gas_assert (new_inst != NULL);
7830       fix_mov_imm_insn (fixP, buf, new_inst, value);
7831       break;
7832 
7833     case AARCH64_OPND_ADDR_SIMM7:
7834     case AARCH64_OPND_ADDR_SIMM9:
7835     case AARCH64_OPND_ADDR_SIMM9_2:
7836     case AARCH64_OPND_ADDR_SIMM10:
7837     case AARCH64_OPND_ADDR_UIMM12:
7838     case AARCH64_OPND_ADDR_SIMM11:
7839     case AARCH64_OPND_ADDR_SIMM13:
7840       /* Immediate offset in an address.  */
7841       insn = get_aarch64_insn (buf);
7842 
7843       gas_assert (new_inst != NULL && new_inst->value == insn);
7844       gas_assert (new_inst->opcode->operands[1] == opnd
7845 		  || new_inst->opcode->operands[2] == opnd);
7846 
7847       /* Get the index of the address operand.  */
7848       if (new_inst->opcode->operands[1] == opnd)
7849 	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
7850 	idx = 1;
7851       else
7852 	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
7853 	idx = 2;
7854 
7855       /* Update the resolved offset value.  */
7856       new_inst->operands[idx].addr.offset.imm = value;
7857 
7858       /* Encode/fix-up.  */
7859       if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7860 				 &new_inst->value, NULL, NULL, insn_sequence))
7861 	{
7862 	  put_aarch64_insn (buf, new_inst->value);
7863 	  break;
7864 	}
7865       else if (new_inst->opcode->iclass == ldst_pos
7866 	       && try_to_encode_as_unscaled_ldst (new_inst))
7867 	{
7868 	  put_aarch64_insn (buf, new_inst->value);
7869 	  break;
7870 	}
7871 
7872       as_bad_where (fixP->fx_file, fixP->fx_line,
7873 		    _("immediate offset out of range"));
7874       break;
7875 
7876     default:
7877       gas_assert (0);
7878       as_fatal (_("unhandled operand code %d"), opnd);
7879     }
7880 }
7881 
7882 /* Apply a fixup (fixP) to segment data, once it has been determined
7883    by our caller that we have all the info we need to fix it up.
7884 
7885    Parameter valP is the pointer to the value of the bits.  */
7886 
7887 void
7888 md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7889 {
7890   offsetT value = *valP;
7891   uint32_t insn;
7892   char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7893   int scale;
7894   unsigned flags = fixP->fx_addnumber;
7895 
7896   DEBUG_TRACE ("\n\n");
7897   DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7898   DEBUG_TRACE ("Enter md_apply_fix");
7899 
7900   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7901 
7902   /* Note whether this will delete the relocation.  */
7903 
7904   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7905     fixP->fx_done = 1;
7906 
7907   /* Process the relocations.  */
7908   switch (fixP->fx_r_type)
7909     {
7910     case BFD_RELOC_NONE:
7911       /* This will need to go in the object file.  */
7912       fixP->fx_done = 0;
7913       break;
7914 
7915     case BFD_RELOC_8:
7916     case BFD_RELOC_8_PCREL:
7917       if (fixP->fx_done || !seg->use_rela_p)
7918 	md_number_to_chars (buf, value, 1);
7919       break;
7920 
7921     case BFD_RELOC_16:
7922     case BFD_RELOC_16_PCREL:
7923       if (fixP->fx_done || !seg->use_rela_p)
7924 	md_number_to_chars (buf, value, 2);
7925       break;
7926 
7927     case BFD_RELOC_32:
7928     case BFD_RELOC_32_PCREL:
7929       if (fixP->fx_done || !seg->use_rela_p)
7930 	md_number_to_chars (buf, value, 4);
7931       break;
7932 
7933     case BFD_RELOC_64:
7934     case BFD_RELOC_64_PCREL:
7935       if (fixP->fx_done || !seg->use_rela_p)
7936 	md_number_to_chars (buf, value, 8);
7937       break;
7938 
7939     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7940       /* We claim that these fixups have been processed here, even if
7941          in fact we generate an error because we do not have a reloc
7942          for them, so tc_gen_reloc() will reject them.  */
7943       fixP->fx_done = 1;
7944       if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7945 	{
7946 	  as_bad_where (fixP->fx_file, fixP->fx_line,
7947 			_("undefined symbol %s used as an immediate value"),
7948 			S_GET_NAME (fixP->fx_addsy));
7949 	  goto apply_fix_return;
7950 	}
7951       fix_insn (fixP, flags, value);
7952       break;
7953 
7954     case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7955       if (fixP->fx_done || !seg->use_rela_p)
7956 	{
7957 	  if (value & 3)
7958 	    as_bad_where (fixP->fx_file, fixP->fx_line,
7959 			  _("pc-relative load offset not word aligned"));
7960 	  if (signed_overflow (value, 21))
7961 	    as_bad_where (fixP->fx_file, fixP->fx_line,
7962 			  _("pc-relative load offset out of range"));
7963 	  insn = get_aarch64_insn (buf);
7964 	  insn |= encode_ld_lit_ofs_19 (value >> 2);
7965 	  put_aarch64_insn (buf, insn);
7966 	}
7967       break;
7968 
7969     case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7970       if (fixP->fx_done || !seg->use_rela_p)
7971 	{
7972 	  if (signed_overflow (value, 21))
7973 	    as_bad_where (fixP->fx_file, fixP->fx_line,
7974 			  _("pc-relative address offset out of range"));
7975 	  insn = get_aarch64_insn (buf);
7976 	  insn |= encode_adr_imm (value);
7977 	  put_aarch64_insn (buf, insn);
7978 	}
7979       break;
7980 
7981     case BFD_RELOC_AARCH64_BRANCH19:
7982       if (fixP->fx_done || !seg->use_rela_p)
7983 	{
7984 	  if (value & 3)
7985 	    as_bad_where (fixP->fx_file, fixP->fx_line,
7986 			  _("conditional branch target not word aligned"));
7987 	  if (signed_overflow (value, 21))
7988 	    as_bad_where (fixP->fx_file, fixP->fx_line,
7989 			  _("conditional branch out of range"));
7990 	  insn = get_aarch64_insn (buf);
7991 	  insn |= encode_cond_branch_ofs_19 (value >> 2);
7992 	  put_aarch64_insn (buf, insn);
7993 	}
7994       break;
7995 
7996     case BFD_RELOC_AARCH64_TSTBR14:
7997       if (fixP->fx_done || !seg->use_rela_p)
7998 	{
7999 	  if (value & 3)
8000 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8001 			  _("conditional branch target not word aligned"));
8002 	  if (signed_overflow (value, 16))
8003 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8004 			  _("conditional branch out of range"));
8005 	  insn = get_aarch64_insn (buf);
8006 	  insn |= encode_tst_branch_ofs_14 (value >> 2);
8007 	  put_aarch64_insn (buf, insn);
8008 	}
8009       break;
8010 
8011     case BFD_RELOC_AARCH64_CALL26:
8012     case BFD_RELOC_AARCH64_JUMP26:
8013       if (fixP->fx_done || !seg->use_rela_p)
8014 	{
8015 	  if (value & 3)
8016 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8017 			  _("branch target not word aligned"));
8018 	  if (signed_overflow (value, 28))
8019 	    as_bad_where (fixP->fx_file, fixP->fx_line,
8020 			  _("branch out of range"));
8021 	  insn = get_aarch64_insn (buf);
8022 	  insn |= encode_branch_ofs_26 (value >> 2);
8023 	  put_aarch64_insn (buf, insn);
8024 	}
8025       break;
8026 
8027     case BFD_RELOC_AARCH64_MOVW_G0:
8028     case BFD_RELOC_AARCH64_MOVW_G0_NC:
8029     case BFD_RELOC_AARCH64_MOVW_G0_S:
8030     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
8031     case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8032     case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
8033       scale = 0;
8034       goto movw_common;
8035     case BFD_RELOC_AARCH64_MOVW_G1:
8036     case BFD_RELOC_AARCH64_MOVW_G1_NC:
8037     case BFD_RELOC_AARCH64_MOVW_G1_S:
8038     case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8039     case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8040     case BFD_RELOC_AARCH64_MOVW_PREL_G1_NC:
8041       scale = 16;
8042       goto movw_common;
8043     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8044       scale = 0;
8045       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8046       /* Should always be exported to object file, see
8047 	 aarch64_force_relocation().  */
8048       gas_assert (!fixP->fx_done);
8049       gas_assert (seg->use_rela_p);
8050       goto movw_common;
8051     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8052       scale = 16;
8053       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8054       /* Should always be exported to object file, see
8055 	 aarch64_force_relocation().  */
8056       gas_assert (!fixP->fx_done);
8057       gas_assert (seg->use_rela_p);
8058       goto movw_common;
8059     case BFD_RELOC_AARCH64_MOVW_G2:
8060     case BFD_RELOC_AARCH64_MOVW_G2_NC:
8061     case BFD_RELOC_AARCH64_MOVW_G2_S:
8062     case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8063     case BFD_RELOC_AARCH64_MOVW_PREL_G2_NC:
8064       scale = 32;
8065       goto movw_common;
8066     case BFD_RELOC_AARCH64_MOVW_G3:
8067     case BFD_RELOC_AARCH64_MOVW_PREL_G3:
8068       scale = 48;
8069     movw_common:
8070       if (fixP->fx_done || !seg->use_rela_p)
8071 	{
8072 	  insn = get_aarch64_insn (buf);
8073 
8074 	  if (!fixP->fx_done)
8075 	    {
8076 	      /* REL signed addend must fit in 16 bits */
8077 	      if (signed_overflow (value, 16))
8078 		as_bad_where (fixP->fx_file, fixP->fx_line,
8079 			      _("offset out of range"));
8080 	    }
8081 	  else
8082 	    {
8083 	      /* Check for overflow and scale. */
8084 	      switch (fixP->fx_r_type)
8085 		{
8086 		case BFD_RELOC_AARCH64_MOVW_G0:
8087 		case BFD_RELOC_AARCH64_MOVW_G1:
8088 		case BFD_RELOC_AARCH64_MOVW_G2:
8089 		case BFD_RELOC_AARCH64_MOVW_G3:
8090 		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
8091 		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8092 		  if (unsigned_overflow (value, scale + 16))
8093 		    as_bad_where (fixP->fx_file, fixP->fx_line,
8094 				  _("unsigned value out of range"));
8095 		  break;
8096 		case BFD_RELOC_AARCH64_MOVW_G0_S:
8097 		case BFD_RELOC_AARCH64_MOVW_G1_S:
8098 		case BFD_RELOC_AARCH64_MOVW_G2_S:
8099 		case BFD_RELOC_AARCH64_MOVW_PREL_G0:
8100 		case BFD_RELOC_AARCH64_MOVW_PREL_G1:
8101 		case BFD_RELOC_AARCH64_MOVW_PREL_G2:
8102 		  /* NOTE: We can only come here with movz or movn. */
8103 		  if (signed_overflow (value, scale + 16))
8104 		    as_bad_where (fixP->fx_file, fixP->fx_line,
8105 				  _("signed value out of range"));
8106 		  if (value < 0)
8107 		    {
8108 		      /* Force use of MOVN.  */
8109 		      value = ~value;
8110 		      insn = reencode_movzn_to_movn (insn);
8111 		    }
8112 		  else
8113 		    {
8114 		      /* Force use of MOVZ.  */
8115 		      insn = reencode_movzn_to_movz (insn);
8116 		    }
8117 		  break;
8118 		default:
8119 		  /* Unchecked relocations.  */
8120 		  break;
8121 		}
8122 	      value >>= scale;
8123 	    }
8124 
8125 	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
8126 	  insn |= encode_movw_imm (value & 0xffff);
8127 
8128 	  put_aarch64_insn (buf, insn);
8129 	}
8130       break;
8131 
8132     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8133       fixP->fx_r_type = (ilp32_p
8134 			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
8135 			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8136       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8137       /* Should always be exported to object file, see
8138 	 aarch64_force_relocation().  */
8139       gas_assert (!fixP->fx_done);
8140       gas_assert (seg->use_rela_p);
8141       break;
8142 
8143     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8144       fixP->fx_r_type = (ilp32_p
8145 			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
8146 			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12);
8147       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8148       /* Should always be exported to object file, see
8149 	 aarch64_force_relocation().  */
8150       gas_assert (!fixP->fx_done);
8151       gas_assert (seg->use_rela_p);
8152       break;
8153 
8154     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8155     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8156     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8157     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8158     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8159     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8160     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8161     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8162     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8163     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8164     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8165     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8166     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8167     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8168     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8169     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8170     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8171     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8172     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8173     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8174     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8175     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8176     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8177     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8178     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8179     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8180     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8181     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8182     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8183     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8184     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8185     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8186     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8187     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8188     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8189     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8190     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8191     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8192     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8193     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8194     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8195     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8196     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8197     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8198     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8199     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8200     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8201     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8202     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8203     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8204     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8205     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8206       S_SET_THREAD_LOCAL (fixP->fx_addsy);
8207       /* Should always be exported to object file, see
8208 	 aarch64_force_relocation().  */
8209       gas_assert (!fixP->fx_done);
8210       gas_assert (seg->use_rela_p);
8211       break;
8212 
8213     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8214       /* Should always be exported to object file, see
8215 	 aarch64_force_relocation().  */
8216       fixP->fx_r_type = (ilp32_p
8217 			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
8218 			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
8219       gas_assert (!fixP->fx_done);
8220       gas_assert (seg->use_rela_p);
8221       break;
8222 
8223     case BFD_RELOC_AARCH64_ADD_LO12:
8224     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8225     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8226     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8227     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8228     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8229     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8230     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8231     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8232     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8233     case BFD_RELOC_AARCH64_LDST128_LO12:
8234     case BFD_RELOC_AARCH64_LDST16_LO12:
8235     case BFD_RELOC_AARCH64_LDST32_LO12:
8236     case BFD_RELOC_AARCH64_LDST64_LO12:
8237     case BFD_RELOC_AARCH64_LDST8_LO12:
8238       /* Should always be exported to object file, see
8239 	 aarch64_force_relocation().  */
8240       gas_assert (!fixP->fx_done);
8241       gas_assert (seg->use_rela_p);
8242       break;
8243 
8244     case BFD_RELOC_AARCH64_TLSDESC_ADD:
8245     case BFD_RELOC_AARCH64_TLSDESC_CALL:
8246     case BFD_RELOC_AARCH64_TLSDESC_LDR:
8247       break;
8248 
8249     case BFD_RELOC_UNUSED:
8250       /* An error will already have been reported.  */
8251       break;
8252 
8253     default:
8254       as_bad_where (fixP->fx_file, fixP->fx_line,
8255 		    _("unexpected %s fixup"),
8256 		    bfd_get_reloc_code_name (fixP->fx_r_type));
8257       break;
8258     }
8259 
8260 apply_fix_return:
8261   /* Free the allocated struct aarch64_inst.
8262      N.B. currently only a very limited number of fix-up types actually use
8263      this field, so the impact on performance should be minimal.  */
8264   if (fixP->tc_fix_data.inst != NULL)
8265     free (fixP->tc_fix_data.inst);
8266 
8267   return;
8268 }
8269 
8270 /* Translate internal representation of relocation info to BFD target
8271    format.  */
8272 
8273 arelent *
8274 tc_gen_reloc (asection * section, fixS * fixp)
8275 {
8276   arelent *reloc;
8277   bfd_reloc_code_real_type code;
8278 
8279   reloc = XNEW (arelent);
8280 
8281   reloc->sym_ptr_ptr = XNEW (asymbol *);
8282   *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
8283   reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
8284 
8285   if (fixp->fx_pcrel)
8286     {
8287       if (section->use_rela_p)
8288 	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
8289       else
8290 	fixp->fx_offset = reloc->address;
8291     }
8292   reloc->addend = fixp->fx_offset;
8293 
8294   code = fixp->fx_r_type;
8295   switch (code)
8296     {
8297     case BFD_RELOC_16:
8298       if (fixp->fx_pcrel)
8299 	code = BFD_RELOC_16_PCREL;
8300       break;
8301 
8302     case BFD_RELOC_32:
8303       if (fixp->fx_pcrel)
8304 	code = BFD_RELOC_32_PCREL;
8305       break;
8306 
8307     case BFD_RELOC_64:
8308       if (fixp->fx_pcrel)
8309 	code = BFD_RELOC_64_PCREL;
8310       break;
8311 
8312     default:
8313       break;
8314     }
8315 
8316   reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
8317   if (reloc->howto == NULL)
8318     {
8319       as_bad_where (fixp->fx_file, fixp->fx_line,
8320 		    _
8321 		    ("cannot represent %s relocation in this object file format"),
8322 		    bfd_get_reloc_code_name (code));
8323       return NULL;
8324     }
8325 
8326   return reloc;
8327 }
8328 
8329 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
8330 
8331 void
8332 cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
8333 {
8334   bfd_reloc_code_real_type type;
8335   int pcrel = 0;
8336 
8337   /* Pick a reloc.
8338      FIXME: @@ Should look at CPU word size.  */
8339   switch (size)
8340     {
8341     case 1:
8342       type = BFD_RELOC_8;
8343       break;
8344     case 2:
8345       type = BFD_RELOC_16;
8346       break;
8347     case 4:
8348       type = BFD_RELOC_32;
8349       break;
8350     case 8:
8351       type = BFD_RELOC_64;
8352       break;
8353     default:
8354       as_bad (_("cannot do %u-byte relocation"), size);
8355       type = BFD_RELOC_UNUSED;
8356       break;
8357     }
8358 
8359   fix_new_exp (frag, where, (int) size, exp, pcrel, type);
8360 }
8361 
8362 int
8363 aarch64_force_relocation (struct fix *fixp)
8364 {
8365   switch (fixp->fx_r_type)
8366     {
8367     case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
8368       /* Perform these "immediate" internal relocations
8369          even if the symbol is extern or weak.  */
8370       return 0;
8371 
8372     case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
8373     case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
8374     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
8375       /* Pseudo relocs that need to be fixed up according to
8376 	 ilp32_p.  */
8377       return 0;
8378 
8379     case BFD_RELOC_AARCH64_ADD_LO12:
8380     case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
8381     case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
8382     case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
8383     case BFD_RELOC_AARCH64_GOT_LD_PREL19:
8384     case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
8385     case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
8386     case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
8387     case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
8388     case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
8389     case BFD_RELOC_AARCH64_LDST128_LO12:
8390     case BFD_RELOC_AARCH64_LDST16_LO12:
8391     case BFD_RELOC_AARCH64_LDST32_LO12:
8392     case BFD_RELOC_AARCH64_LDST64_LO12:
8393     case BFD_RELOC_AARCH64_LDST8_LO12:
8394     case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
8395     case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
8396     case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
8397     case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
8398     case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
8399     case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
8400     case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
8401     case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
8402     case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
8403     case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
8404     case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
8405     case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
8406     case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
8407     case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
8408     case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
8409     case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
8410     case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
8411     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
8412     case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
8413     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
8414     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
8415     case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
8416     case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
8417     case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
8418     case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
8419     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
8420     case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
8421     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
8422     case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
8423     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
8424     case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
8425     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
8426     case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
8427     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
8428     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
8429     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
8430     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
8431     case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
8432     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12:
8433     case BFD_RELOC_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
8434     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12:
8435     case BFD_RELOC_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
8436     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12:
8437     case BFD_RELOC_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
8438     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12:
8439     case BFD_RELOC_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
8440     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
8441     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
8442     case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
8443     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
8444     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
8445     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
8446     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
8447     case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
8448       /* Always leave these relocations for the linker.  */
8449       return 1;
8450 
8451     default:
8452       break;
8453     }
8454 
8455   return generic_force_reloc (fixp);
8456 }
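
/* Example (illustrative only): a GOT-indirect load such as

       adrp    x0, :got:sym
       ldr     x0, [x0, #:got_lo12:sym]

   produces BFD_RELOC_AARCH64_ADR_GOT_PAGE and
   BFD_RELOC_AARCH64_LD64_GOT_LO12_NC fixups; the switch above forces
   these out as relocations even when SYM is defined locally, so the
   final GOT layout is left entirely to the linker.  */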
8457 
8458 #ifdef OBJ_ELF
8459 
8460 /* Implement md_after_parse_args.  This is the earliest time we need to
8461    decide the ABI.  If no -mabi is specified, the ABI is decided by the target triplet.  */
8462 
8463 void
8464 aarch64_after_parse_args (void)
8465 {
8466   if (aarch64_abi != AARCH64_ABI_NONE)
8467     return;
8468 
8469   /* DEFAULT_ARCH will have ":32" extension if it's configured for ILP32.  */
8470   if (strlen (default_arch) > 7 && strcmp (default_arch + 7, ":32") == 0)
8471     aarch64_abi = AARCH64_ABI_ILP32;
8472   else
8473     aarch64_abi = AARCH64_ABI_LP64;
8474 }
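
/* For example (assuming the usual configure.tgt conventions), a target
   configured with DEFAULT_ARCH "aarch64:32" selects AARCH64_ABI_ILP32
   here, while a plain "aarch64" default falls back to AARCH64_ABI_LP64.  */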
8475 
8476 const char *
8477 elf64_aarch64_target_format (void)
8478 {
8479 #ifdef TE_CLOUDABI
8480   /* FIXME: What to do for ilp32_p ?  */
8481   if (target_big_endian)
8482     return "elf64-bigaarch64-cloudabi";
8483   else
8484     return "elf64-littleaarch64-cloudabi";
8485 #else
8486   if (target_big_endian)
8487     return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
8488   else
8489     return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
8490 #endif
8491 }
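
/* For example, on a bi-endian (non-CloudABI) configuration, assembling
   with "-EB -mabi=ilp32" selects the "elf32-bigaarch64" target, while
   the default little-endian LP64 configuration selects
   "elf64-littleaarch64".  */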
8492 
8493 void
8494 aarch64elf_frob_symbol (symbolS * symp, int *puntp)
8495 {
8496   elf_frob_symbol (symp, puntp);
8497 }
8498 #endif
8499 
8500 /* MD interface: Finalization.	*/
8501 
8502 /* md_cleanup is a good place to do this, although it was probably not
8503    intended for this kind of use.  We need to dump the literal pool
8504    before references are made to a null symbol pointer.  */
8505 
8506 void
8507 aarch64_cleanup (void)
8508 {
8509   literal_pool *pool;
8510 
8511   for (pool = list_of_pools; pool; pool = pool->next)
8512     {
8513       /* Put it at the end of the relevant section.  */
8514       subseg_set (pool->section, pool->sub_section);
8515       s_ltorg (0);
8516     }
8517 }
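
/* Example (illustrative only): a pool entry created by a literal load
   such as

       ldr     x0, =0x1122334455667788

   with no explicit .ltorg before the end of the section is still
   emitted here, as if an .ltorg had been placed at the end of that
   section.  */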
8518 
8519 #ifdef OBJ_ELF
8520 /* Remove any excess mapping symbols generated for alignment frags in
8521    SEC.  We may have created a mapping symbol before a zero byte
8522    alignment; remove it if there's a mapping symbol after the
8523    alignment.  */
8524 static void
8525 check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8526 		       void *dummy ATTRIBUTE_UNUSED)
8527 {
8528   segment_info_type *seginfo = seg_info (sec);
8529   fragS *fragp;
8530 
8531   if (seginfo == NULL || seginfo->frchainP == NULL)
8532     return;
8533 
8534   for (fragp = seginfo->frchainP->frch_root;
8535        fragp != NULL; fragp = fragp->fr_next)
8536     {
8537       symbolS *sym = fragp->tc_frag_data.last_map;
8538       fragS *next = fragp->fr_next;
8539 
8540       /* Variable-sized frags have been converted to fixed size by
8541          this point.  But if this was variable-sized to start with,
8542          there will be a fixed-size frag after it.  So don't handle
8543          next == NULL.  */
8544       if (sym == NULL || next == NULL)
8545 	continue;
8546 
8547       if (S_GET_VALUE (sym) < next->fr_address)
8548 	/* Not at the end of this frag.  */
8549 	continue;
8550       know (S_GET_VALUE (sym) == next->fr_address);
8551 
8552       do
8553 	{
8554 	  if (next->tc_frag_data.first_map != NULL)
8555 	    {
8556 	      /* Next frag starts with a mapping symbol.  Discard this
8557 	         one.  */
8558 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8559 	      break;
8560 	    }
8561 
8562 	  if (next->fr_next == NULL)
8563 	    {
8564 	      /* This mapping symbol is at the end of the section.  Discard
8565 	         it.  */
8566 	      know (next->fr_fix == 0 && next->fr_var == 0);
8567 	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8568 	      break;
8569 	    }
8570 
8571 	  /* As long as we have empty frags without any mapping symbols,
8572 	     keep looking.  */
8573 	  /* If the next frag is non-empty and does not start with a
8574 	     mapping symbol, then this mapping symbol is required.  */
8575 	  if (next->fr_address != next->fr_next->fr_address)
8576 	    break;
8577 
8578 	  next = next->fr_next;
8579 	}
8580       while (next != NULL);
8581     }
8582 }
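
/* Example (illustrative only): a mapping symbol emitted just before an
   alignment directive that turns out to insert no bytes ends up at the
   same address as the mapping symbol (e.g. "$x" or "$d") that starts
   the next frag; the loop above detects the duplicate and removes the
   redundant one.  */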
8583 #endif
8584 
8585 /* Adjust the symbol table.  */
8586 
8587 void
8588 aarch64_adjust_symtab (void)
8589 {
8590 #ifdef OBJ_ELF
8591   /* Remove any overlapping mapping symbols generated by alignment frags.  */
8592   bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8593   /* Now do generic ELF adjustments.  */
8594   elf_adjust_symtab ();
8595 #endif
8596 }
8597 
8598 static void
8599 checked_hash_insert (struct hash_control *table, const char *key, void *value)
8600 {
8601   const char *hash_err;
8602 
8603   hash_err = hash_insert (table, key, value);
8604   if (hash_err)
8605     printf ("Internal Error:  Can't hash %s\n", key);
8606 }
8607 
8608 static void
8609 fill_instruction_hash_table (void)
8610 {
8611   aarch64_opcode *opcode = aarch64_opcode_table;
8612 
8613   while (opcode->name != NULL)
8614     {
8615       templates *templ, *new_templ;
8616       templ = hash_find (aarch64_ops_hsh, opcode->name);
8617 
8618       new_templ = XNEW (templates);
8619       new_templ->opcode = opcode;
8620       new_templ->next = NULL;
8621 
8622       if (!templ)
8623 	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8624       else
8625 	{
8626 	  new_templ->next = templ->next;
8627 	  templ->next = new_templ;
8628 	}
8629       ++opcode;
8630     }
8631 }
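
/* For illustration: mnemonics with several encodings, e.g. "add"
   (immediate, shifted-register, extended-register, SIMD and SVE
   forms), end up as a chain of templates under a single hash entry;
   during assembly the parser walks that chain until one opcode matches
   the operands supplied.  */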
8632 
8633 static inline void
8634 convert_to_upper (char *dst, const char *src, size_t num)
8635 {
8636   unsigned int i;
8637   for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8638     *dst = TOUPPER (*src);
8639   *dst = '\0';
8640 }
8641 
8642 /* Assume STR points to a lower-case string, allocate, convert and return
8643    the corresponding upper-case string.  */
8644 static inline const char*
8645 get_upper_str (const char *str)
8646 {
8647   char *ret;
8648   size_t len = strlen (str);
8649   ret = XNEWVEC (char, len + 1);
8650   convert_to_upper (ret, str, len);
8651   return ret;
8652 }
8653 
8654 /* MD interface: Initialization.  */
8655 
8656 void
8657 md_begin (void)
8658 {
8659   unsigned mach;
8660   unsigned int i;
8661 
8662   if ((aarch64_ops_hsh = hash_new ()) == NULL
8663       || (aarch64_cond_hsh = hash_new ()) == NULL
8664       || (aarch64_shift_hsh = hash_new ()) == NULL
8665       || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8666       || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8667       || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8668       || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8669       || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8670       || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8671       || (aarch64_sys_regs_sr_hsh = hash_new ()) == NULL
8672       || (aarch64_reg_hsh = hash_new ()) == NULL
8673       || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8674       || (aarch64_nzcv_hsh = hash_new ()) == NULL
8675       || (aarch64_pldop_hsh = hash_new ()) == NULL
8676       || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8677     as_fatal (_("virtual memory exhausted"));
8678 
8679   fill_instruction_hash_table ();
8680 
8681   for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8682     checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8683 			 (void *) (aarch64_sys_regs + i));
8684 
8685   for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8686     checked_hash_insert (aarch64_pstatefield_hsh,
8687 			 aarch64_pstatefields[i].name,
8688 			 (void *) (aarch64_pstatefields + i));
8689 
8690   for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8691     checked_hash_insert (aarch64_sys_regs_ic_hsh,
8692 			 aarch64_sys_regs_ic[i].name,
8693 			 (void *) (aarch64_sys_regs_ic + i));
8694 
8695   for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8696     checked_hash_insert (aarch64_sys_regs_dc_hsh,
8697 			 aarch64_sys_regs_dc[i].name,
8698 			 (void *) (aarch64_sys_regs_dc + i));
8699 
8700   for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8701     checked_hash_insert (aarch64_sys_regs_at_hsh,
8702 			 aarch64_sys_regs_at[i].name,
8703 			 (void *) (aarch64_sys_regs_at + i));
8704 
8705   for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8706     checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8707 			 aarch64_sys_regs_tlbi[i].name,
8708 			 (void *) (aarch64_sys_regs_tlbi + i));
8709 
8710   for (i = 0; aarch64_sys_regs_sr[i].name != NULL; i++)
8711     checked_hash_insert (aarch64_sys_regs_sr_hsh,
8712 			 aarch64_sys_regs_sr[i].name,
8713 			 (void *) (aarch64_sys_regs_sr + i));
8714 
8715   for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8716     checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8717 			 (void *) (reg_names + i));
8718 
8719   for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8720     checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8721 			 (void *) (nzcv_names + i));
8722 
8723   for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8724     {
8725       const char *name = aarch64_operand_modifiers[i].name;
8726       checked_hash_insert (aarch64_shift_hsh, name,
8727 			   (void *) (aarch64_operand_modifiers + i));
8728       /* Also hash the name in the upper case.  */
8729       checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8730 			   (void *) (aarch64_operand_modifiers + i));
8731     }
8732 
8733   for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8734     {
8735       unsigned int j;
8736       /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8737 	 the same condition code.  */
8738       for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8739 	{
8740 	  const char *name = aarch64_conds[i].names[j];
8741 	  if (name == NULL)
8742 	    break;
8743 	  checked_hash_insert (aarch64_cond_hsh, name,
8744 			       (void *) (aarch64_conds + i));
8745 	  /* Also hash the name in the upper case.  */
8746 	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8747 			       (void *) (aarch64_conds + i));
8748 	}
8749     }
8750 
8751   for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8752     {
8753       const char *name = aarch64_barrier_options[i].name;
8754       /* Skip xx00 - the unallocated values of option.  */
8755       if ((i & 0x3) == 0)
8756 	continue;
8757       checked_hash_insert (aarch64_barrier_opt_hsh, name,
8758 			   (void *) (aarch64_barrier_options + i));
8759       /* Also hash the name in the upper case.  */
8760       checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8761 			   (void *) (aarch64_barrier_options + i));
8762     }
8763 
8764   for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8765     {
8766       const char* name = aarch64_prfops[i].name;
8767       /* Skip the unallocated hint encodings.  */
8768       if (name == NULL)
8769 	continue;
8770       checked_hash_insert (aarch64_pldop_hsh, name,
8771 			   (void *) (aarch64_prfops + i));
8772       /* Also hash the name in the upper case.  */
8773       checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8774 			   (void *) (aarch64_prfops + i));
8775     }
8776 
8777   for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8778     {
8779       const char* name = aarch64_hint_options[i].name;
8780       const char* upper_name = get_upper_str(name);
8781 
8782       checked_hash_insert (aarch64_hint_opt_hsh, name,
8783 			   (void *) (aarch64_hint_options + i));
8784 
8785       /* Also hash the name in the upper case if not the same.  */
8786       if (strcmp (name, upper_name) != 0)
8787 	checked_hash_insert (aarch64_hint_opt_hsh, upper_name,
8788 			     (void *) (aarch64_hint_options + i));
8789     }
8790 
8791   /* Set the cpu variant based on the command-line options.  */
8792   if (!mcpu_cpu_opt)
8793     mcpu_cpu_opt = march_cpu_opt;
8794 
8795   if (!mcpu_cpu_opt)
8796     mcpu_cpu_opt = &cpu_default;
8797 
8798   cpu_variant = *mcpu_cpu_opt;
8799 
8800   /* Record the CPU type.  */
8801   mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8802 
8803   bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8804 }
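
/* As a result of the upper-case duplicates hashed above, operand
   keywords are accepted in either case: for example, the condition in
   "b.EQ" and the shift in "add x0, x1, x2, LSL #3" resolve to the same
   table entries as their lower-case spellings.  */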
8805 
8806 /* Command line processing.  */
8807 
8808 const char *md_shortopts = "m:";
8809 
8810 #ifdef AARCH64_BI_ENDIAN
8811 #define OPTION_EB (OPTION_MD_BASE + 0)
8812 #define OPTION_EL (OPTION_MD_BASE + 1)
8813 #else
8814 #if TARGET_BYTES_BIG_ENDIAN
8815 #define OPTION_EB (OPTION_MD_BASE + 0)
8816 #else
8817 #define OPTION_EL (OPTION_MD_BASE + 1)
8818 #endif
8819 #endif
8820 
8821 struct option md_longopts[] = {
8822 #ifdef OPTION_EB
8823   {"EB", no_argument, NULL, OPTION_EB},
8824 #endif
8825 #ifdef OPTION_EL
8826   {"EL", no_argument, NULL, OPTION_EL},
8827 #endif
8828   {NULL, no_argument, NULL, 0}
8829 };
8830 
8831 size_t md_longopts_size = sizeof (md_longopts);
8832 
8833 struct aarch64_option_table
8834 {
8835   const char *option;			/* Option name to match.  */
8836   const char *help;			/* Help information.  */
8837   int *var;			/* Variable to change.  */
8838   int value;			/* What to change it to.  */
8839   char *deprecated;		/* If non-null, print this message.  */
8840 };
8841 
8842 static struct aarch64_option_table aarch64_opts[] = {
8843   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8844   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8845    NULL},
8846 #ifdef DEBUG_AARCH64
8847   {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8848 #endif /* DEBUG_AARCH64 */
8849   {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8850    NULL},
8851   {"mno-verbose-error", N_("do not output verbose error messages"),
8852    &verbose_error_p, 0, NULL},
8853   {NULL, NULL, NULL, 0, NULL}
8854 };
8855 
8856 struct aarch64_cpu_option_table
8857 {
8858   const char *name;
8859   const aarch64_feature_set value;
8860   /* The canonical name of the CPU, or NULL to use NAME converted to upper
8861      case.  */
8862   const char *canonical_name;
8863 };
8864 
8865 /* This list should, at a minimum, contain all the cpu names
8866    recognized by GCC.  */
8867 static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8868   {"all", AARCH64_ANY, NULL},
8869   {"cortex-a34", AARCH64_FEATURE (AARCH64_ARCH_V8,
8870 				  AARCH64_FEATURE_CRC), "Cortex-A34"},
8871   {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8872 				  AARCH64_FEATURE_CRC), "Cortex-A35"},
8873   {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8874 				  AARCH64_FEATURE_CRC), "Cortex-A53"},
8875   {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8876 				  AARCH64_FEATURE_CRC), "Cortex-A57"},
8877   {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8878 				  AARCH64_FEATURE_CRC), "Cortex-A72"},
8879   {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8880 				  AARCH64_FEATURE_CRC), "Cortex-A73"},
8881   {"cortex-a55", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8882 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8883 				  "Cortex-A55"},
8884   {"cortex-a75", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8885 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8886 				  "Cortex-A75"},
8887   {"cortex-a76", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8888 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16 | AARCH64_FEATURE_DOTPROD),
8889 				  "Cortex-A76"},
8890   {"cortex-a76ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8891 				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8892 				    | AARCH64_FEATURE_DOTPROD
8893 				    | AARCH64_FEATURE_SSBS),
8894 				    "Cortex-A76AE"},
8895   {"cortex-a77", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8896 				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8897 				  | AARCH64_FEATURE_DOTPROD
8898 				  | AARCH64_FEATURE_SSBS),
8899 				  "Cortex-A77"},
8900   {"cortex-a65", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8901 				  AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8902 				  | AARCH64_FEATURE_DOTPROD
8903 				  | AARCH64_FEATURE_SSBS),
8904 				  "Cortex-A65"},
8905   {"cortex-a65ae", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8906 				    AARCH64_FEATURE_F16 | AARCH64_FEATURE_RCPC
8907 				    | AARCH64_FEATURE_DOTPROD
8908 				    | AARCH64_FEATURE_SSBS),
8909 				    "Cortex-A65AE"},
8910   {"ares", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8911 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8912 				  | AARCH64_FEATURE_DOTPROD
8913 				  | AARCH64_FEATURE_PROFILE),
8914 				  "Ares"},
8915   {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8916 				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8917 				"Samsung Exynos M1"},
8918   {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8919 			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8920 			      | AARCH64_FEATURE_RDMA),
8921    "Qualcomm Falkor"},
8922   {"neoverse-e1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8923 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8924 				  | AARCH64_FEATURE_DOTPROD
8925 				  | AARCH64_FEATURE_SSBS),
8926 				  "Neoverse E1"},
8927   {"neoverse-n1", AARCH64_FEATURE (AARCH64_ARCH_V8_2,
8928 				  AARCH64_FEATURE_RCPC | AARCH64_FEATURE_F16
8929 				  | AARCH64_FEATURE_DOTPROD
8930 				  | AARCH64_FEATURE_PROFILE),
8931 				  "Neoverse N1"},
8932   {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8933 			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO
8934 			       | AARCH64_FEATURE_RDMA),
8935    "Qualcomm QDF24XX"},
8936   {"saphira", AARCH64_FEATURE (AARCH64_ARCH_V8_4,
8937 			       AARCH64_FEATURE_CRYPTO | AARCH64_FEATURE_PROFILE),
8938    "Qualcomm Saphira"},
8939   {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8940 				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8941    "Cavium ThunderX"},
8942   {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8943 			      AARCH64_FEATURE_CRYPTO),
8944   "Broadcom Vulcan"},
8945   /* The 'xgene-1' name is an older spelling of 'xgene1' that was used
8946      in earlier releases; 'xgene1' is now the preferred name in all
8947      tools.  */
8948   {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8949   {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8950   {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8951 			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8952   {"generic", AARCH64_ARCH_V8, NULL},
8953 
8954   {NULL, AARCH64_ARCH_NONE, NULL}
8955 };
8956 
8957 struct aarch64_arch_option_table
8958 {
8959   const char *name;
8960   const aarch64_feature_set value;
8961 };
8962 
8963 /* This list should, at a minimum, contain all the architecture names
8964    recognized by GCC.  */
8965 static const struct aarch64_arch_option_table aarch64_archs[] = {
8966   {"all", AARCH64_ANY},
8967   {"armv8-a", AARCH64_ARCH_V8},
8968   {"armv8.1-a", AARCH64_ARCH_V8_1},
8969   {"armv8.2-a", AARCH64_ARCH_V8_2},
8970   {"armv8.3-a", AARCH64_ARCH_V8_3},
8971   {"armv8.4-a", AARCH64_ARCH_V8_4},
8972   {"armv8.5-a", AARCH64_ARCH_V8_5},
8973   {"armv8.6-a", AARCH64_ARCH_V8_6},
8974   {NULL, AARCH64_ARCH_NONE}
8975 };
8976 
8977 /* ISA extensions.  */
8978 struct aarch64_option_cpu_value_table
8979 {
8980   const char *name;
8981   const aarch64_feature_set value;
8982   const aarch64_feature_set require; /* Feature dependencies.  */
8983 };
8984 
8985 static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8986   {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8987 			AARCH64_ARCH_NONE},
8988   {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
8989 			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8990   {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8991 			AARCH64_ARCH_NONE},
8992   {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8993 			AARCH64_ARCH_NONE},
8994   {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8995 			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8996   {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8997 			AARCH64_ARCH_NONE},
8998   {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8999 			AARCH64_ARCH_NONE},
9000   {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
9001 			AARCH64_ARCH_NONE},
9002   {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
9003 			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
9004   {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
9005 			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
9006   {"fp16fml",		AARCH64_FEATURE (AARCH64_FEATURE_F16_FML, 0),
9007 			AARCH64_FEATURE (AARCH64_FEATURE_FP
9008 					 | AARCH64_FEATURE_F16, 0)},
9009   {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
9010 			AARCH64_ARCH_NONE},
9011   {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
9012 			AARCH64_FEATURE (AARCH64_FEATURE_F16
9013 					 | AARCH64_FEATURE_SIMD
9014 					 | AARCH64_FEATURE_COMPNUM, 0)},
9015   {"tme",		AARCH64_FEATURE (AARCH64_FEATURE_TME, 0),
9016 			AARCH64_ARCH_NONE},
9017   {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
9018 			AARCH64_FEATURE (AARCH64_FEATURE_F16
9019 					 | AARCH64_FEATURE_SIMD, 0)},
9020   {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
9021 			AARCH64_ARCH_NONE},
9022   {"dotprod",		AARCH64_FEATURE (AARCH64_FEATURE_DOTPROD, 0),
9023 			AARCH64_ARCH_NONE},
9024   {"sha2",		AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0),
9025 			AARCH64_ARCH_NONE},
9026   {"sb",		AARCH64_FEATURE (AARCH64_FEATURE_SB, 0),
9027 			AARCH64_ARCH_NONE},
9028   {"predres",		AARCH64_FEATURE (AARCH64_FEATURE_PREDRES, 0),
9029 			AARCH64_ARCH_NONE},
9030   {"aes",		AARCH64_FEATURE (AARCH64_FEATURE_AES, 0),
9031 			AARCH64_ARCH_NONE},
9032   {"sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SM4, 0),
9033 			AARCH64_ARCH_NONE},
9034   {"sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SHA3, 0),
9035 			AARCH64_FEATURE (AARCH64_FEATURE_SHA2, 0)},
9036   {"rng",		AARCH64_FEATURE (AARCH64_FEATURE_RNG, 0),
9037 			AARCH64_ARCH_NONE},
9038   {"ssbs",		AARCH64_FEATURE (AARCH64_FEATURE_SSBS, 0),
9039 			AARCH64_ARCH_NONE},
9040   {"memtag",		AARCH64_FEATURE (AARCH64_FEATURE_MEMTAG, 0),
9041 			AARCH64_ARCH_NONE},
9042   {"sve2",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0),
9043 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9044   {"sve2-sm4",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SM4, 0),
9045 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9046 					 | AARCH64_FEATURE_SM4, 0)},
9047   {"sve2-aes",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_AES, 0),
9048 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9049 					 | AARCH64_FEATURE_AES, 0)},
9050   {"sve2-sha3",		AARCH64_FEATURE (AARCH64_FEATURE_SVE2_SHA3, 0),
9051 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2
9052 					 | AARCH64_FEATURE_SHA3, 0)},
9053   {"sve2-bitperm",	AARCH64_FEATURE (AARCH64_FEATURE_SVE2_BITPERM, 0),
9054 			AARCH64_FEATURE (AARCH64_FEATURE_SVE2, 0)},
9055   {"bf16",		AARCH64_FEATURE (AARCH64_FEATURE_BFLOAT16, 0),
9056 			AARCH64_ARCH_NONE},
9057   {"i8mm",		AARCH64_FEATURE (AARCH64_FEATURE_I8MM, 0),
9058 			AARCH64_ARCH_NONE},
9059   {"f32mm",		AARCH64_FEATURE (AARCH64_FEATURE_F32MM, 0),
9060 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9061   {"f64mm",		AARCH64_FEATURE (AARCH64_FEATURE_F64MM, 0),
9062 			AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0)},
9063   {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
9064 };
9065 
9066 struct aarch64_long_option_table
9067 {
9068   const char *option;			/* Substring to match.  */
9069   const char *help;			/* Help information.  */
9070   int (*func) (const char *subopt);	/* Function to decode sub-option.  */
9071   char *deprecated;		/* If non-null, print this message.  */
9072 };
9073 
9074 /* Transitive closure of features depending on set.  */
9075 static aarch64_feature_set
9076 aarch64_feature_disable_set (aarch64_feature_set set)
9077 {
9078   const struct aarch64_option_cpu_value_table *opt;
9079   aarch64_feature_set prev = 0;
9080 
9081   while (prev != set) {
9082     prev = set;
9083     for (opt = aarch64_features; opt->name != NULL; opt++)
9084       if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
9085         AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
9086   }
9087   return set;
9088 }
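
/* Example (illustrative): disabling "fp" also pulls in everything that
   transitively requires it, so the returned set includes "simd",
   "fp16", "crypto", "compnum", "sve" and so on, following the
   dependencies declared in aarch64_features above.  */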
9089 
9090 /* Transitive closure of dependencies of set.  */
9091 static aarch64_feature_set
9092 aarch64_feature_enable_set (aarch64_feature_set set)
9093 {
9094   const struct aarch64_option_cpu_value_table *opt;
9095   aarch64_feature_set prev = 0;
9096 
9097   while (prev != set) {
9098     prev = set;
9099     for (opt = aarch64_features; opt->name != NULL; opt++)
9100       if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
9101         AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
9102   }
9103   return set;
9104 }
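
/* Example (illustrative): enabling "sve" pulls in its declared
   requirements and theirs in turn, so the returned set also contains
   "compnum", "fp16", "simd" and "fp".  */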
9105 
9106 static int
9107 aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
9108 			bfd_boolean ext_only)
9109 {
9110   /* We insist on extensions being added before being removed.  We achieve
9111      this by using the ADDING_VALUE variable to indicate whether we are
9112      adding an extension (1) or removing it (0) and only allowing it to
9113      change in the order -1 -> 1 -> 0.  */
9114   int adding_value = -1;
9115   aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
9116 
9117   /* Copy the feature set, so that we can modify it.  */
9118   *ext_set = **opt_p;
9119   *opt_p = ext_set;
9120 
9121   while (str != NULL && *str != 0)
9122     {
9123       const struct aarch64_option_cpu_value_table *opt;
9124       const char *ext = NULL;
9125       int optlen;
9126 
9127       if (!ext_only)
9128 	{
9129 	  if (*str != '+')
9130 	    {
9131 	      as_bad (_("invalid architectural extension"));
9132 	      return 0;
9133 	    }
9134 
9135 	  ext = strchr (++str, '+');
9136 	}
9137 
9138       if (ext != NULL)
9139 	optlen = ext - str;
9140       else
9141 	optlen = strlen (str);
9142 
9143       if (optlen >= 2 && strncmp (str, "no", 2) == 0)
9144 	{
9145 	  if (adding_value != 0)
9146 	    adding_value = 0;
9147 	  optlen -= 2;
9148 	  str += 2;
9149 	}
9150       else if (optlen > 0)
9151 	{
9152 	  if (adding_value == -1)
9153 	    adding_value = 1;
9154 	  else if (adding_value != 1)
9155 	    {
9156 	      as_bad (_("must specify extensions to add before specifying "
9157 			"those to remove"));
9158 	      return 0;
9159 	    }
9160 	}
9161 
9162       if (optlen == 0)
9163 	{
9164 	  as_bad (_("missing architectural extension"));
9165 	  return 0;
9166 	}
9167 
9168       gas_assert (adding_value != -1);
9169 
9170       for (opt = aarch64_features; opt->name != NULL; opt++)
9171 	if (strncmp (opt->name, str, optlen) == 0)
9172 	  {
9173 	    aarch64_feature_set set;
9174 
9175 	    /* Add or remove the extension.  */
9176 	    if (adding_value)
9177 	      {
9178 		set = aarch64_feature_enable_set (opt->value);
9179 		AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
9180 	      }
9181 	    else
9182 	      {
9183 		set = aarch64_feature_disable_set (opt->value);
9184 		AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
9185 	      }
9186 	    break;
9187 	  }
9188 
9189       if (opt->name == NULL)
9190 	{
9191 	  as_bad (_("unknown architectural extension `%s'"), str);
9192 	  return 0;
9193 	}
9194 
9195       str = ext;
9196     }
9197 
9198   return 1;
9199 }
9200 
9201 static int
9202 aarch64_parse_cpu (const char *str)
9203 {
9204   const struct aarch64_cpu_option_table *opt;
9205   const char *ext = strchr (str, '+');
9206   size_t optlen;
9207 
9208   if (ext != NULL)
9209     optlen = ext - str;
9210   else
9211     optlen = strlen (str);
9212 
9213   if (optlen == 0)
9214     {
9215       as_bad (_("missing cpu name `%s'"), str);
9216       return 0;
9217     }
9218 
9219   for (opt = aarch64_cpus; opt->name != NULL; opt++)
9220     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9221       {
9222 	mcpu_cpu_opt = &opt->value;
9223 	if (ext != NULL)
9224 	  return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
9225 
9226 	return 1;
9227       }
9228 
9229   as_bad (_("unknown cpu `%s'"), str);
9230   return 0;
9231 }
9232 
9233 static int
9234 aarch64_parse_arch (const char *str)
9235 {
9236   const struct aarch64_arch_option_table *opt;
9237   const char *ext = strchr (str, '+');
9238   size_t optlen;
9239 
9240   if (ext != NULL)
9241     optlen = ext - str;
9242   else
9243     optlen = strlen (str);
9244 
9245   if (optlen == 0)
9246     {
9247       as_bad (_("missing architecture name `%s'"), str);
9248       return 0;
9249     }
9250 
9251   for (opt = aarch64_archs; opt->name != NULL; opt++)
9252     if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
9253       {
9254 	march_cpu_opt = &opt->value;
9255 	if (ext != NULL)
9256 	  return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
9257 
9258 	return 1;
9259       }
9260 
9261   as_bad (_("unknown architecture `%s'"), str);
9262   return 0;
9263 }
9264 
9265 /* ABIs.  */
9266 struct aarch64_option_abi_value_table
9267 {
9268   const char *name;
9269   enum aarch64_abi_type value;
9270 };
9271 
9272 static const struct aarch64_option_abi_value_table aarch64_abis[] = {
9273   {"ilp32",		AARCH64_ABI_ILP32},
9274   {"lp64",		AARCH64_ABI_LP64},
9275 };
9276 
9277 static int
9278 aarch64_parse_abi (const char *str)
9279 {
9280   unsigned int i;
9281 
9282   if (str[0] == '\0')
9283     {
9284       as_bad (_("missing abi name `%s'"), str);
9285       return 0;
9286     }
9287 
9288   for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
9289     if (strcmp (str, aarch64_abis[i].name) == 0)
9290       {
9291 	aarch64_abi = aarch64_abis[i].value;
9292 	return 1;
9293       }
9294 
9295   as_bad (_("unknown abi `%s'"), str);
9296   return 0;
9297 }
9298 
9299 static struct aarch64_long_option_table aarch64_long_opts[] = {
9300 #ifdef OBJ_ELF
9301   {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
9302    aarch64_parse_abi, NULL},
9303 #endif /* OBJ_ELF */
9304   {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
9305    aarch64_parse_cpu, NULL},
9306   {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
9307    aarch64_parse_arch, NULL},
9308   {NULL, NULL, 0, NULL}
9309 };
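
/* Example command lines handled by the option tables above
   (illustrative; -mabi is only available for ELF targets):

     as -mabi=ilp32 -march=armv8.2-a+sve file.s
     as -mcpu=cortex-a72+crypto -mverbose-error file.s  */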
9310 
9311 int
9312 md_parse_option (int c, const char *arg)
9313 {
9314   struct aarch64_option_table *opt;
9315   struct aarch64_long_option_table *lopt;
9316 
9317   switch (c)
9318     {
9319 #ifdef OPTION_EB
9320     case OPTION_EB:
9321       target_big_endian = 1;
9322       break;
9323 #endif
9324 
9325 #ifdef OPTION_EL
9326     case OPTION_EL:
9327       target_big_endian = 0;
9328       break;
9329 #endif
9330 
9331     case 'a':
9332       /* Listing option.  Just ignore these, we don't support additional
9333          ones.  */
9334       return 0;
9335 
9336     default:
9337       for (opt = aarch64_opts; opt->option != NULL; opt++)
9338 	{
9339 	  if (c == opt->option[0]
9340 	      && ((arg == NULL && opt->option[1] == 0)
9341 		  || streq (arg, opt->option + 1)))
9342 	    {
9343 	      /* If the option is deprecated, tell the user.  */
9344 	      if (opt->deprecated != NULL)
9345 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
9346 			   arg ? arg : "", _(opt->deprecated));
9347 
9348 	      if (opt->var != NULL)
9349 		*opt->var = opt->value;
9350 
9351 	      return 1;
9352 	    }
9353 	}
9354 
9355       for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9356 	{
9357 	  /* These options are expected to have an argument.  */
9358 	  if (c == lopt->option[0]
9359 	      && arg != NULL
9360 	      && strncmp (arg, lopt->option + 1,
9361 			  strlen (lopt->option + 1)) == 0)
9362 	    {
9363 	      /* If the option is deprecated, tell the user.  */
9364 	      if (lopt->deprecated != NULL)
9365 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
9366 			   _(lopt->deprecated));
9367 
9368 	      /* Call the sub-option parser.  */
9369 	      return lopt->func (arg + strlen (lopt->option) - 1);
9370 	    }
9371 	}
9372 
9373       return 0;
9374     }
9375 
9376   return 1;
9377 }
9378 
9379 void
9380 md_show_usage (FILE * fp)
9381 {
9382   struct aarch64_option_table *opt;
9383   struct aarch64_long_option_table *lopt;
9384 
9385   fprintf (fp, _(" AArch64-specific assembler options:\n"));
9386 
9387   for (opt = aarch64_opts; opt->option != NULL; opt++)
9388     if (opt->help != NULL)
9389       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
9390 
9391   for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
9392     if (lopt->help != NULL)
9393       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
9394 
9395 #ifdef OPTION_EB
9396   fprintf (fp, _("\
9397   -EB                     assemble code for a big-endian cpu\n"));
9398 #endif
9399 
9400 #ifdef OPTION_EL
9401   fprintf (fp, _("\
9402   -EL                     assemble code for a little-endian cpu\n"));
9403 #endif
9404 }
9405 
9406 /* Parse a .cpu directive.  */
9407 
9408 static void
9409 s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
9410 {
9411   const struct aarch64_cpu_option_table *opt;
9412   char saved_char;
9413   char *name;
9414   char *ext;
9415   size_t optlen;
9416 
9417   name = input_line_pointer;
9418   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9419     input_line_pointer++;
9420   saved_char = *input_line_pointer;
9421   *input_line_pointer = 0;
9422 
9423   ext = strchr (name, '+');
9424 
9425   if (ext != NULL)
9426     optlen = ext - name;
9427   else
9428     optlen = strlen (name);
9429 
9430   /* Skip the first "all" entry.  */
9431   for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
9432     if (strlen (opt->name) == optlen
9433 	&& strncmp (name, opt->name, optlen) == 0)
9434       {
9435 	mcpu_cpu_opt = &opt->value;
9436 	if (ext != NULL)
9437 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9438 	    return;
9439 
9440 	cpu_variant = *mcpu_cpu_opt;
9441 
9442 	*input_line_pointer = saved_char;
9443 	demand_empty_rest_of_line ();
9444 	return;
9445       }
9446   as_bad (_("unknown cpu `%s'"), name);
9447   *input_line_pointer = saved_char;
9448   ignore_rest_of_line ();
9449 }
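
/* Example (illustrative): a source file can select the target CPU and
   extra extensions with, e.g.,

       .cpu cortex-a72+crypto

   which updates cpu_variant for the instructions that follow.  */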
9450 
9451 
9452 /* Parse a .arch directive.  */
9453 
9454 static void
9455 s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
9456 {
9457   const struct aarch64_arch_option_table *opt;
9458   char saved_char;
9459   char *name;
9460   char *ext;
9461   size_t optlen;
9462 
9463   name = input_line_pointer;
9464   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9465     input_line_pointer++;
9466   saved_char = *input_line_pointer;
9467   *input_line_pointer = 0;
9468 
9469   ext = strchr (name, '+');
9470 
9471   if (ext != NULL)
9472     optlen = ext - name;
9473   else
9474     optlen = strlen (name);
9475 
9476   /* Skip the first "all" entry.  */
9477   for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
9478     if (strlen (opt->name) == optlen
9479 	&& strncmp (name, opt->name, optlen) == 0)
9480       {
9481 	mcpu_cpu_opt = &opt->value;
9482 	if (ext != NULL)
9483 	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
9484 	    return;
9485 
9486 	cpu_variant = *mcpu_cpu_opt;
9487 
9488 	*input_line_pointer = saved_char;
9489 	demand_empty_rest_of_line ();
9490 	return;
9491       }
9492 
9493   as_bad (_("unknown architecture `%s'"), name);
9494   *input_line_pointer = saved_char;
9495   ignore_rest_of_line ();
9496 }
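
/* Example (illustrative):

       .arch armv8.2-a+sve

   selects the ARMv8.2-A feature set plus SVE for subsequent code.  */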
9497 
9498 /* Parse a .arch_extension directive.  */
9499 
9500 static void
9501 s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
9502 {
9503   char saved_char;
9504   char *ext = input_line_pointer;
9505 
9506   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
9507     input_line_pointer++;
9508   saved_char = *input_line_pointer;
9509   *input_line_pointer = 0;
9510 
9511   if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
9512     return;
9513 
9514   cpu_variant = *mcpu_cpu_opt;
9515 
9516   *input_line_pointer = saved_char;
9517   demand_empty_rest_of_line ();
9518 }
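
/* Example (illustrative): unlike .arch and .cpu, the argument here is a
   bare extension name (ext_only is TRUE, so no leading '+'), e.g.

       .arch_extension memtag
       .arch_extension nocrypto

   to add or remove a single extension relative to the current
   cpu_variant.  */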
9519 
9520 /* Copy symbol information.  */
9521 
9522 void
9523 aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
9524 {
9525   AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
9526 }
9527 
9528 #ifdef OBJ_ELF
9529 /* Same as elf_copy_symbol_attributes, but without copying st_other.
9530    This is needed so AArch64 specific st_other values can be independently
9531    specified for an IFUNC resolver (that is called by the dynamic linker)
9532    and the symbol it resolves (aliased to the resolver).  In particular,
9533    if a function symbol has special st_other value set via directives,
9534    then attaching an IFUNC resolver to that symbol should not override
9535    the st_other setting.  Requiring the directive on the IFUNC resolver
9536    symbol would be unexpected and problematic in C code, where the two
9537    symbols appear as two independent function declarations.  */
9538 
9539 void
9540 aarch64_elf_copy_symbol_attributes (symbolS *dest, symbolS *src)
9541 {
9542   struct elf_obj_sy *srcelf = symbol_get_obj (src);
9543   struct elf_obj_sy *destelf = symbol_get_obj (dest);
9544   if (srcelf->size)
9545     {
9546       if (destelf->size == NULL)
9547 	destelf->size = XNEW (expressionS);
9548       *destelf->size = *srcelf->size;
9549     }
9550   else
9551     {
9552       if (destelf->size != NULL)
9553 	free (destelf->size);
9554       destelf->size = NULL;
9555     }
9556   S_SET_SIZE (dest, S_GET_SIZE (src));
9557 }
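
/* Example (illustrative, assuming the .variant_pcs directive): if "foo"
   has been marked with

       .variant_pcs foo

   and is later aliased to an IFUNC resolver, the st_other bits carrying
   that marking are preserved, because this routine copies the size but
   deliberately not st_other.  */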
9558 #endif
9559