/* aarch64-dis.c -- AArch64 disassembler.
   Copyright (C) 2009-2021 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of the GNU opcodes library.

   This library is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   It is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING3.  If not,
   see <http://www.gnu.org/licenses/>.  */

#include "sysdep.h"
#include <stdint.h>
#include "disassemble.h"
#include "libiberty.h"
#include "opintl.h"
#include "aarch64-dis.h"
#include "elf-bfd.h"

#define INSNLEN 4

/* Cached mapping symbol state.  */
enum map_type
{
  MAP_INSN,
  MAP_DATA
};

static aarch64_feature_set arch_variant; /* See select_aarch64_variant.  */
static enum map_type last_type;
static int last_mapping_sym = -1;
static bfd_vma last_stop_offset = 0;
static bfd_vma last_mapping_addr = 0;

/* Other options */
static int no_aliases = 0;	/* If set disassemble as most general inst.  */
static int no_notes = 1;	/* If set do not print disassemble notes in the
				   output as comments.  */

/* Currently active instruction sequence.  */
static aarch64_instr_sequence insn_sequence;

static void
set_default_aarch64_dis_options (struct disassemble_info *info ATTRIBUTE_UNUSED)
{
}

static void
parse_aarch64_dis_option (const char *option, unsigned int len ATTRIBUTE_UNUSED)
{
  /* Try to match options that are simple flags.  */
  if (startswith (option, "no-aliases"))
    {
      no_aliases = 1;
      return;
    }

  if (startswith (option, "aliases"))
    {
      no_aliases = 0;
      return;
    }

  if (startswith (option, "no-notes"))
    {
      no_notes = 1;
      return;
    }

  if (startswith (option, "notes"))
    {
      no_notes = 0;
      return;
    }

#ifdef DEBUG_AARCH64
  if (startswith (option, "debug_dump"))
    {
      debug_dump = 1;
      return;
    }
#endif /* DEBUG_AARCH64 */

  /* Invalid option.  */
  opcodes_error_handler (_("unrecognised disassembler option: %s"), option);
}

static void
parse_aarch64_dis_options (const char *options)
{
  const char *option_end;

  if (options == NULL)
    return;

  while (*options != '\0')
    {
      /* Skip empty options.  */
      if (*options == ',')
	{
	  options++;
	  continue;
	}

      /* We know that *options is neither NUL nor a comma.  */
      option_end = options + 1;
      while (*option_end != ',' && *option_end != '\0')
	option_end++;

      parse_aarch64_dis_option (options, option_end - options);

      /* Go on to the next one.  If option_end points to a comma, it
	 will be skipped above.  */
      options = option_end;
    }
}
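
/* Illustrative sketch, not part of the original source: roughly how an
   option string such as the one given to "objdump -M" reaches the parser
   above.  Empty items produced by stray commas are skipped by the loop.
   The AARCH64_DIS_EXAMPLES guard is hypothetical and only keeps this
   example out of normal builds.  */
#ifdef AARCH64_DIS_EXAMPLES
static void
example_parse_dis_options (void)
{
  /* Equivalent in effect to "no-aliases,no-notes"; the empty item between
     the two commas is ignored.  */
  parse_aarch64_dis_options ("no-aliases,,no-notes");
}
#endif /* AARCH64_DIS_EXAMPLES */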

/* Functions doing the instruction disassembling.  */

/* The unnamed arguments consist of the number of fields and information about
   these fields where the VALUE will be extracted from CODE and returned.
   MASK can be zero or the base mask of the opcode.

   N.B. the fields are required to be in such an order that the most significant
   field for VALUE comes first, e.g. the <index> in
    SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>]
   is encoded in H:L:M in some cases; the fields should then be passed in
   the order H, L, M.  */

aarch64_insn
extract_fields (aarch64_insn code, aarch64_insn mask, ...)
{
  uint32_t num;
  const aarch64_field *field;
  enum aarch64_field_kind kind;
  va_list va;

  va_start (va, mask);
  num = va_arg (va, uint32_t);
  assert (num <= 5);
  aarch64_insn value = 0x0;
  while (num--)
    {
      kind = va_arg (va, enum aarch64_field_kind);
      field = &fields[kind];
      value <<= field->width;
      value |= extract_field (kind, code, mask);
    }
  va_end (va);
  return value;
}
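
/* Illustrative sketch, not part of the original source: extracting the
   H:L:M lane index described in the comment above.  H is the most
   significant field and is therefore passed first.  The
   AARCH64_DIS_EXAMPLES guard is hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static aarch64_insn
example_extract_hlm_index (aarch64_insn code)
{
  /* index<2:0> = H:L:M, as used by e.g. SQDMLAL <Va><d>, <Vb><n>,
     <Vm>.H[<index>].  */
  return extract_fields (code, 0, 3, FLD_H, FLD_L, FLD_M);
}
#endif /* AARCH64_DIS_EXAMPLES */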

/* Extract the value of all fields in SELF->fields from instruction CODE.
   The least significant bit comes from the final field.  */

static aarch64_insn
extract_all_fields (const aarch64_operand *self, aarch64_insn code)
{
  aarch64_insn value;
  unsigned int i;
  enum aarch64_field_kind kind;

  value = 0;
  for (i = 0; i < ARRAY_SIZE (self->fields) && self->fields[i] != FLD_NIL; ++i)
    {
      kind = self->fields[i];
      value <<= fields[kind].width;
      value |= extract_field (kind, code, 0);
    }
  return value;
}

/* Sign-extend bit I of VALUE.  */
static inline uint64_t
sign_extend (aarch64_insn value, unsigned i)
{
  uint64_t ret, sign;

  assert (i < 32);
  ret = value;
  sign = (uint64_t) 1 << i;
  return ((ret & (sign + sign - 1)) ^ sign) - sign;
}
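
/* Illustrative sketch, not part of the original source: sign-extending a
   9-bit simm9 field with the helper above.  Bit 8 is the sign bit, so
   e.g. 0x1ff yields -1.  The AARCH64_DIS_EXAMPLES guard is hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static int64_t
example_sign_extend_simm9 (aarch64_insn simm9)
{
  return (int64_t) sign_extend (simm9 & 0x1ff, 8);
}
#endif /* AARCH64_DIS_EXAMPLES */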

/* N.B. the following inline helper functions create a dependency on the
   order of operand qualifier enumerators.  */

/* Given VALUE, return qualifier for a general purpose register.  */
static inline enum aarch64_opnd_qualifier
get_greg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_W + value;
  assert (value <= 0x1
	  && aarch64_get_qualifier_standard_value (qualifier) == value);
  return qualifier;
}

/* Given VALUE, return qualifier for a vector register.  This does not support
   decoding instructions that accept the 2H vector type.  */

static inline enum aarch64_opnd_qualifier
get_vreg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_V_8B + value;

  /* Instructions using vector type 2H should not call this function.  Skip over
     the 2H qualifier.  */
  if (qualifier >= AARCH64_OPND_QLF_V_2H)
    qualifier += 1;

  assert (value <= 0x8
	  && aarch64_get_qualifier_standard_value (qualifier) == value);
  return qualifier;
}

/* Given VALUE, return qualifier for an FP or AdvSIMD scalar register.  */
static inline enum aarch64_opnd_qualifier
get_sreg_qualifier_from_value (aarch64_insn value)
{
  enum aarch64_opnd_qualifier qualifier = AARCH64_OPND_QLF_S_B + value;

  assert (value <= 0x4
	  && aarch64_get_qualifier_standard_value (qualifier) == value);
  return qualifier;
}

/* The instruction in *INST is probably half way through decoding and our
   caller wants to know the expected qualifier for operand I.  Return such a
   qualifier if we can establish it; otherwise return AARCH64_OPND_QLF_NIL.  */

static aarch64_opnd_qualifier_t
get_expected_qualifier (const aarch64_inst *inst, int i)
{
  aarch64_opnd_qualifier_seq_t qualifiers;
  /* Should not be called if the qualifier is known.  */
  assert (inst->operands[i].qualifier == AARCH64_OPND_QLF_NIL);
  if (aarch64_find_best_match (inst, inst->opcode->qualifiers_list,
			       i, qualifiers))
    return qualifiers[i];
  else
    return AARCH64_OPND_QLF_NIL;
}

/* Operand extractors.  */

bool
aarch64_ext_none (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info ATTRIBUTE_UNUSED,
		  const aarch64_insn code ATTRIBUTE_UNUSED,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  return true;
}

bool
aarch64_ext_regno (const aarch64_operand *self, aarch64_opnd_info *info,
		   const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->reg.regno = extract_field (self->fields[0], code, 0);
  return true;
}

bool
aarch64_ext_regno_pair (const aarch64_operand *self ATTRIBUTE_UNUSED, aarch64_opnd_info *info,
		   const aarch64_insn code ATTRIBUTE_UNUSED,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  assert (info->idx == 1
	  || info->idx == 3);
  info->reg.regno = inst->operands[info->idx - 1].reg.regno + 1;
  return true;
}

/* e.g. IC <ic_op>{, <Xt>}.  */
bool
aarch64_ext_regrt_sysins (const aarch64_operand *self, aarch64_opnd_info *info,
			  const aarch64_insn code,
			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->reg.regno = extract_field (self->fields[0], code, 0);
  assert (info->idx == 1
	  && (aarch64_get_operand_class (inst->operands[0].type)
	      == AARCH64_OPND_CLASS_SYSTEM));
  /* This will make the constraint checking happy and more importantly will
     help the disassembler determine whether this operand is optional or
     not.  */
  info->present = aarch64_sys_ins_reg_has_xt (inst->operands[0].sysins_op);

  return true;
}

/* e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */
bool
aarch64_ext_reglane (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* regno */
  info->reglane.regno = extract_field (self->fields[0], code,
				       inst->opcode->mask);

  /* Index and/or type.  */
  if (inst->opcode->iclass == asisdone
    || inst->opcode->iclass == asimdins)
    {
      if (info->type == AARCH64_OPND_En
	  && inst->opcode->operands[0] == AARCH64_OPND_Ed)
	{
	  unsigned shift;
	  /* index2 for e.g. INS <Vd>.<Ts>[<index1>], <Vn>.<Ts>[<index2>].  */
	  assert (info->idx == 1);	/* Vn */
	  aarch64_insn value = extract_field (FLD_imm4, code, 0);
	  /* Depend on AARCH64_OPND_Ed to determine the qualifier.  */
	  info->qualifier = get_expected_qualifier (inst, info->idx);
	  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
	  info->reglane.index = value >> shift;
	}
      else
	{
	  /* index and type for e.g. DUP <V><d>, <Vn>.<T>[<index>].
	     imm5<3:0>	<V>
	     0000	RESERVED
	     xxx1	B
	     xx10	H
	     x100	S
	     1000	D  */
	  int pos = -1;
	  aarch64_insn value = extract_field (FLD_imm5, code, 0);
	  while (++pos <= 3 && (value & 0x1) == 0)
	    value >>= 1;
	  if (pos > 3)
	    return false;
	  info->qualifier = get_sreg_qualifier_from_value (pos);
	  info->reglane.index = (unsigned) (value >> 1);
	}
    }
  else if (inst->opcode->iclass == dotproduct)
    {
      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_4B:
	case AARCH64_OPND_QLF_S_2H:
	  /* L:H */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  info->reglane.regno &= 0x1f;
	  break;
	default:
	  return false;
	}
    }
  else if (inst->opcode->iclass == cryptosm3)
    {
      /* index for e.g. SM3TT2A <Vd>.4S, <Vn>.4S, <Vm>S[<imm2>].  */
      info->reglane.index = extract_field (FLD_SM3_imm2, code, 0);
    }
  else
    {
      /* Index only for e.g. SQDMLAL <Va><d>, <Vb><n>, <Vm>.<Ts>[<index>].  */

      /* Need information in other operand(s) to help decoding.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      switch (info->qualifier)
	{
	case AARCH64_OPND_QLF_S_H:
	  if (info->type == AARCH64_OPND_Em16)
	    {
	      /* h:l:m */
	      info->reglane.index = extract_fields (code, 0, 3, FLD_H, FLD_L,
						    FLD_M);
	      info->reglane.regno &= 0xf;
	    }
	  else
	    {
	      /* h:l */
	      info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	    }
	  break;
	case AARCH64_OPND_QLF_S_S:
	  /* h:l */
	  info->reglane.index = extract_fields (code, 0, 2, FLD_H, FLD_L);
	  break;
	case AARCH64_OPND_QLF_S_D:
	  /* H */
	  info->reglane.index = extract_field (FLD_H, code, 0);
	  break;
	default:
	  return false;
	}

      if (inst->opcode->op == OP_FCMLA_ELEM
	  && info->qualifier != AARCH64_OPND_QLF_S_H)
	{
	  /* Complex operand takes two elements.  */
	  if (info->reglane.index & 1)
	    return false;
	  info->reglane.index /= 2;
	}
    }

  return true;
}

bool
aarch64_ext_reglist (const aarch64_operand *self, aarch64_opnd_info *info,
		     const aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* R */
  info->reglist.first_regno = extract_field (self->fields[0], code, 0);
  /* len */
  info->reglist.num_regs = extract_field (FLD_len, code, 0) + 1;
  return true;
}

/* Decode Rt and opcode fields of Vt in AdvSIMD load/store instructions.  */
bool
aarch64_ext_ldst_reglist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			  aarch64_opnd_info *info, const aarch64_insn code,
			  const aarch64_inst *inst,
			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* Number of elements in each structure to be loaded/stored.  */
  unsigned expected_num = get_opcode_dependent_value (inst->opcode);

  struct
    {
      unsigned is_reserved;
      unsigned num_regs;
      unsigned num_elements;
    } data [] =
  {   {0, 4, 4},
      {1, 4, 4},
      {0, 4, 1},
      {0, 4, 2},
      {0, 3, 3},
      {1, 3, 3},
      {0, 3, 1},
      {0, 1, 1},
      {0, 2, 2},
      {1, 2, 2},
      {0, 2, 1},
  };

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  /* opcode */
  value = extract_field (FLD_opcode, code, 0);
  /* PR 21595: Check for a bogus value.  */
  if (value >= ARRAY_SIZE (data))
    return false;
  if (expected_num != data[value].num_elements || data[value].is_reserved)
    return false;
  info->reglist.num_regs = data[value].num_regs;

  return true;
}

/* Decode Rt and S fields of Vt in AdvSIMD load single structure to all
   lanes instructions.  */
bool
aarch64_ext_ldst_reglist_r (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info, const aarch64_insn code,
			    const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);
  /* S */
  value = extract_field (FLD_S, code, 0);

  /* Number of registers is equal to the number of elements in
     each structure to be loaded/stored.  */
  info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);

  /* Except when it is LD1R.  */
  if (info->reglist.num_regs == 1 && value == (aarch64_insn) 1)
    info->reglist.num_regs = 2;

  return true;
}

/* Decode Q, opcode<2:1>, S, size and Rt fields of Vt in AdvSIMD
   load/store single element instructions.  */
bool
aarch64_ext_ldst_elemlist (const aarch64_operand *self ATTRIBUTE_UNUSED,
			   aarch64_opnd_info *info, const aarch64_insn code,
			   const aarch64_inst *inst ATTRIBUTE_UNUSED,
			   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_field field = {0, 0};
  aarch64_insn QSsize;		/* fields Q:S:size.  */
  aarch64_insn opcodeh2;	/* opcode<2:1> */

  /* Rt */
  info->reglist.first_regno = extract_field (FLD_Rt, code, 0);

  /* Decode the index, opcode<2:1> and size.  */
  gen_sub_field (FLD_asisdlso_opcode, 1, 2, &field);
  opcodeh2 = extract_field_2 (&field, code, 0);
  QSsize = extract_fields (code, 0, 3, FLD_Q, FLD_S, FLD_vldst_size);
  switch (opcodeh2)
    {
    case 0x0:
      info->qualifier = AARCH64_OPND_QLF_S_B;
      /* Index encoded in "Q:S:size".  */
      info->reglist.index = QSsize;
      break;
    case 0x1:
      if (QSsize & 0x1)
	/* UND.  */
	return false;
      info->qualifier = AARCH64_OPND_QLF_S_H;
      /* Index encoded in "Q:S:size<1>".  */
      info->reglist.index = QSsize >> 1;
      break;
    case 0x2:
      if ((QSsize >> 1) & 0x1)
	/* UND.  */
	return false;
      if ((QSsize & 0x1) == 0)
	{
	  info->qualifier = AARCH64_OPND_QLF_S_S;
	  /* Index encoded in "Q:S".  */
	  info->reglist.index = QSsize >> 2;
	}
      else
	{
	  if (extract_field (FLD_S, code, 0))
	    /* UND */
	    return false;
	  info->qualifier = AARCH64_OPND_QLF_S_D;
	  /* Index encoded in "Q".  */
	  info->reglist.index = QSsize >> 3;
	}
      break;
    default:
      return false;
    }

  info->reglist.has_index = 1;
  info->reglist.num_regs = 0;
  /* Number of registers is equal to the number of elements in
     each structure to be loaded/stored.  */
  info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
  assert (info->reglist.num_regs >= 1 && info->reglist.num_regs <= 4);

  return true;
}

/* Decode fields immh:immb and/or Q for e.g.
   SSHR <Vd>.<T>, <Vn>.<T>, #<shift>
   or SSHR <V><d>, <V><n>, #<shift>.  */

bool
aarch64_ext_advsimd_imm_shift (const aarch64_operand *self ATTRIBUTE_UNUSED,
			       aarch64_opnd_info *info, const aarch64_insn code,
			       const aarch64_inst *inst,
			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int pos;
  aarch64_insn Q, imm, immh;
  enum aarch64_insn_class iclass = inst->opcode->iclass;

  immh = extract_field (FLD_immh, code, 0);
  if (immh == 0)
    return false;
  imm = extract_fields (code, 0, 2, FLD_immh, FLD_immb);
  pos = 4;
  /* Get highest set bit in immh.  */
  while (--pos >= 0 && (immh & 0x8) == 0)
    immh <<= 1;

  assert ((iclass == asimdshf || iclass == asisdshf)
	  && (info->type == AARCH64_OPND_IMM_VLSR
	      || info->type == AARCH64_OPND_IMM_VLSL));

  if (iclass == asimdshf)
    {
      Q = extract_field (FLD_Q, code, 0);
      /* immh	Q	<T>
	 0000	x	SEE AdvSIMD modified immediate
	 0001	0	8B
	 0001	1	16B
	 001x	0	4H
	 001x	1	8H
	 01xx	0	2S
	 01xx	1	4S
	 1xxx	0	RESERVED
	 1xxx	1	2D  */
      info->qualifier =
	get_vreg_qualifier_from_value ((pos << 1) | (int) Q);
    }
  else
    info->qualifier = get_sreg_qualifier_from_value (pos);

  if (info->type == AARCH64_OPND_IMM_VLSR)
    /* immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(16-UInt(immh:immb))
       001x	(32-UInt(immh:immb))
       01xx	(64-UInt(immh:immb))
       1xxx	(128-UInt(immh:immb))  */
    info->imm.value = (16 << pos) - imm;
  else
    /* immh:immb
       immh	<shift>
       0000	SEE AdvSIMD modified immediate
       0001	(UInt(immh:immb)-8)
       001x	(UInt(immh:immb)-16)
       01xx	(UInt(immh:immb)-32)
       1xxx	(UInt(immh:immb)-64)  */
    info->imm.value = imm - (8 << pos);

  return true;
}
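
/* Illustrative sketch, not part of the original source: the right-shift
   case above computes <shift> = (16 << pos) - UInt(immh:immb), where pos
   indexes the highest set bit of immh.  For SSHR <Vd>.4S, <Vn>.4S, #3 the
   encoding has immh = 0111 (pos = 2) and UInt(immh:immb) = 61, giving
   (16 << 2) - 61 = 3.  The AARCH64_DIS_EXAMPLES guard is hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static int64_t
example_vlsr_shift (int pos, aarch64_insn immh_immb)
{
  return (16 << pos) - (int64_t) immh_immb;
}
#endif /* AARCH64_DIS_EXAMPLES */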

/* Decode shift immediate for e.g. SHLL <Vd>.<Ta>, <Vn>.<Tb>, #<shift>.  */
bool
aarch64_ext_shll_imm (const aarch64_operand *self ATTRIBUTE_UNUSED,
		      aarch64_opnd_info *info, const aarch64_insn code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int64_t imm;
  aarch64_insn val;
  val = extract_field (FLD_size, code, 0);
  switch (val)
    {
    case 0: imm = 8; break;
    case 1: imm = 16; break;
    case 2: imm = 32; break;
    default: return false;
    }
  info->imm.value = imm;
  return true;
}

/* Decode imm for e.g. BFM <Wd>, <Wn>, #<immr>, #<imms>.  The value in the
   field(s) will be extracted as an unsigned immediate value.  */
bool
aarch64_ext_imm (const aarch64_operand *self, aarch64_opnd_info *info,
		 const aarch64_insn code,
		 const aarch64_inst *inst ATTRIBUTE_UNUSED,
		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t imm;

  imm = extract_all_fields (self, code);

  if (operand_need_sign_extension (self))
    imm = sign_extend (imm, get_operand_fields_width (self) - 1);

  if (operand_need_shift_by_two (self))
    imm <<= 2;
  else if (operand_need_shift_by_four (self))
    imm <<= 4;

  if (info->type == AARCH64_OPND_ADDR_ADRP)
    imm <<= 12;

  info->imm.value = imm;
  return true;
}

/* Decode imm and its shifter for e.g. MOVZ <Wd>, #<imm16>{, LSL #<shift>}.  */
bool
aarch64_ext_imm_half (const aarch64_operand *self, aarch64_opnd_info *info,
		      const aarch64_insn code,
		      const aarch64_inst *inst ATTRIBUTE_UNUSED,
		      aarch64_operand_error *errors)
{
  aarch64_ext_imm (self, info, code, inst, errors);
  info->shifter.kind = AARCH64_MOD_LSL;
  info->shifter.amount = extract_field (FLD_hw, code, 0) << 4;
  return true;
}
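
/* Illustrative sketch, not part of the original source: for MOVZ/MOVN/MOVK
   the hw field selects the 16-bit slice being moved, so the LSL amount is
   hw * 16, which is what the "<< 4" above computes (e.g. hw = 1 gives
   LSL #16).  The AARCH64_DIS_EXAMPLES guard is hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static unsigned
example_movewide_shift (aarch64_insn hw)
{
  return (unsigned) hw << 4;
}
#endif /* AARCH64_DIS_EXAMPLES */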

/* Decode cmode and "a:b:c:d:e:f:g:h" for e.g.
     MOVI <Vd>.<T>, #<imm8> {, LSL #<amount>}.  */
bool
aarch64_ext_advsimd_imm_modified (const aarch64_operand *self ATTRIBUTE_UNUSED,
				  aarch64_opnd_info *info,
				  const aarch64_insn code,
				  const aarch64_inst *inst ATTRIBUTE_UNUSED,
				  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t imm;
  enum aarch64_opnd_qualifier opnd0_qualifier = inst->operands[0].qualifier;
  aarch64_field field = {0, 0};

  assert (info->idx == 1);

  if (info->type == AARCH64_OPND_SIMD_FPIMM)
    info->imm.is_fp = 1;

  /* a:b:c:d:e:f:g:h */
  imm = extract_fields (code, 0, 2, FLD_abc, FLD_defgh);
  if (!info->imm.is_fp && aarch64_get_qualifier_esize (opnd0_qualifier) == 8)
    {
      /* Either MOVI <Dd>, #<imm>
	 or     MOVI <Vd>.2D, #<imm>.
	 <imm> is a 64-bit immediate
	 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh',
	 encoded in "a:b:c:d:e:f:g:h".	*/
      int i;
      unsigned abcdefgh = imm;
      for (imm = 0ull, i = 0; i < 8; i++)
	if (((abcdefgh >> i) & 0x1) != 0)
	  imm |= 0xffull << (8 * i);
    }
  info->imm.value = imm;

  /* cmode */
  info->qualifier = get_expected_qualifier (inst, info->idx);
  switch (info->qualifier)
    {
    case AARCH64_OPND_QLF_NIL:
      /* no shift */
      info->shifter.kind = AARCH64_MOD_NONE;
      return true;
    case AARCH64_OPND_QLF_LSL:
      /* shift zeros */
      info->shifter.kind = AARCH64_MOD_LSL;
      switch (aarch64_get_qualifier_esize (opnd0_qualifier))
	{
	case 4: gen_sub_field (FLD_cmode, 1, 2, &field); break;	/* per word */
	case 2: gen_sub_field (FLD_cmode, 1, 1, &field); break;	/* per half */
	case 1: gen_sub_field (FLD_cmode, 1, 0, &field); break;	/* per byte */
	default: assert (0); return false;
	}
      /* 00: 0; 01: 8; 10:16; 11:24.  */
      info->shifter.amount = extract_field_2 (&field, code, 0) << 3;
      break;
    case AARCH64_OPND_QLF_MSL:
      /* shift ones */
      info->shifter.kind = AARCH64_MOD_MSL;
      gen_sub_field (FLD_cmode, 0, 1, &field);		/* per word */
      info->shifter.amount = extract_field_2 (&field, code, 0) ? 16 : 8;
      break;
    default:
      assert (0);
      return false;
    }

  return true;
}

/* Decode an 8-bit floating-point immediate.  */
bool
aarch64_ext_fpimm (const aarch64_operand *self, aarch64_opnd_info *info,
		   const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->imm.value = extract_all_fields (self, code);
  info->imm.is_fp = 1;
  return true;
}

/* Decode a 1-bit rotate immediate (#90 or #270).  */
bool
aarch64_ext_imm_rotate1 (const aarch64_operand *self, aarch64_opnd_info *info,
			 const aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = extract_field (self->fields[0], code, 0);
  assert (rot < 2U);
  info->imm.value = rot * 180 + 90;
  return true;
}

/* Decode a 2-bit rotate immediate (#0, #90, #180 or #270).  */
bool
aarch64_ext_imm_rotate2 (const aarch64_operand *self, aarch64_opnd_info *info,
			 const aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint64_t rot = extract_field (self->fields[0], code, 0);
  assert (rot < 4U);
  info->imm.value = rot * 90;
  return true;
}

/* Decode scale for e.g. SCVTF <Dd>, <Wn>, #<fbits>.  */
bool
aarch64_ext_fbits (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   aarch64_opnd_info *info, const aarch64_insn code,
		   const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->imm.value = 64 - extract_field (FLD_scale, code, 0);
  return true;
}

/* Decode arithmetic immediate for e.g.
     SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}.  */
bool
aarch64_ext_aimm (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info, const aarch64_insn code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  info->shifter.kind = AARCH64_MOD_LSL;
  /* shift */
  value = extract_field (FLD_shift, code, 0);
  if (value >= 2)
    return false;
  info->shifter.amount = value ? 12 : 0;
  /* imm12 (unsigned) */
  info->imm.value = extract_field (FLD_imm12, code, 0);

  return true;
}

/* Return true if VALUE is a valid logical immediate encoding, storing the
   decoded value in *RESULT if so.  ESIZE is the number of bytes in the
   decoded immediate.  */
static bool
decode_limm (uint32_t esize, aarch64_insn value, int64_t *result)
{
  uint64_t imm, mask;
  uint32_t N, R, S;
  unsigned simd_size;

  /* value is N:immr:imms.  */
  S = value & 0x3f;
  R = (value >> 6) & 0x3f;
  N = (value >> 12) & 0x1;

  /* The immediate value is S+1 bits set to 1, left rotated by SIMDsize - R
     (in other words, right rotated by R), then replicated.  */
  if (N != 0)
    {
      simd_size = 64;
      mask = 0xffffffffffffffffull;
    }
  else
    {
      switch (S)
	{
	case 0x00 ... 0x1f: /* 0xxxxx */ simd_size = 32;           break;
	case 0x20 ... 0x2f: /* 10xxxx */ simd_size = 16; S &= 0xf; break;
	case 0x30 ... 0x37: /* 110xxx */ simd_size =  8; S &= 0x7; break;
	case 0x38 ... 0x3b: /* 1110xx */ simd_size =  4; S &= 0x3; break;
	case 0x3c ... 0x3d: /* 11110x */ simd_size =  2; S &= 0x1; break;
	default: return false;
	}
      mask = (1ull << simd_size) - 1;
      /* Top bits are IGNORED.  */
      R &= simd_size - 1;
    }

  if (simd_size > esize * 8)
    return false;

  /* NOTE: if S = simd_size - 1 we get 0xf..f which is rejected.  */
  if (S == simd_size - 1)
    return false;
  /* S+1 consecutive bits to 1.  */
  /* NOTE: S can't be 63 due to detection above.  */
  imm = (1ull << (S + 1)) - 1;
  /* Rotate to the left by simd_size - R.  */
  if (R != 0)
    imm = ((imm << (simd_size - R)) & mask) | (imm >> R);
  /* Replicate the value according to SIMD size.  */
  switch (simd_size)
    {
    case  2: imm = (imm <<  2) | imm;
      /* Fall through.  */
    case  4: imm = (imm <<  4) | imm;
      /* Fall through.  */
    case  8: imm = (imm <<  8) | imm;
      /* Fall through.  */
    case 16: imm = (imm << 16) | imm;
      /* Fall through.  */
    case 32: imm = (imm << 32) | imm;
      /* Fall through.  */
    case 64: break;
    default: assert (0); return 0;
    }

  *result = imm & ~((uint64_t) -1 << (esize * 4) << (esize * 4));

  return true;
}
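
/* Illustrative worked example, not part of the original source: with a
   32-bit element size, N:immr:imms = 0:000000:000111 selects S+1 = 8
   consecutive ones with no rotation, so decode_limm stores 0xff (as used
   by e.g. ORR <Wd|WSP>, <Wn>, #0xff).  The AARCH64_DIS_EXAMPLES guard is
   hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static bool
example_decode_limm_0xff (int64_t *result)
{
  /* esize is in bytes; 0x007 is the packed N:immr:imms value.  */
  return decode_limm (4, 0x007, result);
}
#endif /* AARCH64_DIS_EXAMPLES */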

/* Decode a logical immediate for e.g. ORR <Wd|WSP>, <Wn>, #<imm>.  */
bool
aarch64_ext_limm (const aarch64_operand *self,
		  aarch64_opnd_info *info, const aarch64_insn code,
		  const aarch64_inst *inst,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  uint32_t esize;
  aarch64_insn value;

  value = extract_fields (code, 0, 3, self->fields[0], self->fields[1],
			  self->fields[2]);
  esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
  return decode_limm (esize, value, &info->imm.value);
}

/* Decode a logical immediate for the BIC alias of AND (etc.).  */
bool
aarch64_ext_inv_limm (const aarch64_operand *self,
		      aarch64_opnd_info *info, const aarch64_insn code,
		      const aarch64_inst *inst,
		      aarch64_operand_error *errors)
{
  if (!aarch64_ext_limm (self, info, code, inst, errors))
    return false;
  info->imm.value = ~info->imm.value;
  return true;
}

/* Decode Ft for e.g. STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}]
   or LDP <Qt1>, <Qt2>, [<Xn|SP>], #<imm>.  */
bool
aarch64_ext_ft (const aarch64_operand *self ATTRIBUTE_UNUSED,
		aarch64_opnd_info *info,
		const aarch64_insn code, const aarch64_inst *inst,
		aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;

  /* Rt */
  info->reg.regno = extract_field (FLD_Rt, code, 0);

  /* size */
  value = extract_field (FLD_ldst_size, code, 0);
  if (inst->opcode->iclass == ldstpair_indexed
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == loadlit)
    {
      enum aarch64_opnd_qualifier qualifier;
      switch (value)
	{
	case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
	case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
	case 2: qualifier = AARCH64_OPND_QLF_S_Q; break;
	default: return false;
	}
      info->qualifier = qualifier;
    }
  else
    {
      /* opc1:size */
      value = extract_fields (code, 0, 2, FLD_opc1, FLD_ldst_size);
      if (value > 0x4)
	return false;
      info->qualifier = get_sreg_qualifier_from_value (value);
    }

  return true;
}

/* Decode the address operand for e.g. STXRB <Ws>, <Wt>, [<Xn|SP>{,#0}].  */
bool
aarch64_ext_addr_simple (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  return true;
}

/* Decode the address operand for e.g.
     stlur <Xt>, [<Xn|SP>{, <amount>}].  */
bool
aarch64_ext_addr_offset (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code, const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  info->qualifier = get_expected_qualifier (inst, info->idx);

  /* Rn */
  info->addr.base_regno = extract_field (self->fields[0], code, 0);

  /* simm9 */
  aarch64_insn imm = extract_fields (code, 0, 1, self->fields[1]);
  info->addr.offset.imm = sign_extend (imm, 8);
  if (extract_field (self->fields[2], code, 0) == 1) {
    info->addr.writeback = 1;
    info->addr.preind = 1;
  }
  return true;
}

/* Decode the address operand for e.g.
     STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
bool
aarch64_ext_addr_regoff (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info,
			 aarch64_insn code, const aarch64_inst *inst,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn S, value;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  /* option */
  value = extract_field (FLD_option, code, 0);
  info->shifter.kind =
    aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
  /* Fix-up the shifter kind; although the table-driven approach is
     efficient, it is slightly inflexible, thus needing this fix-up.  */
  if (info->shifter.kind == AARCH64_MOD_UXTX)
    info->shifter.kind = AARCH64_MOD_LSL;
  /* S */
  S = extract_field (FLD_S, code, 0);
  if (S == 0)
    {
      info->shifter.amount = 0;
      info->shifter.amount_present = 0;
    }
  else
    {
      int size;
      /* Need information in other operand(s) to help achieve the decoding
	 from 'S' field.  */
      info->qualifier = get_expected_qualifier (inst, info->idx);
      /* Get the size of the data element that is accessed, which may be
	 different from that of the source register size, e.g. in strb/ldrb.  */
      size = aarch64_get_qualifier_esize (info->qualifier);
      info->shifter.amount = get_logsz (size);
      info->shifter.amount_present = 1;
    }

  return true;
}

/* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>], #<simm>.  */
bool
aarch64_ext_addr_simm (const aarch64_operand *self, aarch64_opnd_info *info,
		       aarch64_insn code, const aarch64_inst *inst,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn imm;
  info->qualifier = get_expected_qualifier (inst, info->idx);

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* simm (imm9 or imm7)  */
  imm = extract_field (self->fields[0], code, 0);
  info->addr.offset.imm = sign_extend (imm, fields[self->fields[0]].width - 1);
  if (self->fields[0] == FLD_imm7
      || info->qualifier == AARCH64_OPND_QLF_imm_tag)
    /* scaled immediate in ld/st pair instructions.  */
    info->addr.offset.imm *= aarch64_get_qualifier_esize (info->qualifier);
  /* qualifier */
  if (inst->opcode->iclass == ldst_unscaled
      || inst->opcode->iclass == ldstnapair_offs
      || inst->opcode->iclass == ldstpair_off
      || inst->opcode->iclass == ldst_unpriv)
    info->addr.writeback = 0;
  else
    {
      /* pre/post- index */
      info->addr.writeback = 1;
      if (extract_field (self->fields[1], code, 0) == 1)
	info->addr.preind = 1;
      else
	info->addr.postind = 1;
    }

  return true;
}
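
/* Illustrative sketch, not part of the original source: for the ld/st pair
   forms the imm7 field is scaled by the access size, so LDP <Xt1>, <Xt2>,
   [<Xn|SP>, #16] encodes imm7 = 2 and the code above multiplies it back by
   the 8-byte element size.  The AARCH64_DIS_EXAMPLES guard is
   hypothetical.  */
#ifdef AARCH64_DIS_EXAMPLES
static int64_t
example_ldp_offset (aarch64_insn imm7, int esize)
{
  /* imm7 is a 7-bit signed field; bit 6 is the sign bit.  */
  return (int64_t) sign_extend (imm7 & 0x7f, 6) * esize;
}
#endif /* AARCH64_DIS_EXAMPLES */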

/* Decode the address operand for e.g. LDRSW <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ext_addr_uimm12 (const aarch64_operand *self, aarch64_opnd_info *info,
			 aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int shift;
  info->qualifier = get_expected_qualifier (inst, info->idx);
  shift = get_logsz (aarch64_get_qualifier_esize (info->qualifier));
  /* Rn */
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  /* uimm12 */
  info->addr.offset.imm = extract_field (self->fields[1], code, 0) << shift;
  return true;
}

/* Decode the address operand for e.g. LDRAA <Xt>, [<Xn|SP>{, #<simm>}].  */
bool
aarch64_ext_addr_simm10 (const aarch64_operand *self, aarch64_opnd_info *info,
			 aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn imm;

  info->qualifier = get_expected_qualifier (inst, info->idx);
  /* Rn */
  info->addr.base_regno = extract_field (self->fields[0], code, 0);
  /* simm10 */
  imm = extract_fields (code, 0, 2, self->fields[1], self->fields[2]);
  info->addr.offset.imm = sign_extend (imm, 9) << 3;
  if (extract_field (self->fields[3], code, 0) == 1) {
    info->addr.writeback = 1;
    info->addr.preind = 1;
  }
  return true;
}

/* Decode the address operand for e.g.
     LD1 {<Vt>.<T>, <Vt2>.<T>, <Vt3>.<T>}, [<Xn|SP>], <Xm|#<amount>>.  */
bool
aarch64_ext_simd_addr_post (const aarch64_operand *self ATTRIBUTE_UNUSED,
			    aarch64_opnd_info *info,
			    aarch64_insn code, const aarch64_inst *inst,
			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* The opcode dependent area stores the number of elements in
     each structure to be loaded/stored.  */
  int is_ld1r = get_opcode_dependent_value (inst->opcode) == 1;

  /* Rn */
  info->addr.base_regno = extract_field (FLD_Rn, code, 0);
  /* Rm | #<amount>  */
  info->addr.offset.regno = extract_field (FLD_Rm, code, 0);
  if (info->addr.offset.regno == 31)
    {
      if (inst->opcode->operands[0] == AARCH64_OPND_LVt_AL)
	/* Special handling of loading single structure to all lanes.  */
	info->addr.offset.imm = (is_ld1r ? 1
				 : inst->operands[0].reglist.num_regs)
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier);
      else
	info->addr.offset.imm = inst->operands[0].reglist.num_regs
	  * aarch64_get_qualifier_esize (inst->operands[0].qualifier)
	  * aarch64_get_qualifier_nelem (inst->operands[0].qualifier);
    }
  else
    info->addr.offset.is_reg = 1;
  info->addr.writeback = 1;

  return true;
}

/* Decode the condition operand for e.g. CSEL <Xd>, <Xn>, <Xm>, <cond>.  */
bool
aarch64_ext_cond (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info,
		  aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  aarch64_insn value;
  /* cond */
  value = extract_field (FLD_cond, code, 0);
  info->cond = get_cond_from_value (value);
  return true;
}

/* Decode the system register operand for e.g. MRS <Xt>, <systemreg>.  */
bool
aarch64_ext_sysreg (const aarch64_operand *self ATTRIBUTE_UNUSED,
		    aarch64_opnd_info *info,
		    aarch64_insn code,
		    const aarch64_inst *inst ATTRIBUTE_UNUSED,
		    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* op0:op1:CRn:CRm:op2 */
  info->sysreg.value = extract_fields (code, 0, 5, FLD_op0, FLD_op1, FLD_CRn,
				       FLD_CRm, FLD_op2);
  info->sysreg.flags = 0;

  /* If a system instruction, check which restrictions should be on the
     register value during decoding; these will be enforced then.  */
  if (inst->opcode->iclass == ic_system)
    {
      /* Check to see if it's read-only, else check if it's write only.
	 If it's both or unspecified, don't care.  */
      if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE)) == F_SYS_READ)
	info->sysreg.flags = F_REG_READ;
      else if ((inst->opcode->flags & (F_SYS_READ | F_SYS_WRITE))
	       == F_SYS_WRITE)
	info->sysreg.flags = F_REG_WRITE;
    }

  return true;
}

/* Decode the PSTATE field operand for e.g. MSR <pstatefield>, #<imm>.  */
bool
aarch64_ext_pstatefield (const aarch64_operand *self ATTRIBUTE_UNUSED,
			 aarch64_opnd_info *info, aarch64_insn code,
			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  /* op1:op2 */
  info->pstatefield = extract_fields (code, 0, 2, FLD_op1, FLD_op2);
  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
    if (aarch64_pstatefields[i].value == (aarch64_insn)info->pstatefield)
      return true;
  /* Reserved value in <pstatefield>.  */
  return false;
}

/* Decode the system instruction op operand for e.g. AT <at_op>, <Xt>.  */
bool
aarch64_ext_sysins_op (const aarch64_operand *self ATTRIBUTE_UNUSED,
		       aarch64_opnd_info *info,
		       aarch64_insn code,
		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  int i;
  aarch64_insn value;
  const aarch64_sys_ins_reg *sysins_ops;
  /* op0:op1:CRn:CRm:op2 */
  value = extract_fields (code, 0, 5,
			  FLD_op0, FLD_op1, FLD_CRn,
			  FLD_CRm, FLD_op2);

  switch (info->type)
    {
    case AARCH64_OPND_SYSREG_AT: sysins_ops = aarch64_sys_regs_at; break;
    case AARCH64_OPND_SYSREG_DC: sysins_ops = aarch64_sys_regs_dc; break;
    case AARCH64_OPND_SYSREG_IC: sysins_ops = aarch64_sys_regs_ic; break;
    case AARCH64_OPND_SYSREG_TLBI: sysins_ops = aarch64_sys_regs_tlbi; break;
    case AARCH64_OPND_SYSREG_SR:
	sysins_ops = aarch64_sys_regs_sr;
	 /* Let's remove op2 for rctx.  Refer to comments in the definition of
	    aarch64_sys_regs_sr[].  */
	value = value & ~(0x7);
	break;
    default: assert (0); return false;
    }

  for (i = 0; sysins_ops[i].name != NULL; ++i)
    if (sysins_ops[i].value == value)
      {
	info->sysins_op = sysins_ops + i;
	DEBUG_TRACE ("%s found value: %x, has_xt: %d, i: %d.",
		     info->sysins_op->name,
		     (unsigned)info->sysins_op->value,
		     aarch64_sys_ins_reg_has_xt (info->sysins_op), i);
	return true;
      }

  return false;
}

/* Decode the memory barrier option operand for e.g. DMB <option>|#<imm>.  */

bool
aarch64_ext_barrier (const aarch64_operand *self ATTRIBUTE_UNUSED,
		     aarch64_opnd_info *info,
		     aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm */
  info->barrier = aarch64_barrier_options + extract_field (FLD_CRm, code, 0);
  return true;
}

/* Decode the memory barrier option operand for DSB <option>nXS|#<imm>.  */

bool
aarch64_ext_barrier_dsb_nxs (const aarch64_operand *self ATTRIBUTE_UNUSED,
		     aarch64_opnd_info *info,
		     aarch64_insn code,
		     const aarch64_inst *inst ATTRIBUTE_UNUSED,
		     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* For the DSB nXS barrier variant the immediate is encoded in a 2-bit
     field.  */
  aarch64_insn field = extract_field (FLD_CRm_dsb_nxs, code, 0);
  info->barrier = aarch64_barrier_dsb_nxs_options + field;
  return true;
}

/* Decode the prefetch operation option operand for e.g.
     PRFM <prfop>, [<Xn|SP>{, #<pimm>}].  */

bool
aarch64_ext_prfop (const aarch64_operand *self ATTRIBUTE_UNUSED,
		   aarch64_opnd_info *info,
		   aarch64_insn code, const aarch64_inst *inst ATTRIBUTE_UNUSED,
		   aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* prfop in Rt */
  info->prfop = aarch64_prfops + extract_field (FLD_Rt, code, 0);
  return true;
}

/* Decode the hint number for an alias taking an operand.  Set info->hint_option
   to the matching name/value pair in aarch64_hint_options.  */

bool
aarch64_ext_hint (const aarch64_operand *self ATTRIBUTE_UNUSED,
		  aarch64_opnd_info *info,
		  aarch64_insn code,
		  const aarch64_inst *inst ATTRIBUTE_UNUSED,
		  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
{
  /* CRm:op2.  */
  unsigned hint_number;
  int i;

  hint_number = extract_fields (code, 0, 2, FLD_CRm, FLD_op2);

  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
    {
      if (hint_number == HINT_VAL (aarch64_hint_options[i].value))
	{
	  info->hint_option = &(aarch64_hint_options[i]);
	  return true;
	}
    }

  return false;
}
1352 
1353 /* Decode the extended register operand for e.g.
1354      STR <Qt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
1355 bool
aarch64_ext_reg_extended(const aarch64_operand * self ATTRIBUTE_UNUSED,aarch64_opnd_info * info,aarch64_insn code,const aarch64_inst * inst ATTRIBUTE_UNUSED,aarch64_operand_error * errors ATTRIBUTE_UNUSED)1356 aarch64_ext_reg_extended (const aarch64_operand *self ATTRIBUTE_UNUSED,
1357 			  aarch64_opnd_info *info,
1358 			  aarch64_insn code,
1359 			  const aarch64_inst *inst ATTRIBUTE_UNUSED,
1360 			  aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1361 {
1362   aarch64_insn value;
1363 
1364   /* Rm */
1365   info->reg.regno = extract_field (FLD_Rm, code, 0);
1366   /* option */
1367   value = extract_field (FLD_option, code, 0);
1368   info->shifter.kind =
1369     aarch64_get_operand_modifier_from_value (value, true /* extend_p */);
1370   /* imm3 */
1371   info->shifter.amount = extract_field (FLD_imm3, code,  0);
1372 
1373   /* This makes the constraint checking happy.  */
1374   info->shifter.operator_present = 1;
1375 
1376   /* Assume inst->operands[0].qualifier has been resolved.  */
1377   assert (inst->operands[0].qualifier != AARCH64_OPND_QLF_NIL);
1378   info->qualifier = AARCH64_OPND_QLF_W;
1379   if (inst->operands[0].qualifier == AARCH64_OPND_QLF_X
1380       && (info->shifter.kind == AARCH64_MOD_UXTX
1381 	  || info->shifter.kind == AARCH64_MOD_SXTX))
1382     info->qualifier = AARCH64_OPND_QLF_X;
1383 
1384   return true;
1385 }
1386 
1387 /* Decode the shifted register operand for e.g.
1388      SUBS <Xd>, <Xn>, <Xm> {, <shift> #<amount>}.  */
1389 bool
aarch64_ext_reg_shifted(const aarch64_operand * self ATTRIBUTE_UNUSED,aarch64_opnd_info * info,aarch64_insn code,const aarch64_inst * inst ATTRIBUTE_UNUSED,aarch64_operand_error * errors ATTRIBUTE_UNUSED)1390 aarch64_ext_reg_shifted (const aarch64_operand *self ATTRIBUTE_UNUSED,
1391 			 aarch64_opnd_info *info,
1392 			 aarch64_insn code,
1393 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1394 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1395 {
1396   aarch64_insn value;
1397 
1398   /* Rm */
1399   info->reg.regno = extract_field (FLD_Rm, code, 0);
1400   /* shift */
1401   value = extract_field (FLD_shift, code, 0);
1402   info->shifter.kind =
1403     aarch64_get_operand_modifier_from_value (value, false /* extend_p */);
1404   if (info->shifter.kind == AARCH64_MOD_ROR
1405       && inst->opcode->iclass != log_shift)
1406     /* ROR is not available for the shifted register operand in arithmetic
1407        instructions.  */
1408     return false;
1409   /* imm6 */
1410   info->shifter.amount = extract_field (FLD_imm6, code,  0);
1411 
1412   /* This makes the constraint checking happy.  */
1413   info->shifter.operator_present = 1;
1414 
1415   return true;
1416 }
1417 
1418 /* Decode an SVE address [<base>, #<offset>*<factor>, MUL VL],
1419    where <offset> is given by the OFFSET parameter and where <factor> is
1420    1 plus SELF's operand-dependent value.  fields[0] specifies the field
1421    that holds <base>.  */
1422 static bool
1423 aarch64_ext_sve_addr_reg_mul_vl (const aarch64_operand *self,
1424 				 aarch64_opnd_info *info, aarch64_insn code,
1425 				 int64_t offset)
1426 {
1427   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1428   info->addr.offset.imm = offset * (1 + get_operand_specific_data (self));
1429   info->addr.offset.is_reg = false;
1430   info->addr.writeback = false;
1431   info->addr.preind = true;
1432   if (offset != 0)
1433     info->shifter.kind = AARCH64_MOD_MUL_VL;
1434   info->shifter.amount = 1;
1435   info->shifter.operator_present = (info->addr.offset.imm != 0);
1436   info->shifter.amount_present = false;
1437   return true;
1438 }
1439 
1440 /* Decode an SVE address [<base>, #<simm4>*<factor>, MUL VL],
1441    where <simm4> is a 4-bit signed value and where <factor> is 1 plus
1442    SELF's operand-dependent value.  fields[0] specifies the field that
1443    holds <base>.  <simm4> is encoded in the SVE_imm4 field.  */
1444 bool
1445 aarch64_ext_sve_addr_ri_s4xvl (const aarch64_operand *self,
1446 			       aarch64_opnd_info *info, aarch64_insn code,
1447 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1448 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1449 {
1450   int offset;
1451 
1452   offset = extract_field (FLD_SVE_imm4, code, 0);
1453   offset = ((offset + 8) & 15) - 8;
1454   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1455 }
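
/* A minimal illustrative sketch (not used by the disassembler): the
   simm4/simm6/simm9 MUL VL decoders here sign-extend an N-bit field with
   the add-mask-subtract idiom seen above, e.g. ((x + 8) & 15) - 8 for
   N = 4, which maps 0..7 to 0..7 and 8..15 to -8..-1.  The helper name
   and its generalisation over N are ours, purely for exposition.  */

static int ATTRIBUTE_UNUSED
example_sign_extend_nbit (unsigned int raw, unsigned int n)
{
  int half = 1 << (n - 1);		/* 2^(N-1), e.g. 8 for N = 4.  */
  return (int) ((raw + half) & (2 * half - 1)) - half;
}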
1456 
1457 /* Decode an SVE address [<base>, #<simm6>*<factor>, MUL VL],
1458    where <simm6> is a 6-bit signed value and where <factor> is 1 plus
1459    SELF's operand-dependent value.  fields[0] specifies the field that
1460    holds <base>.  <simm6> is encoded in the SVE_imm6 field.  */
1461 bool
1462 aarch64_ext_sve_addr_ri_s6xvl (const aarch64_operand *self,
1463 			       aarch64_opnd_info *info, aarch64_insn code,
1464 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1465 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1466 {
1467   int offset;
1468 
1469   offset = extract_field (FLD_SVE_imm6, code, 0);
1470   offset = (((offset + 32) & 63) - 32);
1471   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1472 }
1473 
1474 /* Decode an SVE address [<base>, #<simm9>*<factor>, MUL VL],
1475    where <simm9> is a 9-bit signed value and where <factor> is 1 plus
1476    SELF's operand-dependent value.  fields[0] specifies the field that
1477    holds <base>.  <simm9> is encoded in the concatenation of the SVE_imm6
1478    and imm3 fields, with imm3 being the less-significant part.  */
1479 bool
1480 aarch64_ext_sve_addr_ri_s9xvl (const aarch64_operand *self,
1481 			       aarch64_opnd_info *info,
1482 			       aarch64_insn code,
1483 			       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1484 			       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1485 {
1486   int offset;
1487 
1488   offset = extract_fields (code, 0, 2, FLD_SVE_imm6, FLD_imm3);
1489   offset = (((offset + 256) & 511) - 256);
1490   return aarch64_ext_sve_addr_reg_mul_vl (self, info, code, offset);
1491 }
1492 
1493 /* Decode an SVE address [<base>, #<offset> << <shift>], where <offset>
1494    is given by the OFFSET parameter and where <shift> is SELF's operand-
1495    dependent value.  fields[0] specifies the base register field <base>.  */
1496 static bool
1497 aarch64_ext_sve_addr_reg_imm (const aarch64_operand *self,
1498 			      aarch64_opnd_info *info, aarch64_insn code,
1499 			      int64_t offset)
1500 {
1501   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1502   info->addr.offset.imm = offset * (1 << get_operand_specific_data (self));
1503   info->addr.offset.is_reg = false;
1504   info->addr.writeback = false;
1505   info->addr.preind = true;
1506   info->shifter.operator_present = false;
1507   info->shifter.amount_present = false;
1508   return true;
1509 }
1510 
1511 /* Decode an SVE address [X<n>, #<SVE_imm4> << <shift>], where <SVE_imm4>
1512    is a 4-bit signed number and where <shift> is SELF's operand-dependent
1513    value.  fields[0] specifies the base register field.  */
1514 bool
1515 aarch64_ext_sve_addr_ri_s4 (const aarch64_operand *self,
1516 			    aarch64_opnd_info *info, aarch64_insn code,
1517 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1518 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1519 {
1520   int offset = sign_extend (extract_field (FLD_SVE_imm4, code, 0), 3);
1521   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1522 }
1523 
1524 /* Decode an SVE address [X<n>, #<SVE_imm6> << <shift>], where <SVE_imm6>
1525    is a 6-bit unsigned number and where <shift> is SELF's operand-dependent
1526    value.  fields[0] specifies the base register field.  */
1527 bool
1528 aarch64_ext_sve_addr_ri_u6 (const aarch64_operand *self,
1529 			    aarch64_opnd_info *info, aarch64_insn code,
1530 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1531 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1532 {
1533   int offset = extract_field (FLD_SVE_imm6, code, 0);
1534   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1535 }
1536 
1537 /* Decode an SVE address [X<n>, X<m>{, LSL #<shift>}], where <shift>
1538    is SELF's operand-dependent value.  fields[0] specifies the base
1539    register field and fields[1] specifies the offset register field.  */
1540 bool
1541 aarch64_ext_sve_addr_rr_lsl (const aarch64_operand *self,
1542 			     aarch64_opnd_info *info, aarch64_insn code,
1543 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1544 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1545 {
1546   int index_regno;
1547 
1548   index_regno = extract_field (self->fields[1], code, 0);
1549   if (index_regno == 31 && (self->flags & OPD_F_NO_ZR) != 0)
1550     return false;
1551 
1552   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1553   info->addr.offset.regno = index_regno;
1554   info->addr.offset.is_reg = true;
1555   info->addr.writeback = false;
1556   info->addr.preind = true;
1557   info->shifter.kind = AARCH64_MOD_LSL;
1558   info->shifter.amount = get_operand_specific_data (self);
1559   info->shifter.operator_present = (info->shifter.amount != 0);
1560   info->shifter.amount_present = (info->shifter.amount != 0);
1561   return true;
1562 }
1563 
1564 /* Decode an SVE address [X<n>, Z<m>.<T>, (S|U)XTW {#<shift>}], where
1565    <shift> is SELF's operand-dependent value.  fields[0] specifies the
1566    base register field, fields[1] specifies the offset register field and
1567    fields[2] is a single-bit field that selects SXTW over UXTW.  */
1568 bool
1569 aarch64_ext_sve_addr_rz_xtw (const aarch64_operand *self,
1570 			     aarch64_opnd_info *info, aarch64_insn code,
1571 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1572 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1573 {
1574   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1575   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1576   info->addr.offset.is_reg = true;
1577   info->addr.writeback = false;
1578   info->addr.preind = true;
1579   if (extract_field (self->fields[2], code, 0))
1580     info->shifter.kind = AARCH64_MOD_SXTW;
1581   else
1582     info->shifter.kind = AARCH64_MOD_UXTW;
1583   info->shifter.amount = get_operand_specific_data (self);
1584   info->shifter.operator_present = true;
1585   info->shifter.amount_present = (info->shifter.amount != 0);
1586   return true;
1587 }
1588 
1589 /* Decode an SVE address [Z<n>.<T>, #<imm5> << <shift>], where <imm5> is a
1590    5-bit unsigned number and where <shift> is SELF's operand-dependent value.
1591    fields[0] specifies the base register field.  */
1592 bool
1593 aarch64_ext_sve_addr_zi_u5 (const aarch64_operand *self,
1594 			    aarch64_opnd_info *info, aarch64_insn code,
1595 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1596 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1597 {
1598   int offset = extract_field (FLD_imm5, code, 0);
1599   return aarch64_ext_sve_addr_reg_imm (self, info, code, offset);
1600 }
1601 
1602 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, <modifier> {#<msz>}}],
1603    where <modifier> is given by KIND and where <msz> is a 2-bit unsigned
1604    number.  fields[0] specifies the base register field and fields[1]
1605    specifies the offset register field.  */
1606 static bool
1607 aarch64_ext_sve_addr_zz (const aarch64_operand *self, aarch64_opnd_info *info,
1608 			 aarch64_insn code, enum aarch64_modifier_kind kind)
1609 {
1610   info->addr.base_regno = extract_field (self->fields[0], code, 0);
1611   info->addr.offset.regno = extract_field (self->fields[1], code, 0);
1612   info->addr.offset.is_reg = true;
1613   info->addr.writeback = false;
1614   info->addr.preind = true;
1615   info->shifter.kind = kind;
1616   info->shifter.amount = extract_field (FLD_SVE_msz, code, 0);
1617   info->shifter.operator_present = (kind != AARCH64_MOD_LSL
1618 				    || info->shifter.amount != 0);
1619   info->shifter.amount_present = (info->shifter.amount != 0);
1620   return true;
1621 }
1622 
1623 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>{, LSL #<msz>}], where
1624    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1625    field and fields[1] specifies the offset register field.  */
1626 bool
1627 aarch64_ext_sve_addr_zz_lsl (const aarch64_operand *self,
1628 			     aarch64_opnd_info *info, aarch64_insn code,
1629 			     const aarch64_inst *inst ATTRIBUTE_UNUSED,
1630 			     aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1631 {
1632   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_LSL);
1633 }
1634 
1635 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, SXTW {#<msz>}], where
1636    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1637    field and fields[1] specifies the offset register field.  */
1638 bool
1639 aarch64_ext_sve_addr_zz_sxtw (const aarch64_operand *self,
1640 			      aarch64_opnd_info *info, aarch64_insn code,
1641 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1642 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1643 {
1644   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_SXTW);
1645 }
1646 
1647 /* Decode an SVE address [Z<n>.<T>, Z<m>.<T>, UXTW {#<msz>}], where
1648    <msz> is a 2-bit unsigned number.  fields[0] specifies the base register
1649    field and fields[1] specifies the offset register field.  */
1650 bool
1651 aarch64_ext_sve_addr_zz_uxtw (const aarch64_operand *self,
1652 			      aarch64_opnd_info *info, aarch64_insn code,
1653 			      const aarch64_inst *inst ATTRIBUTE_UNUSED,
1654 			      aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1655 {
1656   return aarch64_ext_sve_addr_zz (self, info, code, AARCH64_MOD_UXTW);
1657 }
1658 
1659 /* Finish decoding an SVE arithmetic immediate, given that INFO already
1660    has the raw field value and that the low 8 bits decode to VALUE.  */
1661 static bool
1662 decode_sve_aimm (aarch64_opnd_info *info, int64_t value)
1663 {
1664   info->shifter.kind = AARCH64_MOD_LSL;
1665   info->shifter.amount = 0;
1666   if (info->imm.value & 0x100)
1667     {
1668       if (value == 0)
1669 	/* Decode 0x100 as #0, LSL #8.  */
1670 	info->shifter.amount = 8;
1671       else
1672 	value *= 256;
1673     }
1674   info->shifter.operator_present = (info->shifter.amount != 0);
1675   info->shifter.amount_present = (info->shifter.amount != 0);
1676   info->imm.value = value;
1677   return true;
1678 }
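
/* Illustrative sketch only (hypothetical helper, not called anywhere):
   decode_sve_aimm above splits the 9-bit sh:imm8 arithmetic immediate into
   a value and an optional LSL #8.  For instance, raw 0x1ff expands to the
   plain value 0xff00, whereas raw 0x100 is kept as #0, LSL #8 so that the
   original encoding remains visible.  */

static int64_t ATTRIBUTE_UNUSED
example_expand_sve_aimm (unsigned int raw, int *shift)
{
  int64_t value = raw & 0xff;		/* imm8 part.  */
  *shift = 0;
  if (raw & 0x100)			/* sh selects LSL #8.  */
    {
      if (value == 0)
	*shift = 8;			/* Print as #0, LSL #8.  */
      else
	value *= 256;			/* Fold the shift into the value.  */
    }
  return value;
}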
1679 
1680 /* Decode an SVE ADD/SUB immediate.  */
1681 bool
1682 aarch64_ext_sve_aimm (const aarch64_operand *self,
1683 		      aarch64_opnd_info *info, const aarch64_insn code,
1684 		      const aarch64_inst *inst,
1685 		      aarch64_operand_error *errors)
1686 {
1687   return (aarch64_ext_imm (self, info, code, inst, errors)
1688 	  && decode_sve_aimm (info, (uint8_t) info->imm.value));
1689 }
1690 
1691 /* Decode an SVE CPY/DUP immediate.  */
1692 bool
1693 aarch64_ext_sve_asimm (const aarch64_operand *self,
1694 		       aarch64_opnd_info *info, const aarch64_insn code,
1695 		       const aarch64_inst *inst,
1696 		       aarch64_operand_error *errors)
1697 {
1698   return (aarch64_ext_imm (self, info, code, inst, errors)
1699 	  && decode_sve_aimm (info, (int8_t) info->imm.value));
1700 }
1701 
1702 /* Decode a single-bit immediate that selects between #0.5 and #1.0.
1703    The fields array specifies which field to use.  */
1704 bool
1705 aarch64_ext_sve_float_half_one (const aarch64_operand *self,
1706 				aarch64_opnd_info *info, aarch64_insn code,
1707 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1708 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1709 {
1710   if (extract_field (self->fields[0], code, 0))
1711     info->imm.value = 0x3f800000;
1712   else
1713     info->imm.value = 0x3f000000;
1714   info->imm.is_fp = true;
1715   return true;
1716 }
1717 
1718 /* Decode a single-bit immediate that selects between #0.5 and #2.0.
1719    The fields array specifies which field to use.  */
1720 bool
1721 aarch64_ext_sve_float_half_two (const aarch64_operand *self,
1722 				aarch64_opnd_info *info, aarch64_insn code,
1723 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1724 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1725 {
1726   if (extract_field (self->fields[0], code, 0))
1727     info->imm.value = 0x40000000;
1728   else
1729     info->imm.value = 0x3f000000;
1730   info->imm.is_fp = true;
1731   return true;
1732 }
1733 
1734 /* Decode a single-bit immediate that selects between #0.0 and #1.0.
1735    The fields array specifies which field to use.  */
1736 bool
1737 aarch64_ext_sve_float_zero_one (const aarch64_operand *self,
1738 				aarch64_opnd_info *info, aarch64_insn code,
1739 				const aarch64_inst *inst ATTRIBUTE_UNUSED,
1740 				aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1741 {
1742   if (extract_field (self->fields[0], code, 0))
1743     info->imm.value = 0x3f800000;
1744   else
1745     info->imm.value = 0x0;
1746   info->imm.is_fp = true;
1747   return true;
1748 }
1749 
1750 /* Decode Zn[MM], where MM has a 7-bit triangular encoding.  The fields
1751    array specifies which field to use for Zn.  MM is encoded in the
1752    concatenation of imm5 and SVE_tszh, with imm5 being the less
1753    significant part.  */
1754 bool
1755 aarch64_ext_sve_index (const aarch64_operand *self,
1756 		       aarch64_opnd_info *info, aarch64_insn code,
1757 		       const aarch64_inst *inst ATTRIBUTE_UNUSED,
1758 		       aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1759 {
1760   int val;
1761 
1762   info->reglane.regno = extract_field (self->fields[0], code, 0);
1763   val = extract_fields (code, 0, 2, FLD_SVE_tszh, FLD_imm5);
1764   if ((val & 31) == 0)
1765     return false;
1766   while ((val & 1) == 0)
1767     val /= 2;
1768   info->reglane.index = val / 2;
1769   return true;
1770 }
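
/* Illustrative sketch (hypothetical helper): the 7-bit tszh:imm5 value
   decoded above places a marker bit at the element-size position with the
   lane index stored above it.  For example, 0b0001101 has its lowest set
   bit at position 0 (byte elements) and encodes index 6 (0b110).  The
   element-size return value is our own convention for the example.  */

static int ATTRIBUTE_UNUSED
example_sve_index_from_tsz (unsigned int val, int *index)
{
  int esize_log2 = 0;

  if ((val & 31) == 0)
    return 0;				/* Reserved encoding.  */
  while ((val & 1) == 0)
    {
      val >>= 1;
      esize_log2++;
    }
  *index = val >> 1;			/* Drop the marker bit.  */
  return 1 << esize_log2;		/* Element size in bytes.  */
}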
1771 
1772 /* Decode a logical immediate for the MOV alias of SVE DUPM.  */
1773 bool
1774 aarch64_ext_sve_limm_mov (const aarch64_operand *self,
1775 			  aarch64_opnd_info *info, const aarch64_insn code,
1776 			  const aarch64_inst *inst,
1777 			  aarch64_operand_error *errors)
1778 {
1779   int esize = aarch64_get_qualifier_esize (inst->operands[0].qualifier);
1780   return (aarch64_ext_limm (self, info, code, inst, errors)
1781 	  && aarch64_sve_dupm_mov_immediate_p (info->imm.value, esize));
1782 }
1783 
1784 /* Decode Zn[MM], where Zn occupies the least-significant part of the field
1785    and where MM occupies the most-significant part.  The operand-dependent
1786    value specifies the number of bits in Zn.  */
1787 bool
1788 aarch64_ext_sve_quad_index (const aarch64_operand *self,
1789 			    aarch64_opnd_info *info, aarch64_insn code,
1790 			    const aarch64_inst *inst ATTRIBUTE_UNUSED,
1791 			    aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1792 {
1793   unsigned int reg_bits = get_operand_specific_data (self);
1794   unsigned int val = extract_all_fields (self, code);
1795   info->reglane.regno = val & ((1 << reg_bits) - 1);
1796   info->reglane.index = val >> reg_bits;
1797   return true;
1798 }
1799 
1800 /* Decode {Zn.<T> - Zm.<T>}.  The fields array specifies which field
1801    to use for Zn.  The opcode-dependent value specifies the number
1802    of registers in the list.  */
1803 bool
1804 aarch64_ext_sve_reglist (const aarch64_operand *self,
1805 			 aarch64_opnd_info *info, aarch64_insn code,
1806 			 const aarch64_inst *inst ATTRIBUTE_UNUSED,
1807 			 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
1808 {
1809   info->reglist.first_regno = extract_field (self->fields[0], code, 0);
1810   info->reglist.num_regs = get_opcode_dependent_value (inst->opcode);
1811   return true;
1812 }
1813 
1814 /* Decode <pattern>{, MUL #<amount>}.  The fields array specifies which
1815    fields to use for <pattern>.  <amount> - 1 is encoded in the SVE_imm4
1816    field.  */
1817 bool
1818 aarch64_ext_sve_scale (const aarch64_operand *self,
1819 		       aarch64_opnd_info *info, aarch64_insn code,
1820 		       const aarch64_inst *inst, aarch64_operand_error *errors)
1821 {
1822   int val;
1823 
1824   if (!aarch64_ext_imm (self, info, code, inst, errors))
1825     return false;
1826   val = extract_field (FLD_SVE_imm4, code, 0);
1827   info->shifter.kind = AARCH64_MOD_MUL;
1828   info->shifter.amount = val + 1;
1829   info->shifter.operator_present = (val != 0);
1830   info->shifter.amount_present = (val != 0);
1831   return true;
1832 }
1833 
1834 /* Return the top set bit in VALUE, which is expected to be relatively
1835    small.  */
1836 static uint64_t
1837 get_top_bit (uint64_t value)
1838 {
1839   while ((value & -value) != value)
1840     value -= value & -value;
1841   return value;
1842 }
1843 
1844 /* Decode an SVE shift-left immediate.  */
1845 bool
1846 aarch64_ext_sve_shlimm (const aarch64_operand *self,
1847 			aarch64_opnd_info *info, const aarch64_insn code,
1848 			const aarch64_inst *inst, aarch64_operand_error *errors)
1849 {
1850   if (!aarch64_ext_imm (self, info, code, inst, errors)
1851       || info->imm.value == 0)
1852     return false;
1853 
1854   info->imm.value -= get_top_bit (info->imm.value);
1855   return true;
1856 }
1857 
1858 /* Decode an SVE shift-right immediate.  */
1859 bool
1860 aarch64_ext_sve_shrimm (const aarch64_operand *self,
1861 			aarch64_opnd_info *info, const aarch64_insn code,
1862 			const aarch64_inst *inst, aarch64_operand_error *errors)
1863 {
1864   if (!aarch64_ext_imm (self, info, code, inst, errors)
1865       || info->imm.value == 0)
1866     return false;
1867 
1868   info->imm.value = get_top_bit (info->imm.value) * 2 - info->imm.value;
1869   return true;
1870 }
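
/* Illustrative sketch (hypothetical helper): the encoded SVE shift
   immediate carries the element size in its top set bit, so with
   T = get_top_bit (raw) a shift-left amount is raw - T and a shift-right
   amount is 2 * T - raw, mirroring the two decoders above.  E.g. for byte
   elements T is 8 and raw 11 means LSL #3 in the left-shift forms or
   LSR #5 in the right-shift forms.  RAW must be nonzero.  */

static void ATTRIBUTE_UNUSED
example_decode_sve_shift (uint64_t raw, uint64_t *lsl_amount,
			  uint64_t *shr_amount)
{
  uint64_t top = get_top_bit (raw);	/* Element-size marker bit.  */
  *lsl_amount = raw - top;		/* As in aarch64_ext_sve_shlimm.  */
  *shr_amount = 2 * top - raw;		/* As in aarch64_ext_sve_shrimm.  */
}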
1871 
1872 /* Bitfields that are commonly used to encode certain operands' information
1873    may be partially used as part of the base opcode in some instructions.
1874    For example, the bit 1 of the field 'size' in
1875      FCVTXN <Vb><d>, <Va><n>
1876    is actually part of the base opcode, while only size<0> is available
1877    for encoding the register type.  Another example is the AdvSIMD
1878    instruction ORR (register), in which the field 'size' is also used for
1879    the base opcode, leaving only the field 'Q' available to encode the
1880    vector register arrangement specifier '8B' or '16B'.
1881 
1882    This function tries to deduce the qualifier from the value of partially
1883    constrained field(s).  Given the VALUE of such a field or fields, the
1884    qualifiers CANDIDATES and the MASK (indicating which bits are valid for
1885    operand encoding), the function returns the matching qualifier or
1886    AARCH64_OPND_QLF_NIL if nothing matches.
1887 
1888    N.B. CANDIDATES is a group of possible qualifiers that are valid for
1889    one operand; it has a maximum of AARCH64_MAX_QLF_SEQ_NUM qualifiers and
1890    may end with AARCH64_OPND_QLF_NIL.  */
1891 
1892 static enum aarch64_opnd_qualifier
1893 get_qualifier_from_partial_encoding (aarch64_insn value,
1894 				     const enum aarch64_opnd_qualifier* \
1895 				     candidates,
1896 				     aarch64_insn mask)
1897 {
1898   int i;
1899   DEBUG_TRACE ("enter with value: %d, mask: %d", (int)value, (int)mask);
1900   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1901     {
1902       aarch64_insn standard_value;
1903       if (candidates[i] == AARCH64_OPND_QLF_NIL)
1904 	break;
1905       standard_value = aarch64_get_qualifier_standard_value (candidates[i]);
1906       if ((standard_value & mask) == (value & mask))
1907 	return candidates[i];
1908     }
1909   return AARCH64_OPND_QLF_NIL;
1910 }
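
/* Illustrative sketch (hypothetical helper; the candidate list below is
   chosen for exposition and is not read from the opcode table): for an
   opcode such as AdvSIMD ORR (register) only the Q bit is free, so the
   mask passed in is 0x1 and the 8B/16B candidates are told apart by bit 0
   of the decoded size:Q value alone.  */

static enum aarch64_opnd_qualifier ATTRIBUTE_UNUSED
example_partial_encoding_lookup (aarch64_insn size_q_value)
{
  static const enum aarch64_opnd_qualifier cands[] =
    { AARCH64_OPND_QLF_V_8B, AARCH64_OPND_QLF_V_16B, AARCH64_OPND_QLF_NIL };
  /* Only bit 0 (Q) takes part in the comparison.  */
  return get_qualifier_from_partial_encoding (size_q_value, cands, 0x1);
}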
1911 
1912 /* Given a list of qualifier sequences, return all possible valid qualifiers
1913    for operand IDX in QUALIFIERS.
1914    Assume QUALIFIERS is an array whose length is large enough.  */
1915 
1916 static void
1917 get_operand_possible_qualifiers (int idx,
1918 				 const aarch64_opnd_qualifier_seq_t *list,
1919 				 enum aarch64_opnd_qualifier *qualifiers)
1920 {
1921   int i;
1922   for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1923     if ((qualifiers[i] = list[i][idx]) == AARCH64_OPND_QLF_NIL)
1924       break;
1925 }
1926 
1927 /* Decode the size Q field for e.g. SHADD.
1928    We tag one operand with the qualifier according to the code;
1929    whether the qualifier is valid for this opcode or not is the
1930    duty of the semantic checking.  */
1931 
1932 static int
1933 decode_sizeq (aarch64_inst *inst)
1934 {
1935   int idx;
1936   enum aarch64_opnd_qualifier qualifier;
1937   aarch64_insn code;
1938   aarch64_insn value, mask;
1939   enum aarch64_field_kind fld_sz;
1940   enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
1941 
1942   if (inst->opcode->iclass == asisdlse
1943      || inst->opcode->iclass == asisdlsep
1944      || inst->opcode->iclass == asisdlso
1945      || inst->opcode->iclass == asisdlsop)
1946     fld_sz = FLD_vldst_size;
1947   else
1948     fld_sz = FLD_size;
1949 
1950   code = inst->value;
1951   value = extract_fields (code, inst->opcode->mask, 2, fld_sz, FLD_Q);
1952   /* Work out which bits of the fields Q and size are actually
1953      available for operand encoding.  Opcodes like FMAXNM and FMLA have
1954      size[1] unavailable.  */
1955   mask = extract_fields (~inst->opcode->mask, 0, 2, fld_sz, FLD_Q);
1956 
1957   /* The index of the operand that we are going to tag with a qualifier, and
1958      the qualifier itself, are deduced from the value of the size and Q fields
1959      and the possible valid qualifier lists.  */
1960   idx = aarch64_select_operand_for_sizeq_field_coding (inst->opcode);
1961   DEBUG_TRACE ("key idx: %d", idx);
1962 
1963   /* For most related instructions, size:Q is fully available for operand
1964      encoding.  */
1965   if (mask == 0x7)
1966     {
1967       inst->operands[idx].qualifier = get_vreg_qualifier_from_value (value);
1968       return 1;
1969     }
1970 
1971   get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
1972 				   candidates);
1973 #ifdef DEBUG_AARCH64
1974   if (debug_dump)
1975     {
1976       int i;
1977       for (i = 0; candidates[i] != AARCH64_OPND_QLF_NIL
1978 	   && i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
1979 	DEBUG_TRACE ("qualifier %d: %s", i,
1980 		     aarch64_get_qualifier_name(candidates[i]));
1981       DEBUG_TRACE ("%d, %d", (int)value, (int)mask);
1982     }
1983 #endif /* DEBUG_AARCH64 */
1984 
1985   qualifier = get_qualifier_from_partial_encoding (value, candidates, mask);
1986 
1987   if (qualifier == AARCH64_OPND_QLF_NIL)
1988     return 0;
1989 
1990   inst->operands[idx].qualifier = qualifier;
1991   return 1;
1992 }
1993 
1994 /* Decode size[0]:Q, i.e. bit 22 and bit 30, for
1995      e.g. FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
1996 
1997 static int
1998 decode_asimd_fcvt (aarch64_inst *inst)
1999 {
2000   aarch64_field field = {0, 0};
2001   aarch64_insn value;
2002   enum aarch64_opnd_qualifier qualifier;
2003 
2004   gen_sub_field (FLD_size, 0, 1, &field);
2005   value = extract_field_2 (&field, inst->value, 0);
2006   qualifier = value == 0 ? AARCH64_OPND_QLF_V_4S
2007     : AARCH64_OPND_QLF_V_2D;
2008   switch (inst->opcode->op)
2009     {
2010     case OP_FCVTN:
2011     case OP_FCVTN2:
2012       /* FCVTN<Q> <Vd>.<Tb>, <Vn>.<Ta>.  */
2013       inst->operands[1].qualifier = qualifier;
2014       break;
2015     case OP_FCVTL:
2016     case OP_FCVTL2:
2017       /* FCVTL<Q> <Vd>.<Ta>, <Vn>.<Tb>.  */
2018       inst->operands[0].qualifier = qualifier;
2019       break;
2020     default:
2021       assert (0);
2022       return 0;
2023     }
2024 
2025   return 1;
2026 }
2027 
2028 /* Decode size[0], i.e. bit 22, for
2029      e.g. FCVTXN <Vb><d>, <Va><n>.  */
2030 
2031 static int
2032 decode_asisd_fcvtxn (aarch64_inst *inst)
2033 {
2034   aarch64_field field = {0, 0};
2035   gen_sub_field (FLD_size, 0, 1, &field);
2036   if (!extract_field_2 (&field, inst->value, 0))
2037     return 0;
2038   inst->operands[0].qualifier = AARCH64_OPND_QLF_S_S;
2039   return 1;
2040 }
2041 
2042 /* Decode the 'opc' field for e.g. FCVT <Dd>, <Sn>.  */
2043 static int
2044 decode_fcvt (aarch64_inst *inst)
2045 {
2046   enum aarch64_opnd_qualifier qualifier;
2047   aarch64_insn value;
2048   const aarch64_field field = {15, 2};
2049 
2050   /* opc dstsize */
2051   value = extract_field_2 (&field, inst->value, 0);
2052   switch (value)
2053     {
2054     case 0: qualifier = AARCH64_OPND_QLF_S_S; break;
2055     case 1: qualifier = AARCH64_OPND_QLF_S_D; break;
2056     case 3: qualifier = AARCH64_OPND_QLF_S_H; break;
2057     default: return 0;
2058     }
2059   inst->operands[0].qualifier = qualifier;
2060 
2061   return 1;
2062 }
2063 
2064 /* Do miscellaneous decodings that are not common enough to be driven by
2065    flags.  */
2066 
2067 static int
2068 do_misc_decoding (aarch64_inst *inst)
2069 {
2070   unsigned int value;
2071   switch (inst->opcode->op)
2072     {
2073     case OP_FCVT:
2074       return decode_fcvt (inst);
2075 
2076     case OP_FCVTN:
2077     case OP_FCVTN2:
2078     case OP_FCVTL:
2079     case OP_FCVTL2:
2080       return decode_asimd_fcvt (inst);
2081 
2082     case OP_FCVTXN_S:
2083       return decode_asisd_fcvtxn (inst);
2084 
2085     case OP_MOV_P_P:
2086     case OP_MOVS_P_P:
2087       value = extract_field (FLD_SVE_Pn, inst->value, 0);
2088       return (value == extract_field (FLD_SVE_Pm, inst->value, 0)
2089 	      && value == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2090 
2091     case OP_MOV_Z_P_Z:
2092       return (extract_field (FLD_SVE_Zd, inst->value, 0)
2093 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2094 
2095     case OP_MOV_Z_V:
2096       /* Index must be zero.  */
2097       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2098       return value > 0 && value <= 16 && value == (value & -value);
2099 
2100     case OP_MOV_Z_Z:
2101       return (extract_field (FLD_SVE_Zn, inst->value, 0)
2102 	      == extract_field (FLD_SVE_Zm_16, inst->value, 0));
2103 
2104     case OP_MOV_Z_Zi:
2105       /* Index must be nonzero.  */
2106       value = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2107       return value > 0 && value != (value & -value);
2108 
2109     case OP_MOVM_P_P_P:
2110       return (extract_field (FLD_SVE_Pd, inst->value, 0)
2111 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2112 
2113     case OP_MOVZS_P_P_P:
2114     case OP_MOVZ_P_P_P:
2115       return (extract_field (FLD_SVE_Pn, inst->value, 0)
2116 	      == extract_field (FLD_SVE_Pm, inst->value, 0));
2117 
2118     case OP_NOTS_P_P_P_Z:
2119     case OP_NOT_P_P_P_Z:
2120       return (extract_field (FLD_SVE_Pm, inst->value, 0)
2121 	      == extract_field (FLD_SVE_Pg4_10, inst->value, 0));
2122 
2123     default:
2124       return 0;
2125     }
2126 }
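
/* Illustrative sketch: the OP_MOV_Z_V and OP_MOV_Z_Zi cases above rely on
   V & -V isolating the lowest set bit of the tszh:imm5 field.  That field
   stores an element-size marker bit with the lane index above it, so an
   exact power of two (bounded by 16, the largest element-size marker)
   means index 0, and anything else means a nonzero index.  The helper is
   ours, for exposition only.  */

static int ATTRIBUTE_UNUSED
example_sve_index_is_zero (unsigned int tsz_imm5)
{
  return (tsz_imm5 > 0 && tsz_imm5 <= 16
	  && tsz_imm5 == (tsz_imm5 & -tsz_imm5));
}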
2127 
2128 /* Opcodes that have fields shared by multiple operands are usually marked
2129    with flags.  In this function, we detect such flags, decode the related
2130    field(s) and store the information in one of the related operands.  The
2131    'one' operand is not an arbitrary operand but one of the operands that can
2132    accommodate all the information that has been decoded.  */
2133 
2134 static int
2135 do_special_decoding (aarch64_inst *inst)
2136 {
2137   int idx;
2138   aarch64_insn value;
2139   /* Condition for truly conditionally executed instructions, e.g. b.cond.  */
2140   if (inst->opcode->flags & F_COND)
2141     {
2142       value = extract_field (FLD_cond2, inst->value, 0);
2143       inst->cond = get_cond_from_value (value);
2144     }
2145   /* 'sf' field.  */
2146   if (inst->opcode->flags & F_SF)
2147     {
2148       idx = select_operand_for_sf_field_coding (inst->opcode);
2149       value = extract_field (FLD_sf, inst->value, 0);
2150       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2151       if ((inst->opcode->flags & F_N)
2152 	  && extract_field (FLD_N, inst->value, 0) != value)
2153 	return 0;
2154     }
2155   /* 'sf' field.  */
2156   if (inst->opcode->flags & F_LSE_SZ)
2157     {
2158       idx = select_operand_for_sf_field_coding (inst->opcode);
2159       value = extract_field (FLD_lse_sz, inst->value, 0);
2160       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2161     }
2162   /* size:Q fields.  */
2163   if (inst->opcode->flags & F_SIZEQ)
2164     return decode_sizeq (inst);
2165 
2166   if (inst->opcode->flags & F_FPTYPE)
2167     {
2168       idx = select_operand_for_fptype_field_coding (inst->opcode);
2169       value = extract_field (FLD_type, inst->value, 0);
2170       switch (value)
2171 	{
2172 	case 0: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_S; break;
2173 	case 1: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_D; break;
2174 	case 3: inst->operands[idx].qualifier = AARCH64_OPND_QLF_S_H; break;
2175 	default: return 0;
2176 	}
2177     }
2178 
2179   if (inst->opcode->flags & F_SSIZE)
2180     {
2181       /* N.B. some opcodes like FCMGT <V><d>, <V><n>, #0 have size[1] as part
2182 	 of the base opcode.  */
2183       aarch64_insn mask;
2184       enum aarch64_opnd_qualifier candidates[AARCH64_MAX_QLF_SEQ_NUM];
2185       idx = select_operand_for_scalar_size_field_coding (inst->opcode);
2186       value = extract_field (FLD_size, inst->value, inst->opcode->mask);
2187       mask = extract_field (FLD_size, ~inst->opcode->mask, 0);
2188       /* For most related instructions, the 'size' field is fully available for
2189 	 operand encoding.  */
2190       if (mask == 0x3)
2191 	inst->operands[idx].qualifier = get_sreg_qualifier_from_value (value);
2192       else
2193 	{
2194 	  get_operand_possible_qualifiers (idx, inst->opcode->qualifiers_list,
2195 					   candidates);
2196 	  inst->operands[idx].qualifier
2197 	    = get_qualifier_from_partial_encoding (value, candidates, mask);
2198 	}
2199     }
2200 
2201   if (inst->opcode->flags & F_T)
2202     {
2203       /* Num of consecutive '0's on the right side of imm5<3:0>.  */
2204       int num = 0;
2205       unsigned val, Q;
2206       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2207 	      == AARCH64_OPND_CLASS_SIMD_REG);
2208       /* imm5<3:0>	q	<t>
2209 	 0000		x	reserved
2210 	 xxx1		0	8b
2211 	 xxx1		1	16b
2212 	 xx10		0	4h
2213 	 xx10		1	8h
2214 	 x100		0	2s
2215 	 x100		1	4s
2216 	 1000		0	reserved
2217 	 1000		1	2d  */
2218       val = extract_field (FLD_imm5, inst->value, 0);
2219       while ((val & 0x1) == 0 && ++num <= 3)
2220 	val >>= 1;
2221       if (num > 3)
2222 	return 0;
2223       Q = (unsigned) extract_field (FLD_Q, inst->value, inst->opcode->mask);
2224       inst->operands[0].qualifier =
2225 	get_vreg_qualifier_from_value ((num << 1) | Q);
2226     }
2227 
2228   if (inst->opcode->flags & F_GPRSIZE_IN_Q)
2229     {
2230       /* Use Rt to encode in the case of e.g.
2231 	 STXP <Ws>, <Xt1>, <Xt2>, [<Xn|SP>{,#0}].  */
2232       idx = aarch64_operand_index (inst->opcode->operands, AARCH64_OPND_Rt);
2233       if (idx == -1)
2234 	{
2235 	  /* Otherwise use the result operand, which has to be an integer
2236 	     register.  */
2237 	  assert (aarch64_get_operand_class (inst->opcode->operands[0])
2238 		  == AARCH64_OPND_CLASS_INT_REG);
2239 	  idx = 0;
2240 	}
2241       assert (idx == 0 || idx == 1);
2242       value = extract_field (FLD_Q, inst->value, 0);
2243       inst->operands[idx].qualifier = get_greg_qualifier_from_value (value);
2244     }
2245 
2246   if (inst->opcode->flags & F_LDS_SIZE)
2247     {
2248       aarch64_field field = {0, 0};
2249       assert (aarch64_get_operand_class (inst->opcode->operands[0])
2250 	      == AARCH64_OPND_CLASS_INT_REG);
2251       gen_sub_field (FLD_opc, 0, 1, &field);
2252       value = extract_field_2 (&field, inst->value, 0);
2253       inst->operands[0].qualifier
2254 	= value ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X;
2255     }
2256 
2257   /* Miscellaneous decoding; done as the last step.  */
2258   if (inst->opcode->flags & F_MISC)
2259     return do_misc_decoding (inst);
2260 
2261   return 1;
2262 }
2263 
2264 /* Converters converting a real opcode instruction to its alias form.  */
2265 
2266 /* ROR <Wd>, <Ws>, #<shift>
2267      is equivalent to:
2268    EXTR <Wd>, <Ws>, <Ws>, #<shift>.  */
2269 static int
2270 convert_extr_to_ror (aarch64_inst *inst)
2271 {
2272   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2273     {
2274       copy_operand_info (inst, 2, 3);
2275       inst->operands[3].type = AARCH64_OPND_NIL;
2276       return 1;
2277     }
2278   return 0;
2279 }
2280 
2281 /* UXTL<Q> <Vd>.<Ta>, <Vn>.<Tb>
2282      is equivalent to:
2283    USHLL<Q> <Vd>.<Ta>, <Vn>.<Tb>, #0.  */
2284 static int
2285 convert_shll_to_xtl (aarch64_inst *inst)
2286 {
2287   if (inst->operands[2].imm.value == 0)
2288     {
2289       inst->operands[2].type = AARCH64_OPND_NIL;
2290       return 1;
2291     }
2292   return 0;
2293 }
2294 
2295 /* Convert
2296      UBFM <Xd>, <Xn>, #<shift>, #63.
2297    to
2298      LSR <Xd>, <Xn>, #<shift>.  */
2299 static int
2300 convert_bfm_to_sr (aarch64_inst *inst)
2301 {
2302   int64_t imms, val;
2303 
2304   imms = inst->operands[3].imm.value;
2305   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2306   if (imms == val)
2307     {
2308       inst->operands[3].type = AARCH64_OPND_NIL;
2309       return 1;
2310     }
2311 
2312   return 0;
2313 }
2314 
2315 /* Convert MOV to ORR.  */
2316 static int
2317 convert_orr_to_mov (aarch64_inst *inst)
2318 {
2319   /* MOV <Vd>.<T>, <Vn>.<T>
2320      is equivalent to:
2321      ORR <Vd>.<T>, <Vn>.<T>, <Vn>.<T>.  */
2322   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno)
2323     {
2324       inst->operands[2].type = AARCH64_OPND_NIL;
2325       return 1;
2326     }
2327   return 0;
2328 }
2329 
2330 /* When <imms> >= <immr>, the instruction written:
2331      SBFX <Xd>, <Xn>, #<lsb>, #<width>
2332    is equivalent to:
2333      SBFM <Xd>, <Xn>, #<lsb>, #(<lsb>+<width>-1).  */
2334 
2335 static int
2336 convert_bfm_to_bfx (aarch64_inst *inst)
2337 {
2338   int64_t immr, imms;
2339 
2340   immr = inst->operands[2].imm.value;
2341   imms = inst->operands[3].imm.value;
2342   if (imms >= immr)
2343     {
2344       int64_t lsb = immr;
2345       inst->operands[2].imm.value = lsb;
2346       inst->operands[3].imm.value = imms + 1 - lsb;
2347       /* The two opcodes have different qualifiers for
2348 	 the immediate operands; reset to help the checking.  */
2349       reset_operand_qualifier (inst, 2);
2350       reset_operand_qualifier (inst, 3);
2351       return 1;
2352     }
2353 
2354   return 0;
2355 }
2356 
2357 /* When <imms> < <immr>, the instruction written:
2358      SBFIZ <Xd>, <Xn>, #<lsb>, #<width>
2359    is equivalent to:
2360      SBFM <Xd>, <Xn>, #((64-<lsb>)&0x3f), #(<width>-1).  */
2361 
2362 static int
2363 convert_bfm_to_bfi (aarch64_inst *inst)
2364 {
2365   int64_t immr, imms, val;
2366 
2367   immr = inst->operands[2].imm.value;
2368   imms = inst->operands[3].imm.value;
2369   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2370   if (imms < immr)
2371     {
2372       inst->operands[2].imm.value = (val - immr) & (val - 1);
2373       inst->operands[3].imm.value = imms + 1;
2374       /* The two opcodes have different qualifiers for
2375 	 the immediate operands; reset to help the checking.  */
2376       reset_operand_qualifier (inst, 2);
2377       reset_operand_qualifier (inst, 3);
2378       return 1;
2379     }
2380 
2381   return 0;
2382 }
2383 
2384 /* The instruction written:
2385      BFC <Xd>, #<lsb>, #<width>
2386    is equivalent to:
2387      BFM <Xd>, XZR, #((64-<lsb>)&0x3f), #(<width>-1).  */
2388 
2389 static int
2390 convert_bfm_to_bfc (aarch64_inst *inst)
2391 {
2392   int64_t immr, imms, val;
2393 
2394   /* Should have been assured by the base opcode value.  */
2395   assert (inst->operands[1].reg.regno == 0x1f);
2396 
2397   immr = inst->operands[2].imm.value;
2398   imms = inst->operands[3].imm.value;
2399   val = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 32 : 64;
2400   if (imms < immr)
2401     {
2402       /* Drop XZR from the second operand.  */
2403       copy_operand_info (inst, 1, 2);
2404       copy_operand_info (inst, 2, 3);
2405       inst->operands[3].type = AARCH64_OPND_NIL;
2406 
2407       /* Recalculate the immediates.  */
2408       inst->operands[1].imm.value = (val - immr) & (val - 1);
2409       inst->operands[2].imm.value = imms + 1;
2410 
2411       /* The two opcodes have different qualifiers for the operands; reset to
2412 	 help the checking.  */
2413       reset_operand_qualifier (inst, 1);
2414       reset_operand_qualifier (inst, 2);
2415       reset_operand_qualifier (inst, 3);
2416 
2417       return 1;
2418     }
2419 
2420   return 0;
2421 }
2422 
2423 /* The instruction written:
2424      LSL <Xd>, <Xn>, #<shift>
2425    is equivalent to:
2426      UBFM <Xd>, <Xn>, #((64-<shift>)&0x3f), #(63-<shift>).  */
2427 
2428 static int
2429 convert_ubfm_to_lsl (aarch64_inst *inst)
2430 {
2431   int64_t immr = inst->operands[2].imm.value;
2432   int64_t imms = inst->operands[3].imm.value;
2433   int64_t val
2434     = inst->operands[2].qualifier == AARCH64_OPND_QLF_imm_0_31 ? 31 : 63;
2435 
2436   if ((immr == 0 && imms == val) || immr == imms + 1)
2437     {
2438       inst->operands[3].type = AARCH64_OPND_NIL;
2439       inst->operands[2].imm.value = val - imms;
2440       return 1;
2441     }
2442 
2443   return 0;
2444 }
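
/* Illustrative worked example (hypothetical helper): recover the LSL shift
   amount from UBFM's immr/imms pair for the 64-bit form, mirroring
   convert_ubfm_to_lsl.  LSL Xd, Xn, #3 is UBFM Xd, Xn, #61, #60, and
   63 - 60 recovers the shift of 3; a return of -1 means the pair is not an
   LSL alias at all.  */

static int ATTRIBUTE_UNUSED
example_ubfm_to_lsl_shift (int64_t immr, int64_t imms)
{
  if ((immr == 0 && imms == 63) || immr == imms + 1)
    return (int) (63 - imms);
  return -1;				/* Not an LSL alias.  */
}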
2445 
2446 /* CINC <Wd>, <Wn>, <cond>
2447      is equivalent to:
2448    CSINC <Wd>, <Wn>, <Wn>, invert(<cond>)
2449      where <cond> is not AL or NV.  */
2450 
2451 static int
2452 convert_from_csel (aarch64_inst *inst)
2453 {
2454   if (inst->operands[1].reg.regno == inst->operands[2].reg.regno
2455       && (inst->operands[3].cond->value & 0xe) != 0xe)
2456     {
2457       copy_operand_info (inst, 2, 3);
2458       inst->operands[2].cond = get_inverted_cond (inst->operands[3].cond);
2459       inst->operands[3].type = AARCH64_OPND_NIL;
2460       return 1;
2461     }
2462   return 0;
2463 }
2464 
2465 /* CSET <Wd>, <cond>
2466      is equivalent to:
2467    CSINC <Wd>, WZR, WZR, invert(<cond>)
2468      where <cond> is not AL or NV.  */
2469 
2470 static int
2471 convert_csinc_to_cset (aarch64_inst *inst)
2472 {
2473   if (inst->operands[1].reg.regno == 0x1f
2474       && inst->operands[2].reg.regno == 0x1f
2475       && (inst->operands[3].cond->value & 0xe) != 0xe)
2476     {
2477       copy_operand_info (inst, 1, 3);
2478       inst->operands[1].cond = get_inverted_cond (inst->operands[3].cond);
2479       inst->operands[3].type = AARCH64_OPND_NIL;
2480       inst->operands[2].type = AARCH64_OPND_NIL;
2481       return 1;
2482     }
2483   return 0;
2484 }
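
/* Illustrative sketch (hypothetical; the real code goes through
   get_inverted_cond): for every condition except AL and NV the inverse
   condition simply flips bit 0 of the 4-bit encoding, e.g. EQ (0b0000)
   versus NE (0b0001).  The (value & 0xe) != 0xe tests above reject AL and
   NV, for which no CINC/CSET style alias exists.  */

static unsigned int ATTRIBUTE_UNUSED
example_invert_cond_value (unsigned int cond_value)
{
  /* Valid for conditions other than AL and NV.  */
  return cond_value ^ 1;
}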
2485 
2486 /* MOV <Wd>, #<imm>
2487      is equivalent to:
2488    MOVZ <Wd>, #<imm16>, LSL #<shift>.
2489 
2490    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2491    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2492    or where a MOVN has an immediate that could be encoded by MOVZ, or where
2493    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2494    machine-instruction mnemonic must be used.  */
2495 
2496 static int
2497 convert_movewide_to_mov (aarch64_inst *inst)
2498 {
2499   uint64_t value = inst->operands[1].imm.value;
2500   /* MOVZ/MOVN #0 have a shift amount other than LSL #0.  */
2501   if (value == 0 && inst->operands[1].shifter.amount != 0)
2502     return 0;
2503   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2504   inst->operands[1].shifter.kind = AARCH64_MOD_NONE;
2505   value <<= inst->operands[1].shifter.amount;
2506   /* As an alias converter, bear in mind that INST->OPCODE is the opcode
2507      of the real instruction.  */
2508   if (inst->opcode->op == OP_MOVN)
2509     {
2510       int is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2511       value = ~value;
2512       /* A MOVN has an immediate that could be encoded by MOVZ.  */
2513       if (aarch64_wide_constant_p (value, is32, NULL))
2514 	return 0;
2515     }
2516   inst->operands[1].imm.value = value;
2517   inst->operands[1].shifter.amount = 0;
2518   return 1;
2519 }
2520 
2521 /* MOV <Wd>, #<imm>
2522      is equivalent to:
2523    ORR <Wd>, WZR, #<imm>.
2524 
2525    A disassembler may output ORR, MOVZ and MOVN as a MOV mnemonic, except when
2526    ORR has an immediate that could be generated by a MOVZ or MOVN instruction,
2527    or where a MOVN has an immediate that could be encoded by MOVZ, or where
2528    MOVZ/MOVN #0 have a shift amount other than LSL #0, in which case the
2529    machine-instruction mnemonic must be used.  */
2530 
2531 static int
2532 convert_movebitmask_to_mov (aarch64_inst *inst)
2533 {
2534   int is32;
2535   uint64_t value;
2536 
2537   /* Should have been assured by the base opcode value.  */
2538   assert (inst->operands[1].reg.regno == 0x1f);
2539   copy_operand_info (inst, 1, 2);
2540   is32 = inst->operands[0].qualifier == AARCH64_OPND_QLF_W;
2541   inst->operands[1].type = AARCH64_OPND_IMM_MOV;
2542   value = inst->operands[1].imm.value;
2543   /* ORR has an immediate that could be generated by a MOVZ or MOVN
2544      instruction.  */
2545   if (inst->operands[0].reg.regno != 0x1f
2546       && (aarch64_wide_constant_p (value, is32, NULL)
2547 	  || aarch64_wide_constant_p (~value, is32, NULL)))
2548     return 0;
2549 
2550   inst->operands[2].type = AARCH64_OPND_NIL;
2551   return 1;
2552 }
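
/* Rough illustrative sketch (hypothetical helper approximating the check
   that the two converters above delegate to aarch64_wide_constant_p): a
   value is taken to be MOVZ-encodable when it is a 16-bit immediate at one
   of the 16-bit aligned positions.  ORR/MOVN forms whose value (or its
   complement) passes such a test keep their machine mnemonic, so that the
   MOV output stays unambiguous.  */

static int ATTRIBUTE_UNUSED
example_movz_encodable_p (uint64_t value, int is32)
{
  int shift;

  if (is32)
    value &= 0xffffffffull;
  for (shift = 0; shift < (is32 ? 32 : 64); shift += 16)
    if ((value & ~((uint64_t) 0xffff << shift)) == 0)
      return 1;
  return 0;
}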
2553 
2554 /* Some alias opcodes are disassembled by being converted from their real form.
2555    N.B. INST->OPCODE is the real opcode rather than the alias.  */
2556 
2557 static int
2558 convert_to_alias (aarch64_inst *inst, const aarch64_opcode *alias)
2559 {
2560   switch (alias->op)
2561     {
2562     case OP_ASR_IMM:
2563     case OP_LSR_IMM:
2564       return convert_bfm_to_sr (inst);
2565     case OP_LSL_IMM:
2566       return convert_ubfm_to_lsl (inst);
2567     case OP_CINC:
2568     case OP_CINV:
2569     case OP_CNEG:
2570       return convert_from_csel (inst);
2571     case OP_CSET:
2572     case OP_CSETM:
2573       return convert_csinc_to_cset (inst);
2574     case OP_UBFX:
2575     case OP_BFXIL:
2576     case OP_SBFX:
2577       return convert_bfm_to_bfx (inst);
2578     case OP_SBFIZ:
2579     case OP_BFI:
2580     case OP_UBFIZ:
2581       return convert_bfm_to_bfi (inst);
2582     case OP_BFC:
2583       return convert_bfm_to_bfc (inst);
2584     case OP_MOV_V:
2585       return convert_orr_to_mov (inst);
2586     case OP_MOV_IMM_WIDE:
2587     case OP_MOV_IMM_WIDEN:
2588       return convert_movewide_to_mov (inst);
2589     case OP_MOV_IMM_LOG:
2590       return convert_movebitmask_to_mov (inst);
2591     case OP_ROR_IMM:
2592       return convert_extr_to_ror (inst);
2593     case OP_SXTL:
2594     case OP_SXTL2:
2595     case OP_UXTL:
2596     case OP_UXTL2:
2597       return convert_shll_to_xtl (inst);
2598     default:
2599       return 0;
2600     }
2601 }
2602 
2603 static bool
2604 aarch64_opcode_decode (const aarch64_opcode *, const aarch64_insn,
2605 		       aarch64_inst *, int, aarch64_operand_error *errors);
2606 
2607 /* Given the instruction information in *INST, check if the instruction has
2608    any alias form that can be used to represent *INST.  If the answer is yes,
2609    update *INST to be in the form of the determined alias.  */
2610 
2611 /* In the opcode description table, the following flags are used in opcode
2612    entries to help establish the relations between the real and alias opcodes:
2613 
2614 	F_ALIAS:	opcode is an alias
2615 	F_HAS_ALIAS:	opcode has alias(es)
2616 	F_P1
2617 	F_P2
2618 	F_P3:		Disassembly preference priority 1-3 (the larger the
2619 			number, the higher the priority).  If nothing is
2620 			specified, the priority is 0 by default, i.e. the lowest.
2621 
2622    Although the relation between the machine and the alias instructions is not
2623    explicitly described, it can easily be determined from the base opcode
2624    values, masks and the flags F_ALIAS and F_HAS_ALIAS in their opcode
2625    description entries:
2626 
2627    The mask of an alias opcode must be equal to or a super-set (i.e. more
2628    constrained) of that of the aliased opcode; so is the base opcode value.
2629 
2630    if (opcode_has_alias (real) && alias_opcode_p (opcode)
2631        && (opcode->mask & real->mask) == real->mask
2632        && (real->mask & opcode->opcode) == (real->mask & real->opcode))
2633    then OPCODE is an alias of, and only of, the REAL instruction
2634 
2635    The alias relationship is forced to be flat-structured to keep the related algorithm
2636    simple; an opcode entry cannot be flagged with both F_ALIAS and F_HAS_ALIAS.
2637 
2638    During the disassembling, the decoding decision tree (in
2639    opcodes/aarch64-dis-2.c) always returns a machine instruction opcode entry;
2640    if the decoding of such a machine instruction succeeds (and -Mno-aliases is
2641    not specified), the disassembler will check whether any alias instruction
2642    exists for this real instruction.  If there is, the disassembler will try
2643    to disassemble the 32-bit binary again using the alias's rule, or try to
2644    convert the IR to the form of the alias.  In the case of multiple aliases,
2645    the aliases are tried one by one from the highest priority (currently the
2646    flag F_P3) to the lowest priority (no priority flag), and the first one
2647    that succeeds is adopted.
2648 
2649    You may ask why there is a need for the conversion of IR from one form to
2650    another in handling certain aliases.  This is because on one hand it avoids
2651    adding more operand code to handle unusual encoding/decoding; on the other
2652    hand, during disassembly, the conversion is an effective approach to
2653    check the condition of an alias (as an alias may be adopted only if certain
2654    conditions are met).
2655 
2656    In order to speed up the alias opcode lookup, aarch64-gen has preprocessed
2657    aarch64_opcode_table and generated aarch64_find_alias_opcode and
2658    aarch64_find_next_alias_opcode (in opcodes/aarch64-dis-2.c) to help.  */
2659 
2660 static void
2661 determine_disassembling_preference (struct aarch64_inst *inst,
2662 				    aarch64_operand_error *errors)
2663 {
2664   const aarch64_opcode *opcode;
2665   const aarch64_opcode *alias;
2666 
2667   opcode = inst->opcode;
2668 
2669   /* This opcode does not have an alias, so use itself.  */
2670   if (!opcode_has_alias (opcode))
2671     return;
2672 
2673   alias = aarch64_find_alias_opcode (opcode);
2674   assert (alias);
2675 
2676 #ifdef DEBUG_AARCH64
2677   if (debug_dump)
2678     {
2679       const aarch64_opcode *tmp = alias;
2680       printf ("####   LIST    ordered: ");
2681       while (tmp)
2682 	{
2683 	  printf ("%s, ", tmp->name);
2684 	  tmp = aarch64_find_next_alias_opcode (tmp);
2685 	}
2686       printf ("\n");
2687     }
2688 #endif /* DEBUG_AARCH64 */
2689 
2690   for (; alias; alias = aarch64_find_next_alias_opcode (alias))
2691     {
2692       DEBUG_TRACE ("try %s", alias->name);
2693       assert (alias_opcode_p (alias) || opcode_has_alias (opcode));
2694 
2695       /* An alias can be a pseudo opcode which will never be used in the
2696 	 disassembly, e.g. BIC logical immediate is such a pseudo opcode
2697 	 aliasing AND.  */
2698       if (pseudo_opcode_p (alias))
2699 	{
2700 	  DEBUG_TRACE ("skip pseudo %s", alias->name);
2701 	  continue;
2702 	}
2703 
2704       if ((inst->value & alias->mask) != alias->opcode)
2705 	{
2706 	  DEBUG_TRACE ("skip %s as base opcode does not match", alias->name);
2707 	  continue;
2708 	}
2709 
2710       if (!AARCH64_CPU_HAS_FEATURE (arch_variant, *alias->avariant))
2711 	{
2712 	  DEBUG_TRACE ("skip %s: we're missing features", alias->name);
2713 	  continue;
2714 	}
2715 
2716       /* No need to do any complicated transformation on operands, if the alias
2717 	 opcode does not have any operand.  */
2718       if (aarch64_num_of_operands (alias) == 0 && alias->opcode == inst->value)
2719 	{
2720 	  DEBUG_TRACE ("succeed with 0-operand opcode %s", alias->name);
2721 	  aarch64_replace_opcode (inst, alias);
2722 	  return;
2723 	}
2724       if (alias->flags & F_CONV)
2725 	{
2726 	  aarch64_inst copy;
2727 	  memcpy (&copy, inst, sizeof (aarch64_inst));
2728 	  /* ALIAS is the preference as long as the instruction can be
2729 	     successfully converted to the form of ALIAS.  */
2730 	  if (convert_to_alias (&copy, alias) == 1)
2731 	    {
2732 	      int res;
2733 	      aarch64_replace_opcode (&copy, alias);
2734 	      res = aarch64_match_operands_constraint (&copy, NULL);
2735 	      assert (res == 1);
2736 	      DEBUG_TRACE ("succeed with %s via conversion", alias->name);
2737 	      memcpy (inst, &copy, sizeof (aarch64_inst));
2738 	      return;
2739 	    }
2740 	}
2741       else
2742 	{
2743 	  /* Directly decode the alias opcode.  */
2744 	  aarch64_inst temp;
2745 	  memset (&temp, '\0', sizeof (aarch64_inst));
2746 	  if (aarch64_opcode_decode (alias, inst->value, &temp, 1, errors) == 1)
2747 	    {
2748 	      DEBUG_TRACE ("succeed with %s via direct decoding", alias->name);
2749 	      memcpy (inst, &temp, sizeof (aarch64_inst));
2750 	      return;
2751 	    }
2752 	}
2753     }
2754 }
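
/* Illustrative sketch restating the flat alias relationship described in
   the comment before determine_disassembling_preference: OPCODE aliases
   REAL when REAL has aliases, OPCODE is flagged as one, OPCODE's mask is a
   superset of REAL's, and both agree on REAL's fixed bits.  The generated
   aarch64_find_alias_opcode tables already embody this test, so the helper
   exists only to spell the condition out.  */

static bool ATTRIBUTE_UNUSED
example_opcode_is_alias_of (const aarch64_opcode *opcode,
			    const aarch64_opcode *real)
{
  return (opcode_has_alias (real)
	  && alias_opcode_p (opcode)
	  && (opcode->mask & real->mask) == real->mask
	  && (real->mask & opcode->opcode) == (real->mask & real->opcode));
}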
2755 
2756 /* Some instructions (including all SVE ones) use the instruction class
2757    to describe how a qualifiers_list index is represented in the instruction
2758    encoding.  If INST is such an instruction, decode the appropriate fields
2759    and fill in the operand qualifiers accordingly.  Return true if no
2760    problems are found.  */
2761 
2762 static bool
2763 aarch64_decode_variant_using_iclass (aarch64_inst *inst)
2764 {
2765   int i, variant;
2766 
2767   variant = 0;
2768   switch (inst->opcode->iclass)
2769     {
2770     case sve_cpy:
2771       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_14);
2772       break;
2773 
2774     case sve_index:
2775       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_imm5);
2776       if ((i & 31) == 0)
2777 	return false;
2778       while ((i & 1) == 0)
2779 	{
2780 	  i >>= 1;
2781 	  variant += 1;
2782 	}
2783       break;
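      /* Worked example for sve_index: if tszh:imm5 reads 0b0000100, the two
	 trailing zero bits give variant 2, while a value with all of the low
	 five bits clear was already rejected above.  */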
2784 
2785     case sve_limm:
2786       /* Pick the smallest applicable element size.  */
2787       if ((inst->value & 0x20600) == 0x600)
2788 	variant = 0;
2789       else if ((inst->value & 0x20400) == 0x400)
2790 	variant = 1;
2791       else if ((inst->value & 0x20000) == 0)
2792 	variant = 2;
2793       else
2794 	variant = 3;
2795       break;
2796 
2797     case sve_misc:
2798       /* sve_misc instructions have only a single variant.  */
2799       break;
2800 
2801     case sve_movprfx:
2802       variant = extract_fields (inst->value, 0, 2, FLD_size, FLD_SVE_M_16);
2803       break;
2804 
2805     case sve_pred_zm:
2806       variant = extract_field (FLD_SVE_M_4, inst->value, 0);
2807       break;
2808 
2809     case sve_shift_pred:
2810       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_8);
2811     sve_shift:
2812       if (i == 0)
2813 	return false;
2814       while (i != 1)
2815 	{
2816 	  i >>= 1;
2817 	  variant += 1;
2818 	}
2819       break;
2820 
2821     case sve_shift_unpred:
2822       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2823       goto sve_shift;
2824 
2825     case sve_size_bhs:
2826       variant = extract_field (FLD_size, inst->value, 0);
2827       if (variant >= 3)
2828 	return false;
2829       break;
2830 
2831     case sve_size_bhsd:
2832       variant = extract_field (FLD_size, inst->value, 0);
2833       break;
2834 
2835     case sve_size_hsd:
2836       i = extract_field (FLD_size, inst->value, 0);
2837       if (i < 1)
2838 	return false;
2839       variant = i - 1;
2840       break;
2841 
2842     case sve_size_bh:
2843     case sve_size_sd:
2844       variant = extract_field (FLD_SVE_sz, inst->value, 0);
2845       break;
2846 
2847     case sve_size_sd2:
2848       variant = extract_field (FLD_SVE_sz2, inst->value, 0);
2849       break;
2850 
2851     case sve_size_hsd2:
2852       i = extract_field (FLD_SVE_size, inst->value, 0);
2853       if (i < 1)
2854 	return false;
2855       variant = i - 1;
2856       break;
2857 
2858     case sve_size_13:
2859       /* Ignore low bit of this field since that is set in the opcode for
2860 	 instructions of this iclass.  */
2861       i = (extract_field (FLD_size, inst->value, 0) & 2);
2862       variant = (i >> 1);
2863       break;
2864 
2865     case sve_shift_tsz_bhsd:
2866       i = extract_fields (inst->value, 0, 2, FLD_SVE_tszh, FLD_SVE_tszl_19);
2867       if (i == 0)
2868 	return false;
2869       while (i != 1)
2870 	{
2871 	  i >>= 1;
2872 	  variant += 1;
2873 	}
2874       break;
2875 
2876     case sve_size_tsz_bhs:
2877       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2878       if (i == 0)
2879 	return false;
2880       while (i != 1)
2881 	{
2882 	  if (i & 1)
2883 	    return false;
2884 	  i >>= 1;
2885 	  variant += 1;
2886 	}
2887       break;
2888 
2889     case sve_shift_tsz_hsd:
2890       i = extract_fields (inst->value, 0, 2, FLD_SVE_sz, FLD_SVE_tszl_19);
2891       if (i == 0)
2892 	return false;
2893       while (i != 1)
2894 	{
2895 	  i >>= 1;
2896 	  variant += 1;
2897 	}
2898       break;
2899 
2900     default:
2901       /* No mapping between instruction class and qualifiers.  */
2902       return true;
2903     }
2904 
2905   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2906     inst->operands[i].qualifier = inst->opcode->qualifiers_list[variant][i];
2907   return true;
2908 }
2909 /* Decode the CODE according to OPCODE; fill INST.  Return 0 if the decoding
2910    fails, which means that CODE is not an instruction of OPCODE; otherwise
2911    return 1.
2912 
2913    If OPCODE has alias(es) and NOALIASES_P is 0, an alias opcode may be
2914    determined and used to disassemble CODE; this is done just before the
2915    return.  */
2916 
2917 static bool
2918 aarch64_opcode_decode (const aarch64_opcode *opcode, const aarch64_insn code,
2919 		       aarch64_inst *inst, int noaliases_p,
2920 		       aarch64_operand_error *errors)
2921 {
2922   int i;
2923 
2924   DEBUG_TRACE ("enter with %s", opcode->name);
2925 
2926   assert (opcode && inst);
2927 
2928   /* Clear inst.  */
2929   memset (inst, '\0', sizeof (aarch64_inst));
2930 
2931   /* Check the base opcode.  */
2932   if ((code & opcode->mask) != (opcode->opcode & opcode->mask))
2933     {
2934       DEBUG_TRACE ("base opcode match FAIL");
2935       goto decode_fail;
2936     }
2937 
2938   inst->opcode = opcode;
2939   inst->value = code;
2940 
2941   /* Assign operand codes and indexes.  */
2942   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2943     {
2944       if (opcode->operands[i] == AARCH64_OPND_NIL)
2945 	break;
2946       inst->operands[i].type = opcode->operands[i];
2947       inst->operands[i].idx = i;
2948     }
2949 
2950   /* Call the opcode decoder indicated by flags.  */
2951   if (opcode_has_special_coder (opcode) && do_special_decoding (inst) == 0)
2952     {
2953       DEBUG_TRACE ("opcode flag-based decoder FAIL");
2954       goto decode_fail;
2955     }
2956 
2957   /* Possibly use the instruction class to determine the correct
2958      qualifier.  */
2959   if (!aarch64_decode_variant_using_iclass (inst))
2960     {
2961       DEBUG_TRACE ("iclass-based decoder FAIL");
2962       goto decode_fail;
2963     }
2964 
2965   /* Call operand decoders.  */
2966   for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2967     {
2968       const aarch64_operand *opnd;
2969       enum aarch64_opnd type;
2970 
2971       type = opcode->operands[i];
2972       if (type == AARCH64_OPND_NIL)
2973 	break;
2974       opnd = &aarch64_operands[type];
2975       if (operand_has_extractor (opnd)
2976 	  && (! aarch64_extract_operand (opnd, &inst->operands[i], code, inst,
2977 					 errors)))
2978 	{
2979 	  DEBUG_TRACE ("operand decoder FAIL at operand %d", i);
2980 	  goto decode_fail;
2981 	}
2982     }
2983 
2984   /* If the opcode has a verifier, then check it now.  */
2985   if (opcode->verifier
2986       && opcode->verifier (inst, code, 0, false, errors, NULL) != ERR_OK)
2987     {
2988       DEBUG_TRACE ("operand verifier FAIL");
2989       goto decode_fail;
2990     }
2991 
2992   /* Match the qualifiers.  */
2993   if (aarch64_match_operands_constraint (inst, NULL) == 1)
2994     {
2995       /* Arriving here, the CODE has been determined as a valid instruction
2996 	 of OPCODE and *INST has been filled with information of this OPCODE
2997 	 instruction.  Before the return, check if the instruction has any
2998 	 alias and should be disassembled in the form of its alias instead.
2999 	 If the answer is yes, *INST will be updated.  */
3000       if (!noaliases_p)
3001 	determine_disassembling_preference (inst, errors);
3002       DEBUG_TRACE ("SUCCESS");
3003       return true;
3004     }
3005   else
3006     {
3007       DEBUG_TRACE ("constraint matching FAIL");
3008     }
3009 
3010  decode_fail:
3011   return false;
3012 }
3013 
3014 /* This does some user-friendly fix-up to *INST.  It currently focuses on
3015    adjusting qualifiers so that the printed instruction can be
3016    recognized/understood more easily.  */
3017 
3018 static void
3019 user_friendly_fixup (aarch64_inst *inst)
3020 {
3021   switch (inst->opcode->iclass)
3022     {
3023     case testbranch:
3024       /* TBNZ Xn|Wn, #uimm6, label
3025 	 Test and Branch Not Zero: conditionally jumps to label if bit number
3026 	 uimm6 in register Xn is not zero.  The bit number implies the width of
3027 	 the register, which may be written and should be disassembled as Wn if
3028 	 uimm is less than 32. Limited to a branch offset range of +/- 32KiB.
3029 	 */
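      /* For illustration: a bit number of 3 is thus printed with a W
	 register operand, e.g. "tbnz w0, #3, <label>", whereas a bit number
	 of 35 keeps the X register form.  */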
3030       if (inst->operands[1].imm.value < 32)
3031 	inst->operands[0].qualifier = AARCH64_OPND_QLF_W;
3032       break;
3033     default: break;
3034     }
3035 }
3036 
3037 /* Decode INSN and fill in the instruction information in *INST.  An alias
3038    opcode may be filled in *INST if NOALIASES_P is FALSE.  Return ERR_OK on
3039    success.  */
3040 
3041 enum err_type
3042 aarch64_decode_insn (aarch64_insn insn, aarch64_inst *inst,
3043 		     bool noaliases_p,
3044 		     aarch64_operand_error *errors)
3045 {
3046   const aarch64_opcode *opcode = aarch64_opcode_lookup (insn);
3047 
3048 #ifdef DEBUG_AARCH64
3049   if (debug_dump)
3050     {
3051       const aarch64_opcode *tmp = opcode;
3052       printf ("\n");
3053       DEBUG_TRACE ("opcode lookup:");
3054       while (tmp != NULL)
3055 	{
3056 	  aarch64_verbose ("  %s", tmp->name);
3057 	  tmp = aarch64_find_next_opcode (tmp);
3058 	}
3059     }
3060 #endif /* DEBUG_AARCH64 */
3061 
3062   /* A list of opcodes may have been found, as aarch64_opcode_lookup cannot
3063      distinguish some opcodes, e.g. SSHR and MOVI, which almost share the same
3064      opcode field and value.  The difference is that one of them has an extra
3065      field as part of the opcode, while the same field is used for operand
3066      encoding in the other opcode(s) ('immh' in the case of the example).  */
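  /* Roughly, for that example: a zero immh field means the bits belong to
     the modified-immediate encoding (MOVI), while a non-zero immh selects
     the shift-by-immediate encoding (SSHR), so only one of the candidate
     opcodes will decode successfully below.  This reading is illustrative
     rather than a full statement of the encoding rules.  */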
3067   while (opcode != NULL)
3068     {
3069       /* But only one opcode can be decoded successfully, as the
3070 	 decoding routine will check the constraints carefully.  */
3071       if (aarch64_opcode_decode (opcode, insn, inst, noaliases_p, errors) == 1)
3072 	return ERR_OK;
3073       opcode = aarch64_find_next_opcode (opcode);
3074     }
3075 
3076   return ERR_UND;
3077 }
3078 
3079 /* Print operands.  */
3080 
3081 static void
3082 print_operands (bfd_vma pc, const aarch64_opcode *opcode,
3083 		const aarch64_opnd_info *opnds, struct disassemble_info *info,
3084 		bool *has_notes)
3085 {
3086   char *notes = NULL;
3087   int i, pcrel_p, num_printed;
3088   for (i = 0, num_printed = 0; i < AARCH64_MAX_OPND_NUM; ++i)
3089     {
3090       char str[128];
3091       /* We regard the opcode operand info as primary, but we also look into
3092 	 inst->operands to support the disassembling of optional
3093 	 operands.
3094 	 The two operand codes should be the same in all cases, apart from
3095 	 when an operand is optional.  */
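      /* For example, under the usual opcode table definitions, "ret"
	 normally omits its optional register operand when it is the default
	 X30; the operand string below then comes back empty and no
	 delimiter is printed for it.  */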
3096       if (opcode->operands[i] == AARCH64_OPND_NIL
3097 	  || opnds[i].type == AARCH64_OPND_NIL)
3098 	break;
3099 
3100       /* Generate the operand string in STR.  */
3101       aarch64_print_operand (str, sizeof (str), pc, opcode, opnds, i, &pcrel_p,
3102 			     &info->target, &notes, arch_variant);
3103 
3104       /* Print the delimiter (taking account of omitted operand(s)).  */
3105       if (str[0] != '\0')
3106 	(*info->fprintf_func) (info->stream, "%s",
3107 			       num_printed++ == 0 ? "\t" : ", ");
3108 
3109       /* Print the operand.  */
3110       if (pcrel_p)
3111 	(*info->print_address_func) (info->target, info);
3112       else
3113 	(*info->fprintf_func) (info->stream, "%s", str);
3114     }
3115 
3116   if (notes && !no_notes)
3117     {
3118       *has_notes = true;
3119       (*info->fprintf_func) (info->stream, "  // note: %s", notes);
3120     }
3121 }
3122 
3123 /* Set NAME to a copy of INST's mnemonic with the "." suffix removed.  */
3124 
3125 static void
3126 remove_dot_suffix (char *name, const aarch64_inst *inst)
3127 {
3128   char *ptr;
3129   size_t len;
3130 
3131   ptr = strchr (inst->opcode->name, '.');
3132   assert (ptr && inst->cond);
3133   len = ptr - inst->opcode->name;
3134   assert (len < 8);
3135   strncpy (name, inst->opcode->name, len);
3136   name[len] = '\0';
3137 }
3138 
3139 /* Print the instruction mnemonic name.  */
3140 
3141 static void
3142 print_mnemonic_name (const aarch64_inst *inst, struct disassemble_info *info)
3143 {
3144   if (inst->opcode->flags & F_COND)
3145     {
3146       /* For instructions that are truly conditionally executed, e.g. b.cond,
3147 	 prepare the full mnemonic name with the corresponding condition
3148 	 suffix.  */
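      /* E.g., assuming the B.cond table entry is spelled "b.c": the name is
	 truncated to "b" and printed together with the decoded condition,
	 giving "b.eq", "b.ne" and so on.  */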
3149       char name[8];
3150 
3151       remove_dot_suffix (name, inst);
3152       (*info->fprintf_func) (info->stream, "%s.%s", name, inst->cond->names[0]);
3153     }
3154   else
3155     (*info->fprintf_func) (info->stream, "%s", inst->opcode->name);
3156 }
3157 
3158 /* Decide whether we need to print a comment after the operands of
3159    instruction INST.  */
3160 
3161 static void
3162 print_comment (const aarch64_inst *inst, struct disassemble_info *info)
3163 {
3164   if (inst->opcode->flags & F_COND)
3165     {
3166       char name[8];
3167       unsigned int i, num_conds;
3168 
3169       remove_dot_suffix (name, inst);
3170       num_conds = ARRAY_SIZE (inst->cond->names);
3171       for (i = 1; i < num_conds && inst->cond->names[i]; ++i)
3172 	(*info->fprintf_func) (info->stream, "%s %s.%s",
3173 			       i == 1 ? "  //" : ",",
3174 			       name, inst->cond->names[i]);
3175     }
3176 }
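
/* Illustrative output of the above: conditions with more than one accepted
   spelling, e.g. "cs"/"hs" and "cc"/"lo", get the alternatives appended as a
   comment, so "b.cs" may be followed by "  // b.hs".  */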
3177 
3178 /* Build notes from verifiers into a string for printing.  */
3179 
3180 static void
3181 print_verifier_notes (aarch64_operand_error *detail,
3182 		      struct disassemble_info *info)
3183 {
3184   if (no_notes)
3185     return;
3186 
3187   /* The output of the verifier cannot be a fatal error, otherwise the assembly
3188      would not have succeeded.  Anything reported here is therefore non-fatal.  */
3189   assert (detail->non_fatal);
3190   assert (detail->error);
3191 
3192   /* Print the verifier note, followed by the operand it refers to if known.  */
3193   (*info->fprintf_func) (info->stream, "  // note: %s", detail->error);
3194   if (detail->index >= 0)
3195      (*info->fprintf_func) (info->stream, " at operand %d", detail->index + 1);
3196 }
3197 
3198 /* Print the instruction according to *INST.  */
3199 
3200 static void
3201 print_aarch64_insn (bfd_vma pc, const aarch64_inst *inst,
3202 		    const aarch64_insn code,
3203 		    struct disassemble_info *info,
3204 		    aarch64_operand_error *mismatch_details)
3205 {
3206   bool has_notes = false;
3207 
3208   print_mnemonic_name (inst, info);
3209   print_operands (pc, inst->opcode, inst->operands, info, &has_notes);
3210   print_comment (inst, info);
3211 
3212   /* If we have already printed a note there is not enough space to print
3213      another one, so exit.  Notes usually should not overlap, so we should not
3214      get a note from a register and from the instruction at the same time.  */
3215   if (has_notes)
3216     return;
3217 
3218   /* Always run the constraint verifiers; this is needed because the constraints
3219      need to maintain a global state regardless of whether the instruction has
3220      the flag set or not.  */
3221   enum err_type result = verify_constraints (inst, code, pc, false,
3222 					     mismatch_details, &insn_sequence);
3223   switch (result)
3224     {
3225     case ERR_UND:
3226     case ERR_UNP:
3227     case ERR_NYI:
3228       assert (0);
3229     case ERR_VFI:
3230       print_verifier_notes (mismatch_details, info);
3231       break;
3232     default:
3233       break;
3234     }
3235 }
3236 
3237 /* Entry-point of the instruction disassembler and printer.  */
3238 
3239 static void
3240 print_insn_aarch64_word (bfd_vma pc,
3241 			 uint32_t word,
3242 			 struct disassemble_info *info,
3243 			 aarch64_operand_error *errors)
3244 {
3245   static const char *err_msg[ERR_NR_ENTRIES+1] =
3246     {
3247       [ERR_OK]  = "_",
3248       [ERR_UND] = "undefined",
3249       [ERR_UNP] = "unpredictable",
3250       [ERR_NYI] = "NYI"
3251     };
3252 
3253   enum err_type ret;
3254   aarch64_inst inst;
3255 
3256   info->insn_info_valid = 1;
3257   info->branch_delay_insns = 0;
3258   info->data_size = 0;
3259   info->target = 0;
3260   info->target2 = 0;
3261 
3262   if (info->flags & INSN_HAS_RELOC)
3263     /* If the instruction has a reloc associated with it, then
3264        the offset field in the instruction will actually be the
3265        addend for the reloc.  (If we are using REL type relocs).
3266        In such cases, we can ignore the pc when computing
3267        addresses, since the addend is not currently pc-relative.  */
3268     pc = 0;
3269 
3270   ret = aarch64_decode_insn (word, &inst, no_aliases, errors);
3271 
3272   if (((word >> 21) & 0x3ff) == 1)
3273     {
3274       /* RESERVED for ALES.  */
3275       assert (ret != ERR_OK);
3276       ret = ERR_NYI;
3277     }
3278 
3279   switch (ret)
3280     {
3281     case ERR_UND:
3282     case ERR_UNP:
3283     case ERR_NYI:
3284       /* Handle undefined instructions.  */
3285       info->insn_type = dis_noninsn;
3286       (*info->fprintf_func) (info->stream, ".inst\t0x%08x ; %s",
3287 			     word, err_msg[ret]);
3288       break;
3289     case ERR_OK:
3290       user_friendly_fixup (&inst);
3291       print_aarch64_insn (pc, &inst, word, info, errors);
3292       break;
3293     default:
3294       abort ();
3295     }
3296 }
3297 
3298 /* Prevent mapping symbols ($x, $d, etc.) from
3299    being displayed in symbol-relative addresses.  */
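
/* For example, "$x", "$d" and "$d.42" are filtered out below, while ordinary
   symbols, and even names such as "$xyz", are still considered valid.  */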
3300 
3301 bool
3302 aarch64_symbol_is_valid (asymbol * sym,
3303 			 struct disassemble_info * info ATTRIBUTE_UNUSED)
3304 {
3305   const char * name;
3306 
3307   if (sym == NULL)
3308     return false;
3309 
3310   name = bfd_asymbol_name (sym);
3311 
3312   return name
3313     && (name[0] != '$'
3314 	|| (name[1] != 'x' && name[1] != 'd')
3315 	|| (name[2] != '\0' && name[2] != '.'));
3316 }
3317 
3318 /* Print data bytes on INFO->STREAM.  */
3319 
3320 static void
3321 print_insn_data (bfd_vma pc ATTRIBUTE_UNUSED,
3322 		 uint32_t word,
3323 		 struct disassemble_info *info,
3324 		 aarch64_operand_error *errors ATTRIBUTE_UNUSED)
3325 {
3326   switch (info->bytes_per_chunk)
3327     {
3328     case 1:
3329       info->fprintf_func (info->stream, ".byte\t0x%02x", word);
3330       break;
3331     case 2:
3332       info->fprintf_func (info->stream, ".short\t0x%04x", word);
3333       break;
3334     case 4:
3335       info->fprintf_func (info->stream, ".word\t0x%08x", word);
3336       break;
3337     default:
3338       abort ();
3339     }
3340 }
3341 
3342 /* Try to infer the code or data type from a symbol.
3343    Returns nonzero if *MAP_TYPE was set.  */
3344 
3345 static int
3346 get_sym_code_type (struct disassemble_info *info, int n,
3347 		   enum map_type *map_type)
3348 {
3349   asymbol * as;
3350   elf_symbol_type *es;
3351   unsigned int type;
3352   const char *name;
3353 
3354   if (n >= info->symtab_size)
3355     return false;
3356 
3357   /* If the symbol is in a different section, ignore it.  */
3358   if (info->section != NULL && info->section != info->symtab[n]->section)
3359     return false;
3360 
3361   as = info->symtab[n];
3362   if (bfd_asymbol_flavour (as) != bfd_target_elf_flavour)
3363     return false;
3364   es = (elf_symbol_type *) as;
3365 
3366   type = ELF_ST_TYPE (es->internal_elf_sym.st_info);
3367 
3368   /* If the symbol has function type then use that.  */
3369   if (type == STT_FUNC)
3370     {
3371       *map_type = MAP_INSN;
3372       return true;
3373     }
3374 
3375   /* Check for mapping symbols.  */
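  /* Per the AArch64 ELF ABI, "$x" marks the start of a sequence of A64
     instructions and "$d" marks the start of data; either may carry a
     suffix introduced by '.', such as "$x.foo".  */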
3376   name = bfd_asymbol_name (info->symtab[n]);
3377   if (name[0] == '$'
3378       && (name[1] == 'x' || name[1] == 'd')
3379       && (name[2] == '\0' || name[2] == '.'))
3380     {
3381       *map_type = (name[1] == 'x' ? MAP_INSN : MAP_DATA);
3382       return true;
3383     }
3384 
3385   return false;
3386 }
3387 
3388 /* Set the feature bits in arch_variant in order to get the correct disassembly
3389    for the chosen architecture variant.
3390 
3391    Currently we only restrict disassembly for Armv8-R and otherwise enable all
3392    non-R-profile features.  */
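/* The selected arch_variant is consulted when choosing aliases (see the
   AARCH64_CPU_HAS_FEATURE check in determine_disassembling_preference above)
   and is also passed down to aarch64_print_operand.  */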
3393 static void
3394 select_aarch64_variant (unsigned mach)
3395 {
3396   switch (mach)
3397     {
3398     case bfd_mach_aarch64_8R:
3399       arch_variant = AARCH64_ARCH_V8_R;
3400       break;
3401     default:
3402       arch_variant = AARCH64_ANY & ~(AARCH64_FEATURE_V8_R);
3403     }
3404 }
3405 
3406 /* Entry-point of the AArch64 disassembler.  */
3407 
3408 int
3409 print_insn_aarch64 (bfd_vma pc,
3410 		    struct disassemble_info *info)
3411 {
3412   bfd_byte	buffer[INSNLEN];
3413   int		status;
3414   void		(*printer) (bfd_vma, uint32_t, struct disassemble_info *,
3415 			    aarch64_operand_error *);
3416   bool   found = false;
3417   unsigned int	size = 4;
3418   unsigned long	data;
3419   aarch64_operand_error errors;
3420   static bool set_features;
3421 
3422   if (info->disassembler_options)
3423     {
3424       set_default_aarch64_dis_options (info);
3425 
3426       parse_aarch64_dis_options (info->disassembler_options);
3427 
3428       /* To avoid repeated parsing of these options, we remove them here.  */
3429       info->disassembler_options = NULL;
3430     }
3431 
3432   if (!set_features)
3433     {
3434       select_aarch64_variant (info->mach);
3435       set_features = true;
3436     }
3437 
3438   /* AArch64 instructions are always little-endian.  */
3439   info->endian_code = BFD_ENDIAN_LITTLE;
3440 
3441   /* Default to DATA.  A text section is required by the ABI to contain an
3442      INSN mapping symbol at the start.  A data section has no such
3443      requirement, hence if no mapping symbol is found the section must
3444      contain only data.  This however isn't very useful if the user has
3445      fully stripped the binaries.  If this is the case use the section
3446      attributes to determine the default.  If we have no section default to
3447      INSN as well, as we may be disassembling some raw bytes from a bare-metal
3448      HEX file or similar.  */
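  /* For instance, a fully stripped .text section (SEC_CODE set) still
     defaults to INSN below, a stripped .data section defaults to DATA, and
     disassembling with no section at all also defaults to INSN.  */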
3449   enum map_type type = MAP_DATA;
3450   if ((info->section && info->section->flags & SEC_CODE) || !info->section)
3451     type = MAP_INSN;
3452 
3453   /* First check the full symtab for a mapping symbol, even if there
3454      are no usable non-mapping symbols for this address.  */
3455   if (info->symtab_size != 0
3456       && bfd_asymbol_flavour (*info->symtab) == bfd_target_elf_flavour)
3457     {
3458       int last_sym = -1;
3459       bfd_vma addr, section_vma = 0;
3460       bool can_use_search_opt_p;
3461       int n;
3462 
3463       if (pc <= last_mapping_addr)
3464 	last_mapping_sym = -1;
3465 
3466       /* Start scanning at the start of the function, or wherever
3467 	 we finished last time.  */
3468       n = info->symtab_pos + 1;
3469 
3470       /* If the last stop offset is different from the current one it means we
3471 	 are disassembling a different glob of bytes.  As such the optimization
3472 	 would not be safe and we should start over.  */
3473       can_use_search_opt_p = last_mapping_sym >= 0
3474 			     && info->stop_offset == last_stop_offset;
3475 
3476       if (n >= last_mapping_sym && can_use_search_opt_p)
3477 	n = last_mapping_sym;
3478 
3479       /* Look down while we haven't passed the location being disassembled.
3480 	 The reason for this is that there's no defined order between a symbol
3481 	 and a mapping symbol that may be at the same address.  We may have to
3482 	 look at least one position ahead.  */
3483       for (; n < info->symtab_size; n++)
3484 	{
3485 	  addr = bfd_asymbol_value (info->symtab[n]);
3486 	  if (addr > pc)
3487 	    break;
3488 	  if (get_sym_code_type (info, n, &type))
3489 	    {
3490 	      last_sym = n;
3491 	      found = true;
3492 	    }
3493 	}
3494 
3495       if (!found)
3496 	{
3497 	  n = info->symtab_pos;
3498 	  if (n >= last_mapping_sym && can_use_search_opt_p)
3499 	    n = last_mapping_sym;
3500 
3501 	  /* No mapping symbol found at this address.  Look backwards
3502 	     for a preceding one, but don't go past the section start,
3503 	     otherwise a data section with no mapping symbol can pick up
3504 	     a text mapping symbol of a preceding section.  The documentation
3505 	     says section can be NULL, in which case we will seek up all the
3506 	     way to the top.  */
3507 	  if (info->section)
3508 	    section_vma = info->section->vma;
3509 
3510 	  for (; n >= 0; n--)
3511 	    {
3512 	      addr = bfd_asymbol_value (info->symtab[n]);
3513 	      if (addr < section_vma)
3514 		break;
3515 
3516 	      if (get_sym_code_type (info, n, &type))
3517 		{
3518 		  last_sym = n;
3519 		  found = true;
3520 		  break;
3521 		}
3522 	    }
3523 	}
3524 
3525       last_mapping_sym = last_sym;
3526       last_type = type;
3527       last_stop_offset = info->stop_offset;
3528 
3529       /* Look a little bit ahead to see if we should print out
3530 	 less than four bytes of data.  If there's a symbol,
3531 	 mapping or otherwise, after two bytes then don't
3532 	 print more.  */
3533       if (last_type == MAP_DATA)
3534 	{
3535 	  size = 4 - (pc & 3);
3536 	  for (n = last_sym + 1; n < info->symtab_size; n++)
3537 	    {
3538 	      addr = bfd_asymbol_value (info->symtab[n]);
3539 	      if (addr > pc)
3540 		{
3541 		  if (addr - pc < size)
3542 		    size = addr - pc;
3543 		  break;
3544 		}
3545 	    }
3546 	  /* If the next symbol is after three bytes, we need to
3547 	     print only part of the data, so that we can use either
3548 	     .byte or .short.  */
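	  /* Worked example: with PC 4-byte aligned and the next symbol three
	     bytes away, SIZE is first clipped to 3 and then rounded down to
	     2, so a .short is printed now and the remaining byte is handled
	     by the next call with SIZE == 1.  */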
3549 	  if (size == 3)
3550 	    size = (pc & 1) ? 1 : 2;
3551 	}
3552     }
3553   else
3554     last_type = type;
3555 
3556   /* PR 10263: Disassemble data if requested to do so by the user.  */
3557   if (last_type == MAP_DATA && ((info->flags & DISASSEMBLE_DATA) == 0))
3558     {
3559       /* size was set above.  */
3560       info->bytes_per_chunk = size;
3561       info->display_endian = info->endian;
3562       printer = print_insn_data;
3563     }
3564   else
3565     {
3566       info->bytes_per_chunk = size = INSNLEN;
3567       info->display_endian = info->endian_code;
3568       printer = print_insn_aarch64_word;
3569     }
3570 
3571   status = (*info->read_memory_func) (pc, buffer, size, info);
3572   if (status != 0)
3573     {
3574       (*info->memory_error_func) (status, pc, info);
3575       return -1;
3576     }
3577 
3578   data = bfd_get_bits (buffer, size * 8,
3579 		       info->display_endian == BFD_ENDIAN_BIG);
3580 
3581   (*printer) (pc, data, info, &errors);
3582 
3583   return size;
3584 }
3585 
3586 void
3587 print_aarch64_disassembler_options (FILE *stream)
3588 {
3589   fprintf (stream, _("\n\
3590 The following AARCH64 specific disassembler options are supported for use\n\
3591 with the -M switch (multiple options should be separated by commas):\n"));
3592 
3593   fprintf (stream, _("\n\
3594   no-aliases         Don't print instruction aliases.\n"));
3595 
3596   fprintf (stream, _("\n\
3597   aliases            Do print instruction aliases.\n"));
3598 
3599   fprintf (stream, _("\n\
3600   no-notes           Don't print instruction notes.\n"));
3601 
3602   fprintf (stream, _("\n\
3603   notes              Do print instruction notes.\n"));
3604 
3605 #ifdef DEBUG_AARCH64
3606   fprintf (stream, _("\n\
3607   debug_dump         Temp switch for debug trace.\n"));
3608 #endif /* DEBUG_AARCH64 */
3609 
3610   fprintf (stream, _("\n"));
3611 }
3612