1 /* tc-arm.c -- Assemble for the ARM
2    Copyright (C) 1994-2021 Free Software Foundation, Inc.
3    Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4 	Modified by David Taylor (dtaylor@armltd.co.uk)
5 	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6 	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7 	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8 
9    This file is part of GAS, the GNU Assembler.
10 
11    GAS is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License as published by
13    the Free Software Foundation; either version 3, or (at your option)
14    any later version.
15 
16    GAS is distributed in the hope that it will be useful,
17    but WITHOUT ANY WARRANTY; without even the implied warranty of
18    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19    GNU General Public License for more details.
20 
21    You should have received a copy of the GNU General Public License
22    along with GAS; see the file COPYING.  If not, write to the Free
23    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24    02110-1301, USA.  */
25 
26 #include "as.h"
27 #include <limits.h>
28 #include <stdarg.h>
29 #define	 NO_RELOC 0
30 #include "safe-ctype.h"
31 #include "subsegs.h"
32 #include "obstack.h"
33 #include "libiberty.h"
34 #include "opcode/arm.h"
35 #include "cpu-arm.h"
36 
37 #ifdef OBJ_ELF
38 #include "elf/arm.h"
39 #include "dw2gencfi.h"
40 #endif
41 
42 #include "dwarf2dbg.h"
43 
44 #ifdef OBJ_ELF
45 /* Must be at least the size of the largest unwind opcode (currently two).  */
46 #define ARM_OPCODE_CHUNK_SIZE 8
47 
48 /* This structure holds the unwinding state.  */
49 
static struct
{
  /* Symbol marking the start of the current procedure.  */
  symbolS *	  proc_start;
  /* Symbol for this function's entry in the unwind table.  */
  symbolS *	  table_entry;
  /* Custom personality routine symbol, if one was specified.  */
  symbolS *	  personality_routine;
  /* Index of a pre-defined personality routine — NOTE(review): the
     sentinel used for "unset" is established elsewhere; confirm.  */
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes of OPCODES used, and bytes allocated for it.  */
  int		  opcode_count;
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.	 */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;
78 
79 /* Whether --fdpic was given.  */
80 static int arm_fdpic;
81 
82 #endif /* OBJ_ELF */
83 
84 /* Results from operand parsing worker functions.  */
85 
typedef enum
{
  PARSE_OPERAND_SUCCESS,	     /* Operand parsed successfully.  */
  PARSE_OPERAND_FAIL,		     /* Parse failed; other interpretations
					may still be tried.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK    /* Parse failed and, per the name, no
					backtracking to another
					interpretation should occur.  */
} parse_operand_result;
92 
/* Floating point ABI variants — NOTE(review): presumably selected via
   -mfloat-abi (see mfloat_abi_opt below); confirm against the option
   parsing code.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99 
100 /* Types of processor to assemble for.	*/
101 #ifndef CPU_DEFAULT
102 /* The code that was here used to select a default CPU depending on compiler
103    pre-defines which were only present when doing native builds, thus
104    changing gas' default behaviour depending upon the build host.
105 
   If you have a target that requires a default CPU option then you
107    should define CPU_DEFAULT here.  */
108 #endif
109 
110 /* Perform range checks on positive and negative overflows by checking if the
   VALUE given fits within the range of a BITS-sized immediate.  */
/* Return TRUE if VALUE does not fit in a BITS wide immediate field,
   accepting either the unsigned range [0, 2^BITS) or a sign-extended
   negative value (all bits above BIT BITS-1 set).

   The mask must be built in offsetT, not int: offsetT is typically
   64 bits wide, so BITS may be >= 31, where the original "1 << bits"
   on a 32-bit int is undefined behaviour and yields a wrong mask.  */
static bool
out_of_range_p (offsetT value, offsetT bits)
{
  gas_assert (bits < (offsetT) (sizeof (value) * 8));
  offsetT high_mask = ~(((offsetT) 1 << bits) - 1);
  /* Out of range iff some high bit is set and the high bits are not
     all set (i.e. not a sign-extended negative).  */
  return (value & high_mask) != 0 && (value & high_mask) != high_mask;
}
118 
119 #ifndef FPU_DEFAULT
120 # ifdef TE_LINUX
121 #  define FPU_DEFAULT FPU_ARCH_FPA
122 # elif defined (TE_NetBSD)
123 #  ifdef OBJ_ELF
124 #   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
125 #  else
126     /* Legacy a.out format.  */
127 #   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
128 #  endif
129 # elif defined (TE_VXWORKS)
130 #  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
131 # else
132    /* For backwards compatibility, default to FPA.  */
133 #  define FPU_DEFAULT FPU_ARCH_FPA
134 # endif
135 #endif /* ifndef FPU_DEFAULT */
136 
137 #define streq(a, b)	      (strcmp (a, b) == 0)
138 
139 /* Current set of feature bits available (CPU+FPU).  Different from
140    selected_cpu + selected_fpu in case of autodetection since the CPU
141    feature bits are then all set.  */
142 static arm_feature_set cpu_variant;
143 /* Feature bits used in each execution state.  Used to set build attribute
144    (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
145 static arm_feature_set arm_arch_used;
146 static arm_feature_set thumb_arch_used;
147 
148 /* Flags stored in private area of BFD structure.  */
149 static int uses_apcs_26	     = false;
150 static int atpcs	     = false;
151 static int support_interwork = false;
152 static int uses_apcs_float   = false;
153 static int pic_code	     = false;
154 static int fix_v4bx	     = false;
155 /* Warn on using deprecated features.  */
156 static int warn_on_deprecated = true;
157 static int warn_on_restrict_it = false;
158 
159 /* Understand CodeComposer Studio assembly syntax.  */
160 bool codecomposer_syntax = false;
161 
162 /* Variables that we set while parsing command-line options.  Once all
163    options have been read we re-process these values to set the real
164    assembly flags.  */
165 
166 /* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167    instead of -mcpu=arm1).  */
168 static const arm_feature_set *legacy_cpu = NULL;
169 static const arm_feature_set *legacy_fpu = NULL;
170 
171 /* CPU, extension and FPU feature bits selected by -mcpu.  */
172 static const arm_feature_set *mcpu_cpu_opt = NULL;
173 static arm_feature_set *mcpu_ext_opt = NULL;
174 static const arm_feature_set *mcpu_fpu_opt = NULL;
175 
176 /* CPU, extension and FPU feature bits selected by -march.  */
177 static const arm_feature_set *march_cpu_opt = NULL;
178 static arm_feature_set *march_ext_opt = NULL;
179 static const arm_feature_set *march_fpu_opt = NULL;
180 
181 /* Feature bits selected by -mfpu.  */
182 static const arm_feature_set *mfpu_opt = NULL;
183 
184 /* Constants for known architecture features.  */
185 static const arm_feature_set fpu_default = FPU_DEFAULT;
186 static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
187 static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
188 static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
189 static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
190 static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
191 static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
192 #ifdef OBJ_ELF
193 static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
194 #endif
195 static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
196 
197 #ifdef CPU_DEFAULT
198 static const arm_feature_set cpu_default = CPU_DEFAULT;
199 #endif
200 
201 static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
202 static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
203 static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
204 static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
205 static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
206 static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
207 static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
208 static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
209 static const arm_feature_set arm_ext_v4t_5 =
210   ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
211 static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
212 static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
213 static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
214 static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
215 static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
216 static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
217 static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
219 static const arm_feature_set arm_ext_v6k_v6t2 =
220   ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
221 static const arm_feature_set arm_ext_v6_notm =
222   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
223 static const arm_feature_set arm_ext_v6_dsp =
224   ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
225 static const arm_feature_set arm_ext_barrier =
226   ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
227 static const arm_feature_set arm_ext_msr =
228   ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
229 static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
230 static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
231 static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
232 static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
233 static const arm_feature_set arm_ext_v8r = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R);
234 #ifdef OBJ_ELF
235 static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
236 #endif
237 static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
238 static const arm_feature_set arm_ext_m =
239   ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
240 		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
241 static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
242 static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
243 static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
244 static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
245 static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
246 static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
247 static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
248 static const arm_feature_set arm_ext_v8m_main =
249   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
250 static const arm_feature_set arm_ext_v8_1m_main =
251 ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
252 /* Instructions in ARMv8-M only found in M profile architectures.  */
253 static const arm_feature_set arm_ext_v8m_m_only =
254   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
255 static const arm_feature_set arm_ext_v6t2_v8m =
256   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
257 /* Instructions shared between ARMv8-A and ARMv8-M.  */
258 static const arm_feature_set arm_ext_atomics =
259   ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
260 #ifdef OBJ_ELF
261 /* DSP instructions Tag_DSP_extension refers to.  */
262 static const arm_feature_set arm_ext_dsp =
263   ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
264 #endif
265 static const arm_feature_set arm_ext_ras =
266   ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
267 /* FP16 instructions.  */
268 static const arm_feature_set arm_ext_fp16 =
269   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
270 static const arm_feature_set arm_ext_fp16_fml =
271   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
272 static const arm_feature_set arm_ext_v8_2 =
273   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
274 static const arm_feature_set arm_ext_v8_3 =
275   ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
276 static const arm_feature_set arm_ext_sb =
277   ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
278 static const arm_feature_set arm_ext_predres =
279   ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
280 static const arm_feature_set arm_ext_bf16 =
281   ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
282 static const arm_feature_set arm_ext_i8mm =
283   ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
284 static const arm_feature_set arm_ext_crc =
285   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
286 static const arm_feature_set arm_ext_cde =
287   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE);
288 static const arm_feature_set arm_ext_cde0 =
289   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0);
290 static const arm_feature_set arm_ext_cde1 =
291   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1);
292 static const arm_feature_set arm_ext_cde2 =
293   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2);
294 static const arm_feature_set arm_ext_cde3 =
295   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3);
296 static const arm_feature_set arm_ext_cde4 =
297   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4);
298 static const arm_feature_set arm_ext_cde5 =
299   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5);
300 static const arm_feature_set arm_ext_cde6 =
301   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6);
302 static const arm_feature_set arm_ext_cde7 =
303   ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7);
304 
305 static const arm_feature_set arm_arch_any = ARM_ANY;
306 static const arm_feature_set fpu_any = FPU_ANY;
307 static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
308 static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
309 static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
310 
311 static const arm_feature_set arm_cext_iwmmxt2 =
312   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
313 static const arm_feature_set arm_cext_iwmmxt =
314   ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
315 static const arm_feature_set arm_cext_xscale =
316   ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
317 static const arm_feature_set arm_cext_maverick =
318   ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
319 static const arm_feature_set fpu_fpa_ext_v1 =
320   ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
321 static const arm_feature_set fpu_fpa_ext_v2 =
322   ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
323 static const arm_feature_set fpu_vfp_ext_v1xd =
324   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
325 static const arm_feature_set fpu_vfp_ext_v1 =
326   ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
327 static const arm_feature_set fpu_vfp_ext_v2 =
328   ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
329 static const arm_feature_set fpu_vfp_ext_v3xd =
330   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
331 static const arm_feature_set fpu_vfp_ext_v3 =
332   ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
333 static const arm_feature_set fpu_vfp_ext_d32 =
334   ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
335 static const arm_feature_set fpu_neon_ext_v1 =
336   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
337 static const arm_feature_set fpu_vfp_v3_or_neon_ext =
338   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
339 static const arm_feature_set mve_ext =
340   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
341 static const arm_feature_set mve_fp_ext =
342   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
343 /* Note: This has more than one bit set, which means using it with
344    mark_feature_used (which returns if *any* of the bits are set in the current
345    cpu variant) can give surprising results.  */
346 static const arm_feature_set armv8m_fp =
347   ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16);
348 #ifdef OBJ_ELF
349 static const arm_feature_set fpu_vfp_fp16 =
350   ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
351 static const arm_feature_set fpu_neon_ext_fma =
352   ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
353 #endif
354 static const arm_feature_set fpu_vfp_ext_fma =
355   ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
356 static const arm_feature_set fpu_vfp_ext_armv8 =
357   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
358 static const arm_feature_set fpu_vfp_ext_armv8xd =
359   ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
360 static const arm_feature_set fpu_neon_ext_armv8 =
361   ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
362 static const arm_feature_set fpu_crypto_ext_armv8 =
363   ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
364 static const arm_feature_set fpu_neon_ext_v8_1 =
365   ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
366 static const arm_feature_set fpu_neon_ext_dotprod =
367   ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
368 
369 static int mfloat_abi_opt = -1;
370 /* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
371    directive.  */
372 static arm_feature_set selected_arch = ARM_ARCH_NONE;
373 /* Extension feature bits selected by the last -mcpu/-march or .arch_extension
374    directive.  */
375 static arm_feature_set selected_ext = ARM_ARCH_NONE;
/* Feature bits selected by the last -mcpu/-march or by the combination of the
   last .cpu/.arch directive and any .arch_extension directives since that
   directive.  */
379 static arm_feature_set selected_cpu = ARM_ARCH_NONE;
380 /* FPU feature bits selected by the last -mfpu or .fpu directive.  */
381 static arm_feature_set selected_fpu = FPU_NONE;
382 /* Feature bits selected by the last .object_arch directive.  */
383 static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
static const struct arm_ext_table * selected_ctx_ext_table = NULL;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];
387 
388 extern FLONUM_TYPE generic_floating_point_number;
389 
390 /* Return if no cpu was selected on command-line.  */
391 static bool
no_cpu_selected(void)392 no_cpu_selected (void)
393 {
394   return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
395 }
396 
397 #ifdef OBJ_ELF
398 # ifdef EABI_DEFAULT
399 static int meabi_flags = EABI_DEFAULT;
400 # else
401 static int meabi_flags = EF_ARM_EABI_UNKNOWN;
402 # endif
403 
404 static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
405 
406 bool
arm_is_eabi(void)407 arm_is_eabi (void)
408 {
409   return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
410 }
411 #endif
412 
413 #ifdef OBJ_ELF
414 /* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
415 symbolS * GOT_symbol;
416 #endif
417 
418 /* 0: assemble for ARM,
419    1: assemble for Thumb,
420    2: assemble for Thumb even though target CPU does not support thumb
421       instructions.  */
422 static int thumb_mode = 0;
423 /* A value distinct from the possible values for thumb_mode that we
424    can use to record whether thumb_mode has been copied into the
425    tc_frag_data field of a frag.  */
426 #define MODE_RECORDED (1 << 4)
427 
428 /* Specifies the intrinsic IT insn behavior mode.  */
429 enum implicit_it_mode
430 {
431   IMPLICIT_IT_MODE_NEVER  = 0x00,
432   IMPLICIT_IT_MODE_ARM    = 0x01,
433   IMPLICIT_IT_MODE_THUMB  = 0x02,
434   IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
435 };
436 static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
437 
438 /* If unified_syntax is true, we are processing the new unified
439    ARM/Thumb syntax.  Important differences from the old ARM mode:
440 
441      - Immediate operands do not require a # prefix.
442      - Conditional affixes always appear at the end of the
443        instruction.  (For backward compatibility, those instructions
444        that formerly had them in the middle, continue to accept them
445        there.)
446      - The IT instruction may appear, and if it does is validated
447        against subsequent conditional affixes.  It does not generate
448        machine code.
449 
450    Important differences from the old Thumb mode:
451 
452      - Immediate operands do not require a # prefix.
453      - Most of the V6T2 instructions are only available in unified mode.
454      - The .N and .W suffixes are recognized and honored (it is an error
455        if they cannot be honored).
456      - All instructions set the flags if and only if they have an 's' affix.
457      - Conditional affixes may be used.  They are validated against
458        preceding IT instructions.  Unlike ARM mode, you cannot use a
459        conditional affix except in the scope of an IT instruction.  */
460 
461 static bool unified_syntax = false;
462 
463 /* An immediate operand can start with #, and ld*, st*, pld operands
464    can contain [ and ].  We need to tell APP not to elide whitespace
465    before a [, which can appear as the first operand for pld.
466    Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
467 const char arm_symbol_chars[] = "#[]{}";
468 
/* Element types for Neon-style type suffixes on mnemonics (see the
   is_neon / vectype fields in struct arm_it below).  NT_invtype marks
   an invalid or unset type.  */
enum neon_el_type
{
  NT_invtype,
  NT_untyped,
  NT_integer,
  NT_float,
  NT_poly,
  NT_signed,
  NT_bfloat,
  NT_unsigned
};
480 
481 struct neon_type_el
482 {
483   enum neon_el_type type;
484   unsigned size;
485 };
486 
487 #define NEON_MAX_TYPE_ELS 5
488 
489 struct neon_type
490 {
491   struct neon_type_el el[NEON_MAX_TYPE_ELS];
492   unsigned elems;
493 };
494 
495 enum pred_instruction_type
496 {
497    OUTSIDE_PRED_INSN,
498    INSIDE_VPT_INSN,
499    INSIDE_IT_INSN,
500    INSIDE_IT_LAST_INSN,
501    IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
502 			      if inside, should be the last one.  */
503    NEUTRAL_IT_INSN,        /* This could be either inside or outside,
504 			      i.e. BKPT and NOP.  */
505    IT_INSN,		   /* The IT insn has been parsed.  */
506    VPT_INSN,		   /* The VPT/VPST insn has been parsed.  */
507    MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
508 			      a predication code.  */
509    MVE_UNPREDICABLE_INSN,  /* MVE instruction that is non-predicable.  */
510 };
511 
512 /* The maximum number of operands we need.  */
513 #define ARM_IT_MAX_OPERANDS 6
514 #define ARM_IT_MAX_RELOCS 3
515 
516 struct arm_it
517 {
518   const char *	error;
519   unsigned long instruction;
520   unsigned int	size;
521   unsigned int	size_req;
522   unsigned int	cond;
523   /* "uncond_value" is set to the value in place of the conditional field in
524      unconditional versions of the instruction, or -1u if nothing is
525      appropriate.  */
526   unsigned int	uncond_value;
527   struct neon_type vectype;
528   /* This does not indicate an actual NEON instruction, only that
529      the mnemonic accepts neon-style type suffixes.  */
530   int		is_neon;
531   /* Set to the opcode if the instruction needs relaxation.
532      Zero if the instruction is not relaxed.  */
533   unsigned long	relax;
534   struct
535   {
536     bfd_reloc_code_real_type type;
537     expressionS		     exp;
538     int			     pc_rel;
539   } relocs[ARM_IT_MAX_RELOCS];
540 
541   enum pred_instruction_type pred_insn_type;
542 
543   struct
544   {
545     unsigned reg;
546     signed int imm;
547     struct neon_type_el vectype;
548     unsigned present	: 1;  /* Operand present.  */
549     unsigned isreg	: 1;  /* Operand was a register.  */
550     unsigned immisreg	: 2;  /* .imm field is a second register.
551 				 0: imm, 1: gpr, 2: MVE Q-register.  */
552     unsigned isscalar   : 2;  /* Operand is a (SIMD) scalar:
553 				 0) not scalar,
554 				 1) Neon scalar,
555 				 2) MVE scalar.  */
556     unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
557     unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
558     /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
559        instructions. This allows us to disambiguate ARM <-> vector insns.  */
560     unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
561     unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
562     unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
563     unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
564     unsigned iszr	: 1;  /* Operand is ZR register.  */
565     unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
566     unsigned writeback	: 1;  /* Operand has trailing !  */
567     unsigned preind	: 1;  /* Preindexed address.  */
568     unsigned postind	: 1;  /* Postindexed address.  */
569     unsigned negative	: 1;  /* Index register was negated.  */
570     unsigned shifted	: 1;  /* Shift applied to operation.  */
571     unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
572   } operands[ARM_IT_MAX_OPERANDS];
573 };
574 
575 static struct arm_it inst;
576 
577 #define NUM_FLOAT_VALS 8
578 
/* Textual floating point constants; the array is terminated by a null
   pointer.  NUM_FLOAT_VALS counts the real entries, matching the size
   of fp_values below — NOTE(review): presumably parsed into fp_values
   during initialization elsewhere; confirm.  */
const char * fp_const[] =
{
  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
};
583 
584 LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
585 
586 #define FAIL	(-1)
587 #define SUCCESS (0)
588 
589 #define SUFF_S 1
590 #define SUFF_D 2
591 #define SUFF_E 3
592 #define SUFF_P 4
593 
594 #define CP_T_X	 0x00008000
595 #define CP_T_Y	 0x00400000
596 
597 #define CONDS_BIT	 0x00100000
598 #define LOAD_BIT	 0x00100000
599 
600 #define DOUBLE_LOAD_FLAG 0x00000001
601 
602 struct asm_cond
603 {
604   const char *	 template_name;
605   unsigned long  value;
606 };
607 
608 #define COND_ALWAYS 0xE
609 
610 struct asm_psr
611 {
612   const char *   template_name;
613   unsigned long  field;
614 };
615 
616 struct asm_barrier_opt
617 {
618   const char *    template_name;
619   unsigned long   value;
620   const arm_feature_set arch;
621 };
622 
623 /* The bit that distinguishes CPSR and SPSR.  */
624 #define SPSR_BIT   (1 << 22)
625 
626 /* The individual PSR flag bits.  */
627 #define PSR_c	(1 << 16)
628 #define PSR_x	(1 << 17)
629 #define PSR_s	(1 << 18)
630 #define PSR_f	(1 << 19)
631 
632 struct reloc_entry
633 {
634   const char *              name;
635   bfd_reloc_code_real_type  reloc;
636 };
637 
638 enum vfp_reg_pos
639 {
640   VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
641   VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
642 };
643 
644 enum vfp_ldstm_type
645 {
646   VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
647 };
648 
649 /* Bits for DEFINED field in neon_typed_alias.  */
650 #define NTA_HASTYPE  1
651 #define NTA_HASINDEX 2
652 
653 struct neon_typed_alias
654 {
655   unsigned char        defined;
656   unsigned char        index;
657   struct neon_type_el  eltype;
658 };
659 
660 /* ARM register categories.  This includes coprocessor numbers and various
661    architecture extensions' registers.  Each entry should have an error message
662    in reg_expected_msgs below.  */
663 enum arm_reg_type
664 {
665   REG_TYPE_RN,
666   REG_TYPE_CP,
667   REG_TYPE_CN,
668   REG_TYPE_FN,
669   REG_TYPE_VFS,
670   REG_TYPE_VFD,
671   REG_TYPE_NQ,
672   REG_TYPE_VFSD,
673   REG_TYPE_NDQ,
674   REG_TYPE_NSD,
675   REG_TYPE_NSDQ,
676   REG_TYPE_VFC,
677   REG_TYPE_MVF,
678   REG_TYPE_MVD,
679   REG_TYPE_MVFX,
680   REG_TYPE_MVDX,
681   REG_TYPE_MVAX,
682   REG_TYPE_MQ,
683   REG_TYPE_DSPSC,
684   REG_TYPE_MMXWR,
685   REG_TYPE_MMXWC,
686   REG_TYPE_MMXWCG,
687   REG_TYPE_XSCALE,
688   REG_TYPE_RNB,
689   REG_TYPE_ZR
690 };
691 
692 /* Structure for a hash table entry for a register.
693    If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
694    information which states whether a vector type or index is specified (for a
695    register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
696 struct reg_entry
697 {
698   const char *               name;
699   unsigned int               number;
700   unsigned char              type;
701   unsigned char              builtin;
702   struct neon_typed_alias *  neon;
703 };
704 
705 /* Diagnostics used when we don't get a register of the expected type.	*/
706 const char * const reg_expected_msgs[] =
707 {
708   [REG_TYPE_RN]	    = N_("ARM register expected"),
709   [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
710   [REG_TYPE_CN]	    = N_("co-processor register expected"),
711   [REG_TYPE_FN]	    = N_("FPA register expected"),
712   [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
713   [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
714   [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
715   [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
716   [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
717   [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
718   [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
719 			 " expected"),
720   [REG_TYPE_VFC]    = N_("VFP system register expected"),
721   [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
722   [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
723   [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
724   [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
725   [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
726   [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
727   [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
728   [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
729   [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
730   [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
731   [REG_TYPE_MQ]	    = N_("MVE vector register expected"),
732   [REG_TYPE_RNB]    = "",
733   [REG_TYPE_ZR]     = N_("ZR register expected"),
734 };
735 
736 /* Some well known registers that we refer to directly elsewhere.  */
737 #define REG_R12	12
738 #define REG_SP	13
739 #define REG_LR	14
740 #define REG_PC	15
741 
742 /* ARM instructions take 4bytes in the object file, Thumb instructions
743    take 2:  */
744 #define INSN_SIZE	4
745 
746 struct asm_opcode
747 {
748   /* Basic string to match.  */
749   const char * template_name;
750 
751   /* Parameters to instruction.	 */
752   unsigned int operands[8];
753 
754   /* Conditional tag - see opcode_lookup.  */
755   unsigned int tag : 4;
756 
757   /* Basic instruction code.  */
758   unsigned int avalue;
759 
760   /* Thumb-format instruction code.  */
761   unsigned int tvalue;
762 
763   /* Which architecture variant provides this instruction.  */
764   const arm_feature_set * avariant;
765   const arm_feature_set * tvariant;
766 
767   /* Function to call to encode instruction in ARM format.  */
768   void (* aencode) (void);
769 
770   /* Function to call to encode instruction in Thumb format.  */
771   void (* tencode) (void);
772 
773   /* Indicates whether this instruction may be vector predicated.  */
774   unsigned int mayBeVecPred : 1;
775 };
776 
777 /* Defines for various bits that we will want to toggle.  */
778 #define INST_IMMEDIATE	0x02000000
779 #define OFFSET_REG	0x02000000
780 #define HWOFFSET_IMM	0x00400000
781 #define SHIFT_BY_REG	0x00000010
782 #define PRE_INDEX	0x01000000
783 #define INDEX_UP	0x00800000
784 #define WRITE_BACK	0x00200000
785 #define LDM_TYPE_2_OR_3	0x00400000
786 #define CPSI_MMOD	0x00020000
787 
788 #define LITERAL_MASK	0xf000f000
789 #define OPCODE_MASK	0xfe1fffff
790 #define V4_STR_BIT	0x00000020
791 #define VLDR_VMOV_SAME	0x0040f000
792 
793 #define T2_SUBS_PC_LR	0xf3de8f00
794 
795 #define DATA_OP_SHIFT	21
796 #define SBIT_SHIFT	20
797 
798 #define T2_OPCODE_MASK	0xfe1fffff
799 #define T2_DATA_OP_SHIFT 21
800 #define T2_SBIT_SHIFT	 20
801 
802 #define A_COND_MASK         0xf0000000
803 #define A_PUSH_POP_OP_MASK  0x0fff0000
804 
/* Opcodes for pushing/popping registers to/from the stack.  */
806 #define A1_OPCODE_PUSH    0x092d0000
807 #define A2_OPCODE_PUSH    0x052d0004
808 #define A2_OPCODE_POP     0x049d0004
809 
810 /* Codes to distinguish the arithmetic instructions.  */
811 #define OPCODE_AND	0
812 #define OPCODE_EOR	1
813 #define OPCODE_SUB	2
814 #define OPCODE_RSB	3
815 #define OPCODE_ADD	4
816 #define OPCODE_ADC	5
817 #define OPCODE_SBC	6
818 #define OPCODE_RSC	7
819 #define OPCODE_TST	8
820 #define OPCODE_TEQ	9
821 #define OPCODE_CMP	10
822 #define OPCODE_CMN	11
823 #define OPCODE_ORR	12
824 #define OPCODE_MOV	13
825 #define OPCODE_BIC	14
826 #define OPCODE_MVN	15
827 
828 #define T2_OPCODE_AND	0
829 #define T2_OPCODE_BIC	1
830 #define T2_OPCODE_ORR	2
831 #define T2_OPCODE_ORN	3
832 #define T2_OPCODE_EOR	4
833 #define T2_OPCODE_ADD	8
834 #define T2_OPCODE_ADC	10
835 #define T2_OPCODE_SBC	11
836 #define T2_OPCODE_SUB	13
837 #define T2_OPCODE_RSB	14
838 
839 #define T_OPCODE_MUL 0x4340
840 #define T_OPCODE_TST 0x4200
841 #define T_OPCODE_CMN 0x42c0
842 #define T_OPCODE_NEG 0x4240
843 #define T_OPCODE_MVN 0x43c0
844 
845 #define T_OPCODE_ADD_R3	0x1800
846 #define T_OPCODE_SUB_R3 0x1a00
847 #define T_OPCODE_ADD_HI 0x4400
848 #define T_OPCODE_ADD_ST 0xb000
849 #define T_OPCODE_SUB_ST 0xb080
850 #define T_OPCODE_ADD_SP 0xa800
851 #define T_OPCODE_ADD_PC 0xa000
852 #define T_OPCODE_ADD_I8 0x3000
853 #define T_OPCODE_SUB_I8 0x3800
854 #define T_OPCODE_ADD_I3 0x1c00
855 #define T_OPCODE_SUB_I3 0x1e00
856 
857 #define T_OPCODE_ASR_R	0x4100
858 #define T_OPCODE_LSL_R	0x4080
859 #define T_OPCODE_LSR_R	0x40c0
860 #define T_OPCODE_ROR_R	0x41c0
861 #define T_OPCODE_ASR_I	0x1000
862 #define T_OPCODE_LSL_I	0x0000
863 #define T_OPCODE_LSR_I	0x0800
864 
865 #define T_OPCODE_MOV_I8	0x2000
866 #define T_OPCODE_CMP_I8 0x2800
867 #define T_OPCODE_CMP_LR 0x4280
868 #define T_OPCODE_MOV_HR 0x4600
869 #define T_OPCODE_CMP_HR 0x4500
870 
871 #define T_OPCODE_LDR_PC 0x4800
872 #define T_OPCODE_LDR_SP 0x9800
873 #define T_OPCODE_STR_SP 0x9000
874 #define T_OPCODE_LDR_IW 0x6800
875 #define T_OPCODE_STR_IW 0x6000
876 #define T_OPCODE_LDR_IH 0x8800
877 #define T_OPCODE_STR_IH 0x8000
878 #define T_OPCODE_LDR_IB 0x7800
879 #define T_OPCODE_STR_IB 0x7000
880 #define T_OPCODE_LDR_RW 0x5800
881 #define T_OPCODE_STR_RW 0x5000
882 #define T_OPCODE_LDR_RH 0x5a00
883 #define T_OPCODE_STR_RH 0x5200
884 #define T_OPCODE_LDR_RB 0x5c00
885 #define T_OPCODE_STR_RB 0x5400
886 
887 #define T_OPCODE_PUSH	0xb400
888 #define T_OPCODE_POP	0xbc00
889 
890 #define T_OPCODE_BRANCH 0xe000
891 
892 #define THUMB_SIZE	2	/* Size of thumb instruction.  */
893 #define THUMB_PP_PC_LR 0x0100
894 #define THUMB_LOAD_BIT 0x0800
895 #define THUMB2_LOAD_BIT 0x00100000
896 
897 #define BAD_SYNTAX	_("syntax error")
898 #define BAD_ARGS	_("bad arguments to instruction")
899 #define BAD_SP          _("r13 not allowed here")
900 #define BAD_PC		_("r15 not allowed here")
901 #define BAD_ODD		_("Odd register not allowed here")
902 #define BAD_EVEN	_("Even register not allowed here")
903 #define BAD_COND	_("instruction cannot be conditional")
904 #define BAD_OVERLAP	_("registers may not be the same")
905 #define BAD_HIREG	_("lo register required")
906 #define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
907 #define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
908 #define BAD_BRANCH	_("branch must be last instruction in IT block")
909 #define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
910 #define BAD_NO_VPT	_("instruction not allowed in VPT block")
911 #define BAD_NOT_IT	_("instruction not allowed in IT block")
912 #define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
913 #define BAD_FPU		_("selected FPU does not support instruction")
914 #define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
915 #define BAD_OUT_VPT	\
916 	_("vector predicated instruction should be in VPT/VPST block")
917 #define BAD_IT_COND	_("incorrect condition in IT block")
918 #define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
919 #define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
920 #define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
921 #define BAD_PC_ADDRESSING \
922 	_("cannot use register index with PC-relative addressing")
923 #define BAD_PC_WRITEBACK \
924 	_("cannot use writeback with PC-relative addressing")
925 #define BAD_RANGE	_("branch out of range")
926 #define BAD_FP16	_("selected processor does not support fp16 instruction")
927 #define BAD_BF16	_("selected processor does not support bf16 instruction")
928 #define BAD_CDE	_("selected processor does not support cde instruction")
929 #define BAD_CDE_COPROC	_("coprocessor for insn is not enabled for cde")
930 #define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
931 #define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
932 #define MVE_NOT_IT	_("Warning: instruction is UNPREDICTABLE in an IT " \
933 			  "block")
934 #define MVE_NOT_VPT	_("Warning: instruction is UNPREDICTABLE in a VPT " \
935 			  "block")
936 #define MVE_BAD_PC	_("Warning: instruction is UNPREDICTABLE with PC" \
937 			  " operand")
938 #define MVE_BAD_SP	_("Warning: instruction is UNPREDICTABLE with SP" \
939 			  " operand")
940 #define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
941 #define BAD_MVE_AUTO	\
942   _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
943     " use a valid -march or -mcpu option.")
944 #define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
945 			  "and source operands makes instruction UNPREDICTABLE")
946 #define BAD_EL_TYPE	_("bad element type for instruction")
947 #define MVE_BAD_QREG	_("MVE vector register Q[0..7] expected")
948 
/* Hash tables mapping the various textual namespaces (mnemonics,
   condition codes, shift names, PSR names, register names,
   relocation names, barrier options) onto their table entries.
   They are populated during assembler start-up (not visible in
   this chunk).  */
static htab_t  arm_ops_hsh;
static htab_t  arm_cond_hsh;
static htab_t  arm_vcond_hsh;
static htab_t  arm_shift_hsh;
static htab_t  arm_psr_hsh;
static htab_t  arm_v7m_psr_hsh;
static htab_t  arm_reg_hsh;
static htab_t  arm_reloc_hsh;
static htab_t  arm_barrier_opt_hsh;

/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

symbolS *  last_label_seen;
static int label_is_thumb_function_name = false;

/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* The constants waiting to be emitted.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Index of the first unused slot in LITERALS.  */
  unsigned int	         next_free_entry;
  unsigned int	         id;
  symbolS *	         symbol;
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source location of each literal, for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the singly-linked list of all pools.  */
  struct literal_pool *  next;
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;

/* State machine for the .asmfunc/.endasmfunc directives (handled
   elsewhere in this file).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;

/* The current IT/VPT predication block state.  For ELF it lives in
   the per-segment data so that it follows section switches.  */
#ifdef OBJ_ELF
#  define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif
1007 
1008 static inline int
now_pred_compatible(int cond)1009 now_pred_compatible (int cond)
1010 {
1011   return (cond & ~1) == (now_pred.cc & ~1);
1012 }
1013 
1014 static inline int
conditional_insn(void)1015 conditional_insn (void)
1016 {
1017   return inst.cond != COND_ALWAYS;
1018 }
1019 
/* Forward declarations for the IT/VPT predication state machine.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);

/* Record TYPE as the predication type of the current instruction and
   run the predication state machine.  NOTE: on failure this macro
   executes `return' from the *calling* function.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, but for use in functions that return a
   value: FAILRET is returned from the caller on failure.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {                                           \
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as the last one of a predicated
   block, picking the variant appropriate to inst.cond.  */
#define set_pred_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_pred_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
1055 
/* Toggle bit POS in VALUE, yielding the new value (VALUE itself is
   not modified).  Both arguments are fully parenthesized so that the
   macro is safe with compound expressions such as TOGGLE_BIT (a + b,
   n), and an unsigned constant is shifted to avoid undefined
   behaviour when POS is the sign-bit position.  */
#define TOGGLE_BIT(value, pos) ((value) ^ (1u << (pos)))
1058 
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Advance STR past at most ONE space character (note: `if', not
   `while').  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

/* Which 16-bit floating point format to assemble for; see the
   constant names (IEEE vs Arm alternative half precision).  */
enum fp_16bit_format
{
  ARM_FP16_FORMAT_IEEE		= 0x1,
  ARM_FP16_FORMAT_ALTERNATIVE	= 0x2,
  ARM_FP16_FORMAT_DEFAULT	= 0x3
};

static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1102 
1103 
1104 static inline int
skip_past_char(char ** str,char c)1105 skip_past_char (char ** str, char c)
1106 {
1107   /* PR gas/14987: Allow for whitespace before the expected character.  */
1108   skip_whitespace (*str);
1109 
1110   if (**str == c)
1111     {
1112       (*str)++;
1113       return SUCCESS;
1114     }
1115   else
1116     return FAIL;
1117 }
1118 
/* Consume a comma (tolerating one leading space); SUCCESS/FAIL as
   for skip_past_char.  */
#define skip_past_comma(str) skip_past_char (str, ',')

/* Arithmetic expressions (possibly involving symbols).	 */

/* Return TRUE if anything in the expression is a bignum.  */
1124 
1125 static bool
walk_no_bignums(symbolS * sp)1126 walk_no_bignums (symbolS * sp)
1127 {
1128   if (symbol_get_value_expression (sp)->X_op == O_big)
1129     return true;
1130 
1131   if (symbol_get_value_expression (sp)->X_add_symbol)
1132     {
1133       return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1134 	      || (symbol_get_value_expression (sp)->X_op_symbol
1135 		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1136     }
1137 
1138   return false;
1139 }
1140 
/* True while my_get_expression is running; md_operand consults this
   to know whether it may flag a bad operand as O_illegal.  */
static bool in_my_get_expression = false;

/* Third argument to my_get_expression.	 */
#define GE_NO_PREFIX 0
#define GE_IMM_PREFIX 1
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1150 
/* Parse an expression from *STR into EP, handling an immediate
   prefix ('#' or '$') according to PREFIX_MODE (one of the GE_*
   values above).  On success *STR is advanced past the expression
   and SUCCESS is returned.

   NOTE(review): the failure paths are inconsistent -- a missing '#'
   prefix returns FAIL while a bad expression or bignum returns 1.
   Callers must treat any non-zero result as failure.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* Temporarily redirect the global input pointer at our string while
     the generic expression parser runs; in_my_get_expression lets
     md_operand know that we are the caller.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = true;
  expression (ep);
  in_my_get_expression = false;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  /* Success: report how far we consumed and restore the input
     pointer.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1220 
1221 /* Turn a string in input_line_pointer into a floating point constant
1222    of type TYPE, and store the appropriate bytes in *LITP.  The number
1223    of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1224    returned, or NULL on OK.
1225 
   Note that fp constants aren't represented in the normal way on the ARM.
1227    In big endian mode, things are as expected.	However, in little endian
1228    mode fp constants are big-endian word-wise, and little-endian byte-wise
1229    within the words.  For example, (double) 1.1 in big endian mode is
1230    the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1231    the byte sequence 99 99 f1 3f 9a 99 99 99.
1232 
1233    ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1234 
const char *
md_atof (int type, char * litP, int * sizeP)
{
  int prec;			/* Number of LITTLENUMs in the result.  */
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* IEEE half precision: a single littlenum.  */
    case 'H':
    case 'h':
      prec = 1;
      break;

    /* If this is a bfloat16, then parse it slightly differently, as it
       does not follow the IEEE specification for floating point numbers
       exactly.  */
    case 'b':
      {
	FLONUM_TYPE generic_float;

	t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

	if (t)
	  input_line_pointer = t;
	else
	  return _("invalid floating point number");

	switch (generic_float.sign)
	  {
	  /* Is +Inf.  */
	  case 'P':
	    words[0] = 0x7f80;
	    break;

	  /* Is -Inf.  */
	  case 'N':
	    words[0] = 0xff80;
	    break;

	  /* Is NaN.  */
	  /* bfloat16 has two types of NaN - quiet and signalling.
	     Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	     signalling NaN's have bit[0] == 0 && fraction != 0.
	     Chosen this specific encoding as it is the same form
	     as used by other IEEE 754 encodings in GAS.  */
	  case 0:
	    words[0] = 0x7fff;
	    break;

	  default:
	    break;
	  }

	/* bfloat16 constants are always two bytes.  */
	*sizeP = 2;

	md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

	return NULL;
      }
    /* Single precision.  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* Double precision.  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    /* Extended precision.  */
    case 'x':
    case 'X':
      prec = 5;
      break;

    /* 'p'/'P' are given the same littlenum count as extended
       precision here.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  /* A single littlenum (prec == 1) needs no reordering; big-endian
     targets emit littlenums most-significant first.  */
  if (target_big_endian || prec == 1)
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    /* Pure little-endian FP: emit the littlenums in reverse order.  */
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1356 
1357 /* We handle all bad expressions here, so that we can report the faulty
1358    instruction in the error message.  */
1359 
1360 void
md_operand(expressionS * exp)1361 md_operand (expressionS * exp)
1362 {
1363   if (in_my_get_expression)
1364     exp->X_op = O_illegal;
1365 }
1366 
1367 /* Immediate values.  */
1368 
#ifdef OBJ_ELF
/* Generic immediate-value read function for use in directives.
   Accepts anything that 'expression' can fold to a constant.
   On success *VAL receives the number and SUCCESS is returned;
   otherwise a diagnostic is emitted, the rest of the line is
   discarded, and FAIL is returned.  */

static int
immediate_for_directive (int *val)
{
  expressionS exp;

  exp.X_op = O_illegal;

  /* The value must be introduced by an immediate prefix.  */
  if (is_immediate_prefix (*input_line_pointer))
    {
      ++input_line_pointer;
      expression (&exp);
    }

  if (exp.X_op != O_constant)
    {
      as_bad (_("expected #constant"));
      ignore_rest_of_line ();
      return FAIL;
    }

  *val = exp.X_add_number;
  return SUCCESS;
}
#endif
1396 
1397 /* Register parsing.  */
1398 
1399 /* Generic register parser.  CCP points to what should be the
1400    beginning of a register name.  If it is indeed a valid register
1401    name, advance CCP over it and return the reg_entry structure;
1402    otherwise return NULL.  Does not issue diagnostics.	*/
1403 
1404 static struct reg_entry *
arm_reg_parse_multi(char ** ccp)1405 arm_reg_parse_multi (char **ccp)
1406 {
1407   char *start = *ccp;
1408   char *p;
1409   struct reg_entry *reg;
1410 
1411   skip_whitespace (start);
1412 
1413 #ifdef REGISTER_PREFIX
1414   if (*start != REGISTER_PREFIX)
1415     return NULL;
1416   start++;
1417 #endif
1418 #ifdef OPTIONAL_REGISTER_PREFIX
1419   if (*start == OPTIONAL_REGISTER_PREFIX)
1420     start++;
1421 #endif
1422 
1423   p = start;
1424   if (!ISALPHA (*p) || !is_name_beginner (*p))
1425     return NULL;
1426 
1427   do
1428     p++;
1429   while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1430 
1431   reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start);
1432 
1433   if (!reg)
1434     return NULL;
1435 
1436   *ccp = p;
1437   return reg;
1438 }
1439 
1440 static int
arm_reg_alt_syntax(char ** ccp,char * start,struct reg_entry * reg,enum arm_reg_type type)1441 arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1442 		    enum arm_reg_type type)
1443 {
1444   /* Alternative syntaxes are accepted for a few register classes.  */
1445   switch (type)
1446     {
1447     case REG_TYPE_MVF:
1448     case REG_TYPE_MVD:
1449     case REG_TYPE_MVFX:
1450     case REG_TYPE_MVDX:
1451       /* Generic coprocessor register names are allowed for these.  */
1452       if (reg && reg->type == REG_TYPE_CN)
1453 	return reg->number;
1454       break;
1455 
1456     case REG_TYPE_CP:
1457       /* For backward compatibility, a bare number is valid here.  */
1458       {
1459 	unsigned long processor = strtoul (start, ccp, 10);
1460 	if (*ccp != start && processor <= 15)
1461 	  return processor;
1462       }
1463       /* Fall through.  */
1464 
1465     case REG_TYPE_MMXWC:
1466       /* WC includes WCG.  ??? I'm not sure this is true for all
1467 	 instructions that take WC registers.  */
1468       if (reg && reg->type == REG_TYPE_MMXWCG)
1469 	return reg->number;
1470       break;
1471 
1472     default:
1473       break;
1474     }
1475 
1476   return FAIL;
1477 }
1478 
1479 /* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1480    return value is the register number or FAIL.  */
1481 
1482 static int
arm_reg_parse(char ** ccp,enum arm_reg_type type)1483 arm_reg_parse (char **ccp, enum arm_reg_type type)
1484 {
1485   char *start = *ccp;
1486   struct reg_entry *reg = arm_reg_parse_multi (ccp);
1487   int ret;
1488 
1489   /* Do not allow a scalar (reg+index) to parse as a register.  */
1490   if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1491     return FAIL;
1492 
1493   if (reg && reg->type == type)
1494     return reg->number;
1495 
1496   if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1497     return ret;
1498 
1499   *ccp = start;
1500   return FAIL;
1501 }
1502 
1503 /* Parse a Neon type specifier. *STR should point at the leading '.'
1504    character. Does no verification at this stage that the type fits the opcode
1505    properly. E.g.,
1506 
1507      .i32.i32.s16
1508      .s32.f32
1509      .u16
1510 
1511    Can all be legally parsed by this function.
1512 
1513    Fills in neon_type struct pointer with parsed information, and updates STR
1514    to point after the parsed type specifier. Returns SUCCESS if this was a legal
1515    type, FAIL if not.  */
1516 
1517 static int
parse_neon_type(struct neon_type * type,char ** str)1518 parse_neon_type (struct neon_type *type, char **str)
1519 {
1520   char *ptr = *str;
1521 
1522   if (type)
1523     type->elems = 0;
1524 
1525   while (type->elems < NEON_MAX_TYPE_ELS)
1526     {
1527       enum neon_el_type thistype = NT_untyped;
1528       unsigned thissize = -1u;
1529 
1530       if (*ptr != '.')
1531 	break;
1532 
1533       ptr++;
1534 
1535       /* Just a size without an explicit type.  */
1536       if (ISDIGIT (*ptr))
1537 	goto parsesize;
1538 
1539       switch (TOLOWER (*ptr))
1540 	{
1541 	case 'i': thistype = NT_integer; break;
1542 	case 'f': thistype = NT_float; break;
1543 	case 'p': thistype = NT_poly; break;
1544 	case 's': thistype = NT_signed; break;
1545 	case 'u': thistype = NT_unsigned; break;
1546 	case 'd':
1547 	  thistype = NT_float;
1548 	  thissize = 64;
1549 	  ptr++;
1550 	  goto done;
1551 	case 'b':
1552 	  thistype = NT_bfloat;
1553 	  switch (TOLOWER (*(++ptr)))
1554 	    {
1555 	    case 'f':
1556 	      ptr += 1;
1557 	      thissize = strtoul (ptr, &ptr, 10);
1558 	      if (thissize != 16)
1559 		{
1560 		  as_bad (_("bad size %d in type specifier"), thissize);
1561 		  return FAIL;
1562 		}
1563 	      goto done;
1564 	    case '0': case '1': case '2': case '3': case '4':
1565 	    case '5': case '6': case '7': case '8': case '9':
1566 	    case ' ': case '.':
1567 	      as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1568 	      return FAIL;
1569 	    default:
1570 	      break;
1571 	    }
1572 	  break;
1573 	default:
1574 	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1575 	  return FAIL;
1576 	}
1577 
1578       ptr++;
1579 
1580       /* .f is an abbreviation for .f32.  */
1581       if (thistype == NT_float && !ISDIGIT (*ptr))
1582 	thissize = 32;
1583       else
1584 	{
1585 	parsesize:
1586 	  thissize = strtoul (ptr, &ptr, 10);
1587 
1588 	  if (thissize != 8 && thissize != 16 && thissize != 32
1589 	      && thissize != 64)
1590 	    {
1591 	      as_bad (_("bad size %d in type specifier"), thissize);
1592 	      return FAIL;
1593 	    }
1594 	}
1595 
1596       done:
1597       if (type)
1598 	{
1599 	  type->el[type->elems].type = thistype;
1600 	  type->el[type->elems].size = thissize;
1601 	  type->elems++;
1602 	}
1603     }
1604 
1605   /* Empty/missing type is not a successful parse.  */
1606   if (type->elems == 0)
1607     return FAIL;
1608 
1609   *str = ptr;
1610 
1611   return SUCCESS;
1612 }
1613 
1614 /* Errors may be set multiple times during parsing or bit encoding
1615    (particularly in the Neon bits), but usually the earliest error which is set
1616    will be the most meaningful. Avoid overwriting it with later (cascading)
1617    errors by calling this function.  */
1618 
1619 static void
first_error(const char * err)1620 first_error (const char *err)
1621 {
1622   if (!inst.error)
1623     inst.error = err;
1624 }
1625 
1626 /* Parse a single type, e.g. ".s32", leading period included.  */
1627 static int
parse_neon_operand_type(struct neon_type_el * vectype,char ** ccp)1628 parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1629 {
1630   char *str = *ccp;
1631   struct neon_type optype;
1632 
1633   if (*str == '.')
1634     {
1635       if (parse_neon_type (&optype, &str) == SUCCESS)
1636 	{
1637 	  if (optype.elems == 1)
1638 	    *vectype = optype.el[0];
1639 	  else
1640 	    {
1641 	      first_error (_("only one type should be specified for operand"));
1642 	      return FAIL;
1643 	    }
1644 	}
1645       else
1646 	{
1647 	  first_error (_("vector type expected"));
1648 	  return FAIL;
1649 	}
1650     }
1651   else
1652     return FAIL;
1653 
1654   *ccp = str;
1655 
1656   return SUCCESS;
1657 }
1658 
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

/* "[]" -- the operation applies to all lanes.  */
#define NEON_ALL_LANES		15
/* Marker used for interleaved lane specifications.  */
#define NEON_INTERLEAVE_LANES	14
1664 
1665 /* Record a use of the given feature.  */
1666 static void
record_feature_use(const arm_feature_set * feature)1667 record_feature_use (const arm_feature_set *feature)
1668 {
1669   if (thumb_mode)
1670     ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
1671   else
1672     ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
1673 }
1674 
1675 /* If the given feature available in the selected CPU, mark it as used.
1676    Returns TRUE iff feature is available.  */
1677 static bool
mark_feature_used(const arm_feature_set * feature)1678 mark_feature_used (const arm_feature_set *feature)
1679 {
1680 
1681   /* Do not support the use of MVE only instructions when in auto-detection or
1682      -march=all.  */
1683   if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1684       && ARM_CPU_IS_ANY (cpu_variant))
1685     {
1686       first_error (BAD_MVE_AUTO);
1687       return false;
1688     }
1689   /* Ensure the option is valid on the current architecture.  */
1690   if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1691     return false;
1692 
1693   /* Add the appropriate architecture feature for the barrier option used.
1694      */
1695   record_feature_use (feature);
1696 
1697   return true;
1698 }
1699 
1700 /* Parse either a register or a scalar, with an optional type. Return the
1701    register number, and optionally fill in the actual type of the register
1702    when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1703    type/index information in *TYPEINFO.  */
1704 
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start from "no type, no index" so that callers always receive a
     fully initialised TYPEINFO.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  if (type == REG_TYPE_MQ)
    {
      /* MVE Q registers are only meaningful when MVE is enabled.  */
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* REG cannot be NULL here (handled above); the extra check is
	 defensive.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Pick up any type/index information attached to a register alias.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix may be appended, but must not conflict
     with a type already attached to the alias.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional scalar index: "[<n>]", or "[]" meaning all lanes.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  /* The lane number must be a compile-time constant.  */
	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1837 
1838 /* Like arm_reg_parse, but also allow the following extra features:
1839     - If RTYPE is non-zero, return the (possibly restricted) type of the
1840       register (e.g. Neon double or quad reg when either has been requested).
1841     - If this is a Neon vector type with additional type information, fill
1842       in the struct pointed to by VECTYPE (if non-NULL).
1843    This function will fault on encountering a scalar.  */
1844 
1845 static int
arm_typed_reg_parse(char ** ccp,enum arm_reg_type type,enum arm_reg_type * rtype,struct neon_type_el * vectype)1846 arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1847 		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1848 {
1849   struct neon_typed_alias atype;
1850   char *str = *ccp;
1851   int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1852 
1853   if (reg == FAIL)
1854     return FAIL;
1855 
1856   /* Do not allow regname(... to parse as a register.  */
1857   if (*str == '(')
1858     return FAIL;
1859 
1860   /* Do not allow a scalar (reg+index) to parse as a register.  */
1861   if ((atype.defined & NTA_HASINDEX) != 0)
1862     {
1863       first_error (_("register operand expected, but got scalar"));
1864       return FAIL;
1865     }
1866 
1867   if (vectype)
1868     *vectype = atype.eltype;
1869 
1870   *ccp = str;
1871 
1872   return reg;
1873 }
1874 
/* Decode the reg*16+index packing produced by parse_scalar below.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1877 
1878 /* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1879    have enough information to be able to do a good job bounds-checking. So, we
1880    just do easy checks here, and do further checks later.  */
1881 
1882 static int
parse_scalar(char ** ccp,int elsize,struct neon_type_el * type,enum arm_reg_type reg_type)1883 parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1884 	      arm_reg_type reg_type)
1885 {
1886   int reg;
1887   char *str = *ccp;
1888   struct neon_typed_alias atype;
1889   unsigned reg_size;
1890 
1891   reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1892 
1893   switch (reg_type)
1894     {
1895     case REG_TYPE_VFS:
1896       reg_size = 32;
1897       break;
1898     case REG_TYPE_VFD:
1899       reg_size = 64;
1900       break;
1901     case REG_TYPE_MQ:
1902       reg_size = 128;
1903       break;
1904     default:
1905       gas_assert (0);
1906       return FAIL;
1907     }
1908 
1909   if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1910     return FAIL;
1911 
1912   if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1913     {
1914       first_error (_("scalar must have an index"));
1915       return FAIL;
1916     }
1917   else if (atype.index >= reg_size / elsize)
1918     {
1919       first_error (_("scalar index out of range"));
1920       return FAIL;
1921     }
1922 
1923   if (type)
1924     *type = atype.eltype;
1925 
1926   *ccp = str;
1927 
1928   return reg * 16 + atype.index;
1929 }
1930 
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,	     /* Core registers, e.g. {r0-r3, lr}.  */
  REGLIST_CLRM,	     /* CLRM list: core registers or APSR, but not SP/PC.  */
  REGLIST_VFP_S,     /* VFP single-precision registers (s0-s31).  */
  REGLIST_VFP_S_VPR, /* As REGLIST_VFP_S, with VPR expected last.  */
  REGLIST_VFP_D,     /* VFP/Neon double-precision registers.  */
  REGLIST_VFP_D_VPR, /* As REGLIST_VFP_D, with VPR expected last.  */
  REGLIST_NEON_D     /* Neon D/Q registers (Q<n> counts as two Ds).  */
};
1943 
1944 /* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
1945 
1946 static long
parse_reg_list(char ** strp,enum reg_list_els etype)1947 parse_reg_list (char ** strp, enum reg_list_els etype)
1948 {
1949   char *str = *strp;
1950   long range = 0;
1951   int another_range;
1952 
1953   gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1954 
1955   /* We come back here if we get ranges concatenated by '+' or '|'.  */
1956   do
1957     {
1958       skip_whitespace (str);
1959 
1960       another_range = 0;
1961 
1962       if (*str == '{')
1963 	{
1964 	  int in_range = 0;
1965 	  int cur_reg = -1;
1966 
1967 	  str++;
1968 	  do
1969 	    {
1970 	      int reg;
1971 	      const char apsr_str[] = "apsr";
1972 	      int apsr_str_len = strlen (apsr_str);
1973 
1974 	      reg = arm_reg_parse (&str, REG_TYPE_RN);
1975 	      if (etype == REGLIST_CLRM)
1976 		{
1977 		  if (reg == REG_SP || reg == REG_PC)
1978 		    reg = FAIL;
1979 		  else if (reg == FAIL
1980 			   && !strncasecmp (str, apsr_str, apsr_str_len)
1981 			   && !ISALPHA (*(str + apsr_str_len)))
1982 		    {
1983 		      reg = 15;
1984 		      str += apsr_str_len;
1985 		    }
1986 
1987 		  if (reg == FAIL)
1988 		    {
1989 		      first_error (_("r0-r12, lr or APSR expected"));
1990 		      return FAIL;
1991 		    }
1992 		}
1993 	      else /* etype == REGLIST_RN.  */
1994 		{
1995 		  if (reg == FAIL)
1996 		    {
1997 		      first_error (_(reg_expected_msgs[REGLIST_RN]));
1998 		      return FAIL;
1999 		    }
2000 		}
2001 
2002 	      if (in_range)
2003 		{
2004 		  int i;
2005 
2006 		  if (reg <= cur_reg)
2007 		    {
2008 		      first_error (_("bad range in register list"));
2009 		      return FAIL;
2010 		    }
2011 
2012 		  for (i = cur_reg + 1; i < reg; i++)
2013 		    {
2014 		      if (range & (1 << i))
2015 			as_tsktsk
2016 			  (_("Warning: duplicated register (r%d) in register list"),
2017 			   i);
2018 		      else
2019 			range |= 1 << i;
2020 		    }
2021 		  in_range = 0;
2022 		}
2023 
2024 	      if (range & (1 << reg))
2025 		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
2026 			   reg);
2027 	      else if (reg <= cur_reg)
2028 		as_tsktsk (_("Warning: register range not in ascending order"));
2029 
2030 	      range |= 1 << reg;
2031 	      cur_reg = reg;
2032 	    }
2033 	  while (skip_past_comma (&str) != FAIL
2034 		 || (in_range = 1, *str++ == '-'));
2035 	  str--;
2036 
2037 	  if (skip_past_char (&str, '}') == FAIL)
2038 	    {
2039 	      first_error (_("missing `}'"));
2040 	      return FAIL;
2041 	    }
2042 	}
2043       else if (etype == REGLIST_RN)
2044 	{
2045 	  expressionS exp;
2046 
2047 	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
2048 	    return FAIL;
2049 
2050 	  if (exp.X_op == O_constant)
2051 	    {
2052 	      if (exp.X_add_number
2053 		  != (exp.X_add_number & 0x0000ffff))
2054 		{
2055 		  inst.error = _("invalid register mask");
2056 		  return FAIL;
2057 		}
2058 
2059 	      if ((range & exp.X_add_number) != 0)
2060 		{
2061 		  int regno = range & exp.X_add_number;
2062 
2063 		  regno &= -regno;
2064 		  regno = (1 << regno) - 1;
2065 		  as_tsktsk
2066 		    (_("Warning: duplicated register (r%d) in register list"),
2067 		     regno);
2068 		}
2069 
2070 	      range |= exp.X_add_number;
2071 	    }
2072 	  else
2073 	    {
2074 	      if (inst.relocs[0].type != 0)
2075 		{
2076 		  inst.error = _("expression too complex");
2077 		  return FAIL;
2078 		}
2079 
2080 	      memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2081 	      inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2082 	      inst.relocs[0].pc_rel = 0;
2083 	    }
2084 	}
2085 
2086       if (*str == '|' || *str == '+')
2087 	{
2088 	  str++;
2089 	  another_range = 1;
2090 	}
2091     }
2092   while (another_range);
2093 
2094   *strp = str;
2095   return range;
2096 }
2097 
2098 /* Parse a VFP register list.  If the string is invalid return FAIL.
2099    Otherwise return the number of registers, and set PBASE to the first
2100    register.  Parses registers of type ETYPE.
2101    If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2102      - Q registers can be used to specify pairs of D registers
2103      - { } can be omitted from around a singleton register list
2104 	 FIXME: This is not implemented, as it would require backtracking in
2105 	 some cases, e.g.:
2106 	   vtbl.8 d3,d4,d5
2107 	 This could be done (the meaning isn't really ambiguous), but doesn't
2108 	 fit in well with the current parsing framework.
2109      - 32 D registers may be used (also true for VFPv3).
2110    FIXME: Types are ignored in these register lists, which is probably a
2111    bug.  */
2112 
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bool *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  /* Set once VPR has been consumed; VPR must be the final element.  */
  bool vpr_seen = false;
  bool expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Select the register class and (for S registers) the register count;
     the D-register count depends on the FPU and is set below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Sentinel: any register actually parsed will be numbered lower.  */
  base_reg = max_regs;
  *partial_match = false;

  do
    {
      unsigned int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      size_t vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = true;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      *partial_match = true;
      /* VPR contributes nothing to the register mask; anything after it
	 is rejected by the vpr_seen branch above.  */
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Accumulate every register between the range endpoints.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  str++;	/* Step past what should be the closing '}'.  */

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2326 
2327 /* True if two alias types are the same.  */
2328 
2329 static bool
neon_alias_types_same(struct neon_typed_alias * a,struct neon_typed_alias * b)2330 neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2331 {
2332   if (!a && !b)
2333     return true;
2334 
2335   if (!a || !b)
2336     return false;
2337 
2338   if (a->defined != b->defined)
2339     return false;
2340 
2341   if ((a->defined & NTA_HASTYPE) != 0
2342       && (a->eltype.type != b->eltype.type
2343 	  || a->eltype.size != b->eltype.size))
2344     return false;
2345 
2346   if ((a->defined & NTA_HASINDEX) != 0
2347       && (a->index != b->index))
2348     return false;
2349 
2350   return true;
2351 }
2352 
2353 /* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2354    The base register is put in *PBASE.
2355    The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2356    the return value.
2357    The register stride (minus one) is put in bit 4 of the return value.
2358    Bits [6:5] encode the list length (minus one).
2359    The type of the list elements is put in *ELTYPE, if non-NULL.  */
2360 
#define NEON_LANE(X)		((X) & 0xf)
#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)

static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;		/* First register; -1 until seen.  */
  int reg_incr = -1;		/* Register stride; -1 until known.  */
  int count = 0;		/* Registers seen (Q counts as 2 in Neon mode).  */
  int lane = -1;		/* Lane index or NEON_*_LANES; -1 until known.  */
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* First element fixes the base register and the type all
	     following elements must match.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* Second element fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every register in the inclusive range.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane [3:0], stride-1 [4] and length-1 [6:5], matching the
     NEON_LANE / NEON_REG_STRIDE / NEON_REGLIST_LENGTH accessors.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2525 
/* Parse an explicit relocation suffix on an expression: either nothing,
   or a word in parentheses.  Note that if !OBJ_ELF, arm_reloc_hsh
   contains no entries, so this function can only succeed if there is no
   () after the word.  Returns -1 on error, BFD_RELOC_UNUSED if there
   wasn't any suffix.  */

static int
parse_reloc (char **str)
{
  char *start, *end;
  struct reloc_entry *entry;

  if (**str != '(')
    return BFD_RELOC_UNUSED;

  /* Scan to the closing parenthesis; a ',' or end of string first
     means the suffix is malformed.  */
  start = *str + 1;
  for (end = start; *end != '\0' && *end != ')' && *end != ','; end++)
    ;
  if (*end != ')')
    return -1;

  entry = (struct reloc_entry *) str_hash_find_n (arm_reloc_hsh, start,
						  end - start);
  if (entry == NULL)
    return -1;

  *str = end + 1;
  return entry->reloc;
}
2556 
2557 /* Directives: register aliases.  */
2558 
2559 static struct reg_entry *
insert_reg_alias(char * str,unsigned number,int type)2560 insert_reg_alias (char *str, unsigned number, int type)
2561 {
2562   struct reg_entry *new_reg;
2563   const char *name;
2564 
2565   if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0)
2566     {
2567       if (new_reg->builtin)
2568 	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2569 
2570       /* Only warn about a redefinition if it's not defined as the
2571 	 same register.	 */
2572       else if (new_reg->number != number || new_reg->type != type)
2573 	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2574 
2575       return NULL;
2576     }
2577 
2578   name = xstrdup (str);
2579   new_reg = XNEW (struct reg_entry);
2580 
2581   new_reg->name = name;
2582   new_reg->number = number;
2583   new_reg->type = type;
2584   new_reg->builtin = false;
2585   new_reg->neon = NULL;
2586 
2587   str_hash_insert (arm_reg_hsh, name, new_reg, 0);
2588 
2589   return new_reg;
2590 }
2591 
2592 static void
insert_neon_reg_alias(char * str,int number,int type,struct neon_typed_alias * atype)2593 insert_neon_reg_alias (char *str, int number, int type,
2594 		       struct neon_typed_alias *atype)
2595 {
2596   struct reg_entry *reg = insert_reg_alias (str, number, type);
2597 
2598   if (!reg)
2599     {
2600       first_error (_("attempt to redefine typed alias"));
2601       return;
2602     }
2603 
2604   if (atype)
2605     {
2606       reg->neon = XNEW (struct neon_typed_alias);
2607       *reg->neon = *atype;
2608     }
2609 }
2610 
2611 /* Look for the .req directive.	 This is of the form:
2612 
2613 	new_register_name .req existing_register_name
2614 
2615    If we find one, or if it looks sufficiently like one that we want to
2616    handle any error here, return TRUE.  Otherwise return FALSE.  */
2617 
static bool
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (!startswith (oldname, " .req "))
    return false;

  oldname += 6;		/* Step over " .req ".  */
  if (*oldname == '\0')
    return false;

  old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return true;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only add the upper-case variant when it differs from the name
	 as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return true;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return true;
}
2690 
2691 /* Create a Neon typed/indexed register alias using directives, e.g.:
2692      X .dn d5.s32[1]
2693      Y .qn 6.s16
2694      Z .dn d7
2695      T .dn Z[0]
2696    These typed registers can be used instead of the types specified after the
2697    Neon mnemonic, so long as all operands given have types. Types can also be
2698    specified directly, e.g.:
2699      vadd d0.s32, d1.s32, d2.s32  */
2700 
static bool
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* ".dn" defines a D-register alias, ".qn" a Q-register alias.  */
  if (startswith (p, " .dn "))
    basetype = REG_TYPE_VFD;
  else if (startswith (p, " .qn "))
    basetype = REG_TYPE_NQ;
  else
    return false;

  p += 5;		/* Step over " .dn " / " .qn ".  */

  if (*p == '\0')
    return false;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return false;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return false;
	}
      basereg = &mybasereg;
      /* Q<n> is recorded as 2*n, i.e. in units of D registers (cf. the
	 same convention in parse_vfp_reg_list).  */
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Inherit any type/index information already attached to the base.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return false;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return false;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return false;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return false;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return false;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  /* Only add the variant when it differs from the name as written.  */
  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return true;
}
2839 
/* Should never be called, as .req goes between the alias and the
   register name, not at the beginning of the line.  (The valid form is
   handled by create_register_alias.)  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2848 
/* As for s_req: the valid ".dn" form ("alias .dn reg...") is handled
   by create_neon_reg_alias; reaching here means misuse.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2854 
/* As for s_req: the valid ".qn" form ("alias .qn reg...") is handled
   by create_neon_reg_alias; reaching here means misuse.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2860 
2861 /* The .unreq directive deletes an alias which was previously defined
2862    by .req.  For example:
2863 
2864        my_alias .req r11
2865        .unreq my_alias	  */
2866 
2867 static void
s_unreq(int a ATTRIBUTE_UNUSED)2868 s_unreq (int a ATTRIBUTE_UNUSED)
2869 {
2870   char * name;
2871   char saved_char;
2872 
2873   name = input_line_pointer;
2874 
2875   while (*input_line_pointer != 0
2876 	 && *input_line_pointer != ' '
2877 	 && *input_line_pointer != '\n')
2878     ++input_line_pointer;
2879 
2880   saved_char = *input_line_pointer;
2881   *input_line_pointer = 0;
2882 
2883   if (!*name)
2884     as_bad (_("invalid syntax for .unreq directive"));
2885   else
2886     {
2887       struct reg_entry *reg
2888 	= (struct reg_entry *) str_hash_find (arm_reg_hsh, name);
2889 
2890       if (!reg)
2891 	as_bad (_("unknown register alias '%s'"), name);
2892       else if (reg->builtin)
2893 	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2894 		 name);
2895       else
2896 	{
2897 	  char * p;
2898 	  char * nbuf;
2899 
2900 	  str_hash_delete (arm_reg_hsh, name);
2901 	  free ((char *) reg->name);
2902 	  free (reg->neon);
2903 	  free (reg);
2904 
2905 	  /* Also locate the all upper case and all lower case versions.
2906 	     Do not complain if we cannot find one or the other as it
2907 	     was probably deleted above.  */
2908 
2909 	  nbuf = strdup (name);
2910 	  for (p = nbuf; *p; p++)
2911 	    *p = TOUPPER (*p);
2912 	  reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2913 	  if (reg)
2914 	    {
2915 	      str_hash_delete (arm_reg_hsh, nbuf);
2916 	      free ((char *) reg->name);
2917 	      free (reg->neon);
2918 	      free (reg);
2919 	    }
2920 
2921 	  for (p = nbuf; *p; p++)
2922 	    *p = TOLOWER (*p);
2923 	  reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2924 	  if (reg)
2925 	    {
2926 	      str_hash_delete (arm_reg_hsh, nbuf);
2927 	      free ((char *) reg->name);
2928 	      free (reg->neon);
2929 	      free (reg);
2930 	    }
2931 
2932 	  free (nbuf);
2933 	}
2934     }
2935 
2936   *input_line_pointer = saved_char;
2937   demand_empty_rest_of_line ();
2938 }
2939 
2940 /* Directives: Instruction set selection.  */
2941 
2942 #ifdef OBJ_ELF
2943 /* This code is to handle mapping symbols as defined in the ARM ELF spec.
2944    (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2945    Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2946    and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
2947 
2948 /* Create a new mapping symbol for the transition to STATE.  */
2949 
2950 static void
make_mapping_symbol(enum mstate state,valueT value,fragS * frag)2951 make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
2952 {
2953   symbolS * symbolP;
2954   const char * symname;
2955   int type;
2956 
2957   switch (state)
2958     {
2959     case MAP_DATA:
2960       symname = "$d";
2961       type = BSF_NO_FLAGS;
2962       break;
2963     case MAP_ARM:
2964       symname = "$a";
2965       type = BSF_NO_FLAGS;
2966       break;
2967     case MAP_THUMB:
2968       symname = "$t";
2969       type = BSF_NO_FLAGS;
2970       break;
2971     default:
2972       abort ();
2973     }
2974 
2975   symbolP = symbol_new (symname, now_seg, frag, value);
2976   symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
2977 
2978   switch (state)
2979     {
2980     case MAP_ARM:
2981       THUMB_SET_FUNC (symbolP, 0);
2982       ARM_SET_THUMB (symbolP, 0);
2983       ARM_SET_INTERWORK (symbolP, support_interwork);
2984       break;
2985 
2986     case MAP_THUMB:
2987       THUMB_SET_FUNC (symbolP, 1);
2988       ARM_SET_THUMB (symbolP, 1);
2989       ARM_SET_INTERWORK (symbolP, support_interwork);
2990       break;
2991 
2992     case MAP_DATA:
2993     default:
2994       break;
2995     }
2996 
2997   /* Save the mapping symbols for future reference.  Also check that
2998      we do not place two mapping symbols at the same offset within a
2999      frag.  We'll handle overlap between frags in
3000      check_mapping_symbols.
3001 
3002      If .fill or other data filling directive generates zero sized data,
3003      the mapping symbol for the following code will have the same value
3004      as the one generated for the data filling directive.  In this case,
3005      we replace the old symbol with the new one at the same address.  */
3006   if (value == 0)
3007     {
3008       if (frag->tc_frag_data.first_map != NULL)
3009 	{
3010 	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
3011 	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
3012 	}
3013       frag->tc_frag_data.first_map = symbolP;
3014     }
3015   if (frag->tc_frag_data.last_map != NULL)
3016     {
3017       know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
3018       if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
3019 	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
3020     }
3021   frag->tc_frag_data.last_map = symbolP;
3022 }
3023 
3024 /* We must sometimes convert a region marked as code to data during
3025    code alignment, if an odd number of bytes have to be padded.  The
3026    code mapping symbol is pushed to an aligned address.  */
3027 
3028 static void
insert_data_mapping_symbol(enum mstate state,valueT value,fragS * frag,offsetT bytes)3029 insert_data_mapping_symbol (enum mstate state,
3030 			    valueT value, fragS *frag, offsetT bytes)
3031 {
3032   /* If there was already a mapping symbol, remove it.  */
3033   if (frag->tc_frag_data.last_map != NULL
3034       && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
3035     {
3036       symbolS *symp = frag->tc_frag_data.last_map;
3037 
3038       if (value == 0)
3039 	{
3040 	  know (frag->tc_frag_data.first_map == symp);
3041 	  frag->tc_frag_data.first_map = NULL;
3042 	}
3043       frag->tc_frag_data.last_map = NULL;
3044       symbol_remove (symp, &symbol_rootP, &symbol_lastP);
3045     }
3046 
3047   make_mapping_symbol (MAP_DATA, value, frag);
3048   make_mapping_symbol (state, value + bytes, frag);
3049 }
3050 
3051 static void mapping_state_2 (enum mstate state, int max_chars);
3052 
3053 /* Set the mapping state to STATE.  Only call this when about to
3054    emit some STATE bytes to the file.  */
3055 
3056 #define TRANSITION(from, to) (mapstate == (from) && state == (to))
3057 void
mapping_state(enum mstate state)3058 mapping_state (enum mstate state)
3059 {
3060   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
3061 
3062   if (mapstate == state)
3063     /* The mapping symbol has already been emitted.
3064        There is nothing else to do.  */
3065     return;
3066 
3067   if (state == MAP_ARM || state == MAP_THUMB)
3068     /*  PR gas/12931
3069 	All ARM instructions require 4-byte alignment.
3070 	(Almost) all Thumb instructions require 2-byte alignment.
3071 
3072 	When emitting instructions into any section, mark the section
3073 	appropriately.
3074 
3075 	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
3076 	but themselves require 2-byte alignment; this applies to some
3077 	PC- relative forms.  However, these cases will involve implicit
3078 	literal pool generation or an explicit .align >=2, both of
3079 	which will cause the section to me marked with sufficient
3080 	alignment.  Thus, we don't handle those cases here.  */
3081     record_alignment (now_seg, state == MAP_ARM ? 2 : 1);
3082 
3083   if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
3084     /* This case will be evaluated later.  */
3085     return;
3086 
3087   mapping_state_2 (state, 0);
3088 }
3089 
3090 /* Same as mapping_state, but MAX_CHARS bytes have already been
3091    allocated.  Put the mapping symbol that far back.  */
3092 
3093 static void
mapping_state_2(enum mstate state,int max_chars)3094 mapping_state_2 (enum mstate state, int max_chars)
3095 {
3096   enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
3097 
3098   if (!SEG_NORMAL (now_seg))
3099     return;
3100 
3101   if (mapstate == state)
3102     /* The mapping symbol has already been emitted.
3103        There is nothing else to do.  */
3104     return;
3105 
3106   if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
3107 	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
3108     {
3109       struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
3110       const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);
3111 
3112       if (add_symbol)
3113 	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
3114     }
3115 
3116   seg_info (now_seg)->tc_segment_info_data.mapstate = state;
3117   make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
3118 }
3119 #undef TRANSITION
3120 #else
3121 #define mapping_state(x) ((void)0)
3122 #define mapping_state_2(x, y) ((void)0)
3123 #endif
3124 
3125 /* Find the real, Thumb encoded start of a Thumb function.  */
3126 
3127 #ifdef OBJ_COFF
3128 static symbolS *
find_real_start(symbolS * symbolP)3129 find_real_start (symbolS * symbolP)
3130 {
3131   char *       real_start;
3132   const char * name = S_GET_NAME (symbolP);
3133   symbolS *    new_target;
3134 
3135   /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
3136 #define STUB_NAME ".real_start_of"
3137 
3138   if (name == NULL)
3139     abort ();
3140 
3141   /* The compiler may generate BL instructions to local labels because
3142      it needs to perform a branch to a far away location. These labels
3143      do not have a corresponding ".real_start_of" label.  We check
3144      both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3145      the ".real_start_of" convention for nonlocal branches.  */
3146   if (S_IS_LOCAL (symbolP) || name[0] == '.')
3147     return symbolP;
3148 
3149   real_start = concat (STUB_NAME, name, NULL);
3150   new_target = symbol_find (real_start);
3151   free (real_start);
3152 
3153   if (new_target == NULL)
3154     {
3155       as_warn (_("Failed to find real start of function: %s\n"), name);
3156       new_target = symbolP;
3157     }
3158 
3159   return new_target;
3160 }
3161 #endif
3162 
3163 static void
opcode_select(int width)3164 opcode_select (int width)
3165 {
3166   switch (width)
3167     {
3168     case 16:
3169       if (! thumb_mode)
3170 	{
3171 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3172 	    as_bad (_("selected processor does not support THUMB opcodes"));
3173 
3174 	  thumb_mode = 1;
3175 	  /* No need to force the alignment, since we will have been
3176 	     coming from ARM mode, which is word-aligned.  */
3177 	  record_alignment (now_seg, 1);
3178 	}
3179       break;
3180 
3181     case 32:
3182       if (thumb_mode)
3183 	{
3184 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3185 	    as_bad (_("selected processor does not support ARM opcodes"));
3186 
3187 	  thumb_mode = 0;
3188 
3189 	  if (!need_pass_2)
3190 	    frag_align (2, 0, 0);
3191 
3192 	  record_alignment (now_seg, 1);
3193 	}
3194       break;
3195 
3196     default:
3197       as_bad (_("invalid instruction size selected (%d)"), width);
3198     }
3199 }
3200 
/* Implement the ".arm" directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3207 
/* Implement the ".thumb" directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3214 
3215 static void
s_code(int unused ATTRIBUTE_UNUSED)3216 s_code (int unused ATTRIBUTE_UNUSED)
3217 {
3218   int temp;
3219 
3220   temp = get_absolute_expression ();
3221   switch (temp)
3222     {
3223     case 16:
3224     case 32:
3225       opcode_select (temp);
3226       break;
3227 
3228     default:
3229       as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3230     }
3231 }
3232 
3233 static void
s_force_thumb(int ignore ATTRIBUTE_UNUSED)3234 s_force_thumb (int ignore ATTRIBUTE_UNUSED)
3235 {
3236   /* If we are not already in thumb mode go into it, EVEN if
3237      the target processor does not support thumb instructions.
3238      This is used by gcc/config/arm/lib1funcs.asm for example
3239      to compile interworking support functions even if the
3240      target processor should not support interworking.	*/
3241   if (! thumb_mode)
3242     {
3243       thumb_mode = 2;
3244       record_alignment (now_seg, 1);
3245     }
3246 
3247   demand_empty_rest_of_line ();
3248 }
3249 
/* Implement ".thumb_func": switch to Thumb state and flag the next
   label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = true;
}
3259 
3260 /* Perform a .set directive, but also mark the alias as
3261    being a thumb function.  */
3262 
/* Implement ".thumb_set": perform a .set, then mark the resulting
   symbol as a Thumb function.  EQUIV is non-zero for the .equiv-like
   variant which rejects redefining an already-defined symbol.  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char *    name;
  char	    delim;
  char *    end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim	    = get_symbol_name (& name);
  end_name  = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Re-terminate NAME just long enough to print it, then put the
	 delimiter back so the input buffer is left intact.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF  */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Restore the delimiter overwritten above so the expression after
     the comma parses against an unmodified buffer.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3348 
3349 /* Directives: Mode selection.  */
3350 
3351 /* .syntax [unified|divided] - choose the new unified syntax
3352    (same for Arm and Thumb encoding, modulo slight differences in what
3353    can be represented) or the old divergent syntax for each mode.  */
3354 static void
s_syntax(int unused ATTRIBUTE_UNUSED)3355 s_syntax (int unused ATTRIBUTE_UNUSED)
3356 {
3357   char *name, delim;
3358 
3359   delim = get_symbol_name (& name);
3360 
3361   if (!strcasecmp (name, "unified"))
3362     unified_syntax = true;
3363   else if (!strcasecmp (name, "divided"))
3364     unified_syntax = false;
3365   else
3366     {
3367       as_bad (_("unrecognized syntax mode \"%s\""), name);
3368       return;
3369     }
3370   (void) restore_line_pointer (delim);
3371   demand_empty_rest_of_line ();
3372 }
3373 
3374 /* Directives: sectioning and alignment.  */
3375 
/* Implement the ".bss" directive: switch to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3388 
/* Implement the ".even" directive: pad to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3400 
3401 /* Directives: CodeComposer Studio.  */
3402 
3403 /*  .ref  (for CodeComposer Studio syntax only).  */
3404 static void
s_ccs_ref(int unused ATTRIBUTE_UNUSED)3405 s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3406 {
3407   if (codecomposer_syntax)
3408     ignore_rest_of_line ();
3409   else
3410     as_bad (_(".ref pseudo-op only available with -mccs flag."));
3411 }
3412 
3413 /*  If name is not NULL, then it is used for marking the beginning of a
3414     function, whereas if it is NULL then it means the function end.  */
3415 static void
asmfunc_debug(const char * name)3416 asmfunc_debug (const char * name)
3417 {
3418   static const char * last_name = NULL;
3419 
3420   if (name != NULL)
3421     {
3422       gas_assert (last_name == NULL);
3423       last_name = name;
3424 
3425       if (debug_type == DEBUG_STABS)
3426          stabs_generate_asm_func (name, name);
3427     }
3428   else
3429     {
3430       gas_assert (last_name != NULL);
3431 
3432       if (debug_type == DEBUG_STABS)
3433         stabs_generate_asm_endfunc (last_name, last_name);
3434 
3435       last_name = NULL;
3436     }
3437 }
3438 
3439 static void
s_ccs_asmfunc(int unused ATTRIBUTE_UNUSED)3440 s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3441 {
3442   if (codecomposer_syntax)
3443     {
3444       switch (asmfunc_state)
3445 	{
3446 	case OUTSIDE_ASMFUNC:
3447 	  asmfunc_state = WAITING_ASMFUNC_NAME;
3448 	  break;
3449 
3450 	case WAITING_ASMFUNC_NAME:
3451 	  as_bad (_(".asmfunc repeated."));
3452 	  break;
3453 
3454 	case WAITING_ENDASMFUNC:
3455 	  as_bad (_(".asmfunc without function."));
3456 	  break;
3457 	}
3458       demand_empty_rest_of_line ();
3459     }
3460   else
3461     as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3462 }
3463 
3464 static void
s_ccs_endasmfunc(int unused ATTRIBUTE_UNUSED)3465 s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3466 {
3467   if (codecomposer_syntax)
3468     {
3469       switch (asmfunc_state)
3470 	{
3471 	case OUTSIDE_ASMFUNC:
3472 	  as_bad (_(".endasmfunc without a .asmfunc."));
3473 	  break;
3474 
3475 	case WAITING_ASMFUNC_NAME:
3476 	  as_bad (_(".endasmfunc without function."));
3477 	  break;
3478 
3479 	case WAITING_ENDASMFUNC:
3480 	  asmfunc_state = OUTSIDE_ASMFUNC;
3481 	  asmfunc_debug (NULL);
3482 	  break;
3483 	}
3484       demand_empty_rest_of_line ();
3485     }
3486   else
3487     as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3488 }
3489 
3490 static void
s_ccs_def(int name)3491 s_ccs_def (int name)
3492 {
3493   if (codecomposer_syntax)
3494     s_globl (name);
3495   else
3496     as_bad (_(".def pseudo-op only available with -mccs flag."));
3497 }
3498 
3499 /* Directives: Literal pools.  */
3500 
3501 static literal_pool *
find_literal_pool(void)3502 find_literal_pool (void)
3503 {
3504   literal_pool * pool;
3505 
3506   for (pool = list_of_pools; pool != NULL; pool = pool->next)
3507     {
3508       if (pool->section == now_seg
3509 	  && pool->sub_section == now_subseg)
3510 	break;
3511     }
3512 
3513   return pool;
3514 }
3515 
3516 static literal_pool *
find_or_make_literal_pool(void)3517 find_or_make_literal_pool (void)
3518 {
3519   /* Next literal pool ID number.  */
3520   static unsigned int latest_pool_num = 1;
3521   literal_pool *      pool;
3522 
3523   pool = find_literal_pool ();
3524 
3525   if (pool == NULL)
3526     {
3527       /* Create a new pool.  */
3528       pool = XNEW (literal_pool);
3529       if (! pool)
3530 	return NULL;
3531 
3532       pool->next_free_entry = 0;
3533       pool->section	    = now_seg;
3534       pool->sub_section	    = now_subseg;
3535       pool->next	    = list_of_pools;
3536       pool->symbol	    = NULL;
3537       pool->alignment	    = 2;
3538 
3539       /* Add it to the list.  */
3540       list_of_pools = pool;
3541     }
3542 
3543   /* New pools, and emptied pools, will have a NULL symbol.  */
3544   if (pool->symbol == NULL)
3545     {
3546       pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3547 				    &zero_address_frag, 0);
3548       pool->id = latest_pool_num ++;
3549     }
3550 
3551   /* Done.  */
3552   return pool;
3553 }
3554 
3555 /* Add the literal in the global 'inst'
3556    structure to the relevant literal pool.  */
3557 
/* Add the literal described by the global 'inst' structure (operand 1
   and relocs[0].exp) to the literal pool for the current section.
   NBYTES is 4 or 8.  On success, rewrites inst.relocs[0].exp into a
   symbol+offset reference to the pool entry and returns SUCCESS;
   returns FAIL on overflow or an unusable literal.

   Entry bookkeeping: the low byte of each entry's X_md holds the entry
   size in bytes (see LIT_ENTRY_SIZE_MASK); the high byte flags a
   padding slot inserted for 8-byte alignment (see PADDING_SLOT).  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bool padding_slot_p = false;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  /* An 8-byte literal is handled as two 4-byte halves (imm1 = low,
     imm2 = high on little-endian; swapped for big-endian).  */
  if (nbytes == 8)
    {
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Match either an identical constant...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ...or an identical symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      /* An 8-byte value matches a consecutive, 8-byte-aligned pair of
	 4-byte entries holding its two halves.  */
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	break;

      /* A 4-byte literal may reuse a padding slot left by an earlier
	 8-byte alignment.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      /* Misaligned: insert a 4-byte padding slot first.  */
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the two 4-byte halves as consecutive entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Reuse the padding slot for this 4-byte literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Point the instruction's reloc at the pool entry.  */
  inst.relocs[0].exp.X_op	      = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3719 
3720 bool
tc_start_label_without_colon(void)3721 tc_start_label_without_colon (void)
3722 {
3723   bool ret = true;
3724 
3725   if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
3726     {
3727       const char *label = input_line_pointer;
3728 
3729       while (!is_end_of_line[(int) label[-1]])
3730 	--label;
3731 
3732       if (*label == '.')
3733 	{
3734 	  as_bad (_("Invalid label '%s'"), label);
3735 	  ret = false;
3736 	}
3737 
3738       asmfunc_debug (label);
3739 
3740       asmfunc_state = WAITING_ENDASMFUNC;
3741     }
3742 
3743   return ret;
3744 }
3745 
3746 /* Can't use symbol_new here, so have to create a symbol and then at
3747    a later date assign it a value. That's what these functions do.  */
3748 
3749 static void
symbol_locate(symbolS * symbolP,const char * name,segT segment,valueT valu,fragS * frag)3750 symbol_locate (symbolS *    symbolP,
3751 	       const char * name,	/* It is copied, the caller can modify.	 */
3752 	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
3753 	       valueT	    valu,	/* Symbol value.  */
3754 	       fragS *	    frag)	/* Associated fragment.	 */
3755 {
3756   size_t name_length;
3757   char * preserved_copy_of_name;
3758 
3759   name_length = strlen (name) + 1;   /* +1 for \0.  */
3760   obstack_grow (&notes, name, name_length);
3761   preserved_copy_of_name = (char *) obstack_finish (&notes);
3762 
3763 #ifdef tc_canonicalize_symbol_name
3764   preserved_copy_of_name =
3765     tc_canonicalize_symbol_name (preserved_copy_of_name);
3766 #endif
3767 
3768   S_SET_NAME (symbolP, preserved_copy_of_name);
3769 
3770   S_SET_SEGMENT (symbolP, segment);
3771   S_SET_VALUE (symbolP, valu);
3772   symbol_clear_list_pointers (symbolP);
3773 
3774   symbol_set_frag (symbolP, frag);
3775 
3776   /* Link to end of symbol chain.  */
3777   {
3778     extern int symbol_table_frozen;
3779 
3780     if (symbol_table_frozen)
3781       abort ();
3782   }
3783 
3784   symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);
3785 
3786   obj_symbol_new_hook (symbolP);
3787 
3788 #ifdef tc_symbol_new_hook
3789   tc_symbol_new_hook (symbolP);
3790 #endif
3791 
3792 #ifdef DEBUG_SYMS
3793   verify_symbol_chain (symbol_rootP, symbol_lastP);
3794 #endif /* DEBUG_SYMS  */
3795 }
3796 
/* Implement the ".ltorg" directive: dump the current section's literal
   pool at this point, define the pool's anchor symbol here, and mark
   the pool empty.  No-op if the pool does not exist or has no
   entries.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* The pool contents are data; emit a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 makes the generated pool name un-typeable from source.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Give the pool's (previously undefined) anchor symbol its real
     address, now that we know where the pool lands.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3848 
3849 #ifdef OBJ_ELF
3850 /* Forward declarations for functions below, in the MD interface
3851    section.  */
3852 static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3853 static valueT create_unwind_entry (int);
3854 static void start_unwind_section (const segT, int);
3855 static void add_unwind_opcode (valueT, int);
3856 static void flush_pending_unwind (void);
3857 
3858 /* Directives: Data.  */
3859 
/* Implement data directives (e.g. .word) for ELF, handling optional
   relocation suffixes such as "expr(reloc)".  NBYTES is the size of
   each emitted value.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  /* Symbolic value: check for a "(reloc)" suffix.  */
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present; emit the value normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the whole parsed span, splice the "(reloc)"
		     text out of the buffer, re-parse the expression
		     without it, then restore the original text.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the relocated field in the last SIZE bytes of
		     the NBYTES slot.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3954 
3955 /* Emit an expression containing a 32-bit thumb instruction.
3956    Implementation based on put_thumb32_insn.  */
3957 
3958 static void
emit_thumb32_expr(expressionS * exp)3959 emit_thumb32_expr (expressionS * exp)
3960 {
3961   expressionS exp_high = *exp;
3962 
3963   exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3964   emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3965   exp->X_add_number &= 0xffff;
3966   emit_expr (exp, (unsigned int) THUMB_SIZE);
3967 }
3968 
/*  Guess the instruction size based on the opcode.  */

static int
thumb_insn_size (int opcode)
{
  unsigned int encoding = (unsigned int) opcode;

  /* Values below the 32-bit opcode space are 16-bit instructions.  */
  if (encoding < 0xe800u)
    return 2;

  /* Full 32-bit values are 32-bit instructions.  */
  if (encoding >= 0xe8000000u)
    return 4;

  /* Anything in between is ambiguous.  */
  return 0;
}
3981 
/* Emit the constant expression EXP as one instruction of NBYTES (2 or 4).
   If NBYTES is 0, guess the width from the opcode value itself.
   Returns true if anything was emitted.  */
static bool
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: infer it from the encoding.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  /* A 16-bit slot cannot hold more than 0xffff.  */
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent
		 across the hand-inserted instruction.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are emitted high halfword
		 first on little-endian targets (see
		 emit_thumb32_expr).  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. "	\
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4026 
/* Like s_arm_elf_cons but do not use md_cons_align and
   set the mapping state to MAP_ARM/MAP_THUMB.  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* .inst.n/.inst.w width suffixes (NBYTES of 2 or 4) only make
	 sense in Thumb mode; ARM instructions are always 4 bytes.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated constant as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
4076 
/* Parse a .rel31 directive: ".rel31 <bit>, <expr>".  Emits a 32-bit
   word whose top bit is <bit> and whose low 31 bits are filled in by a
   place-relative BFD_RELOC_ARM_PREL31 fixup against <expr>.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  /* The first operand selects the value of bit 31.  */
  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
4116 
4117 /* Directives: AEABI stack-unwind tables.  */
4118 
4119 /* Parse an unwind_fnstart directive.  Simply records the current location.  */
4120 
4121 static void
s_arm_unwind_fnstart(int ignored ATTRIBUTE_UNUSED)4122 s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
4123 {
4124   demand_empty_rest_of_line ();
4125   if (unwind.proc_start)
4126     {
4127       as_bad (_("duplicate .fnstart directive"));
4128       return;
4129     }
4130 
4131   /* Mark the start of the function.  */
4132   unwind.proc_start = expr_build_dot ();
4133 
4134   /* Reset the rest of the unwind info.	 */
4135   unwind.opcode_count = 0;
4136   unwind.table_entry = NULL;
4137   unwind.personality_routine = NULL;
4138   unwind.personality_index = -1;
4139   unwind.frame_size = 0;
4140   unwind.fp_offset = 0;
4141   unwind.fp_reg = REG_SP;
4142   unwind.fp_used = 0;
4143   unwind.sp_restored = 0;
4144 }
4145 
4146 
4147 /* Parse a handlerdata directive.  Creates the exception handling table entry
4148    for the function.  */
4149 
4150 static void
s_arm_unwind_handlerdata(int ignored ATTRIBUTE_UNUSED)4151 s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
4152 {
4153   demand_empty_rest_of_line ();
4154   if (!unwind.proc_start)
4155     as_bad (MISSING_FNSTART);
4156 
4157   if (unwind.table_entry)
4158     as_bad (_("duplicate .handlerdata directive"));
4159 
4160   create_unwind_entry (1);
4161 }
4162 
/* Parse an unwind_fnend directive.  Generates the index table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the entry can be
     encoded inline in the index table.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      /* A zero-size BFD_RELOC_NONE fixup records the reference without
	 emitting any bytes.  */
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  /* Allow a subsequent .fnstart.  */
  unwind.proc_start = NULL;
}
4232 
4233 
4234 /* Parse an unwind_cantunwind directive.  */
4235 
4236 static void
s_arm_unwind_cantunwind(int ignored ATTRIBUTE_UNUSED)4237 s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4238 {
4239   demand_empty_rest_of_line ();
4240   if (!unwind.proc_start)
4241     as_bad (MISSING_FNSTART);
4242 
4243   if (unwind.personality_routine || unwind.personality_index != -1)
4244     as_bad (_("personality routine specified for cantunwind frame"));
4245 
4246   unwind.personality_index = -2;
4247 }
4248 
4249 
4250 /* Parse a personalityindex directive.	*/
4251 
4252 static void
s_arm_unwind_personalityindex(int ignored ATTRIBUTE_UNUSED)4253 s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4254 {
4255   expressionS exp;
4256 
4257   if (!unwind.proc_start)
4258     as_bad (MISSING_FNSTART);
4259 
4260   if (unwind.personality_routine || unwind.personality_index != -1)
4261     as_bad (_("duplicate .personalityindex directive"));
4262 
4263   expression (&exp);
4264 
4265   if (exp.X_op != O_constant
4266       || exp.X_add_number < 0 || exp.X_add_number > 15)
4267     {
4268       as_bad (_("bad personality routine number"));
4269       ignore_rest_of_line ();
4270       return;
4271     }
4272 
4273   unwind.personality_index = exp.X_add_number;
4274 
4275   demand_empty_rest_of_line ();
4276 }
4277 
4278 
/* Parse a personality directive.  */

static void
s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
{
  char *name, *p, c;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.personality_routine || unwind.personality_index != -1)
    as_bad (_("duplicate .personality directive"));

  /* Read the routine name; C is the delimiter get_symbol_name replaced
     with a NUL at *P.  */
  c = get_symbol_name (& name);
  p = input_line_pointer;
  if (c == '"')
    ++ input_line_pointer;
  unwind.personality_routine = symbol_find_or_make (name);
  /* Restore the delimiter so the rest of the line parses normally.  */
  *p = c;
  demand_empty_rest_of_line ();
}
4300 
4301 
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bit mask: bit N set means rN is saved.  */
  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode; save sp (bit 13) instead of ip (bit 12).  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4377 
4378 
4379 /* Parse a directive saving FPA registers.  */
4380 
4381 static void
s_arm_unwind_save_fpa(int reg)4382 s_arm_unwind_save_fpa (int reg)
4383 {
4384   expressionS exp;
4385   int num_regs;
4386   valueT op;
4387 
4388   /* Get Number of registers to transfer.  */
4389   if (skip_past_comma (&input_line_pointer) != FAIL)
4390     expression (&exp);
4391   else
4392     exp.X_op = O_illegal;
4393 
4394   if (exp.X_op != O_constant)
4395     {
4396       as_bad (_("expected , <constant>"));
4397       ignore_rest_of_line ();
4398       return;
4399     }
4400 
4401   num_regs = exp.X_add_number;
4402 
4403   if (num_regs < 1 || num_regs > 4)
4404     {
4405       as_bad (_("number of registers must be in the range [1:4]"));
4406       ignore_rest_of_line ();
4407       return;
4408     }
4409 
4410   demand_empty_rest_of_line ();
4411 
4412   if (reg == 4)
4413     {
4414       /* Short form.  */
4415       op = 0xb4 | (num_regs - 1);
4416       add_unwind_opcode (op, 1);
4417     }
4418   else
4419     {
4420       /* Long form.  */
4421       op = 0xc800 | (reg << 4) | (num_regs - 1);
4422       add_unwind_opcode (op, 2);
4423     }
4424   unwind.frame_size += num_regs * 12;
4425 }
4426 
4427 
/* Parse a directive saving VFP registers for ARMv6 and above.  */

static void
s_arm_unwind_save_vfp_armv6 (void)
{
  int count;
  unsigned int start;
  valueT op;
  int num_vfpv3_regs = 0;
  int num_regs_below_16;
  bool partial_match;

  /* COUNT registers starting at dSTART.  */
  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
			      &partial_match);
  if (count == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
     than FSTMX/FLDMX-style ones).  */

  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
  if (start >= 16)
    num_vfpv3_regs = count;
  else if (start + count > 16)
    num_vfpv3_regs = start + count - 16;

  if (num_vfpv3_regs > 0)
    {
      /* The 0xc8 opcode numbers registers relative to d16.  */
      int start_offset = start > 16 ? start - 16 : 0;
      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
      add_unwind_opcode (op, 2);
    }

  /* Generate opcode for registers numbered in the range 0 .. 15.  */
  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
  if (num_regs_below_16 > 0)
    {
      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
      add_unwind_opcode (op, 2);
    }

  /* Each D register is 8 bytes.  */
  unwind.frame_size += count * 8;
}
4478 
4479 
4480 /* Parse a directive saving VFP registers for pre-ARMv6.  */
4481 
4482 static void
s_arm_unwind_save_vfp(void)4483 s_arm_unwind_save_vfp (void)
4484 {
4485   int count;
4486   unsigned int reg;
4487   valueT op;
4488   bool partial_match;
4489 
4490   count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4491 			      &partial_match);
4492   if (count == FAIL)
4493     {
4494       as_bad (_("expected register list"));
4495       ignore_rest_of_line ();
4496       return;
4497     }
4498 
4499   demand_empty_rest_of_line ();
4500 
4501   if (reg == 8)
4502     {
4503       /* Short form.  */
4504       op = 0xb8 | (count - 1);
4505       add_unwind_opcode (op, 1);
4506     }
4507   else
4508     {
4509       /* Long form.  */
4510       op = 0xb300 | (reg << 4) | (count - 1);
4511       add_unwind_opcode (op, 2);
4512     }
4513   unwind.frame_size += count * 8 + 4;
4514 }
4515 
4516 
/* Parse a directive saving iWMMXt data registers.  */

static void
s_arm_unwind_save_mmxwr (void)
{
  int reg;
  int hi_reg;
  int i;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  /* Accumulate the saved wR registers into MASK (bit N = wRN),
     accepting both single registers and "lo-hi" ranges.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	  goto error;
	}

      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each wR register is 8 bytes.  */
  for (i = 0; i < 16; i++)
    {
      if (mask & (1 << i))
	unwind.frame_size += 8;
    }

  /* Attempt to combine with a previous opcode.	 We do this because gcc
     likes to output separate unwind directives for a single block of
     registers.	 */
  if (unwind.opcode_count > 0)
    {
      i = unwind.opcodes[unwind.opcode_count - 1];
      if ((i & 0xf8) == 0xc0)
	{
	  i &= 7;
	  /* Only merge if the blocks are contiguous.  */
	  if (i < 6)
	    {
	      /* Previous opcode is the short form (wR10..wR(10+i));
		 merge when the new list is exactly wR9.  */
	      if ((mask & 0xfe00) == (1 << 9))
		{
		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
		  unwind.opcode_count--;
		}
	    }
	  else if (i == 6 && unwind.opcode_count >= 2)
	    {
	      /* Previous opcode pair is the long form; REG and I are
		 its start register and count-1 fields.  Merge when the
		 new list ends exactly one register below its start.  */
	      i = unwind.opcodes[unwind.opcode_count - 2];
	      reg = i >> 4;
	      i &= 0xf;

	      op = 0xffff << (reg - 1);
	      if (reg > 0
		  && ((mask & op) == (1u << (reg - 1))))
		{
		  op = (1 << (reg + i + 1)) - 1;
		  op &= ~((1 << reg) - 1);
		  mask |= op;
		  unwind.opcode_count -= 2;
		}
	    }
	}
    }

  hi_reg = 15;
  /* We want to generate opcodes in the order the registers have been
     saved, ie. descending order.  */
  for (reg = 15; reg >= -1; reg--)
    {
      /* Save registers in blocks.  */
      if (reg < 0
	  || !(mask & (1 << reg)))
	{
	  /* We found an unsaved reg.  Generate opcodes to save the
	     preceding block.	*/
	  if (reg != hi_reg)
	    {
	      if (reg == 9)
		{
		  /* Short form.  */
		  op = 0xc0 | (hi_reg - 10);
		  add_unwind_opcode (op, 1);
		}
	      else
		{
		  /* Long form.	 */
		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
		  add_unwind_opcode (op, 2);
		}
	    }
	  hi_reg = reg - 1;
	}
    }

  return;
 error:
  ignore_rest_of_line ();
}
4650 
/* Parse a directive saving iWMMXt control registers (wCG).  */
static void
s_arm_unwind_save_mmxwcg (void)
{
  int reg;
  int hi_reg;
  unsigned mask = 0;
  valueT op;

  if (*input_line_pointer == '{')
    input_line_pointer++;

  skip_whitespace (input_line_pointer);

  /* Accumulate the saved wCG registers into MASK.  */
  do
    {
      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);

      if (reg == FAIL)
	{
	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	  goto error;
	}

      /* wCG register numbers start at 8; rebase to bit 0.  */
      reg -= 8;
      if (mask >> reg)
	as_tsktsk (_("register list not in ascending order"));
      mask |= 1 << reg;

      if (*input_line_pointer == '-')
	{
	  input_line_pointer++;
	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
	  if (hi_reg == FAIL)
	    {
	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
	      goto error;
	    }
	  else if (reg >= hi_reg)
	    {
	      as_bad (_("bad register range"));
	      goto error;
	    }
	  for (; reg < hi_reg; reg++)
	    mask |= 1 << reg;
	}
    }
  while (skip_past_comma (&input_line_pointer) != FAIL);

  skip_past_char (&input_line_pointer, '}');

  demand_empty_rest_of_line ();

  /* Generate any deferred opcodes because we're going to be looking at
     the list.	*/
  flush_pending_unwind ();

  /* Each saved register is 4 bytes.  */
  for (reg = 0; reg < 16; reg++)
    {
      if (mask & (1 << reg))
	unwind.frame_size += 4;
    }
  op = 0xc700 | mask;
  add_unwind_opcode (op, 2);
  return;
 error:
  ignore_rest_of_line ();
}
4718 
4719 
4720 /* Parse an unwind_save directive.
4721    If the argument is non-zero, this is a .vsave directive.  */
4722 
4723 static void
s_arm_unwind_save(int arch_v6)4724 s_arm_unwind_save (int arch_v6)
4725 {
4726   char *peek;
4727   struct reg_entry *reg;
4728   bool had_brace = false;
4729 
4730   if (!unwind.proc_start)
4731     as_bad (MISSING_FNSTART);
4732 
4733   /* Figure out what sort of save we have.  */
4734   peek = input_line_pointer;
4735 
4736   if (*peek == '{')
4737     {
4738       had_brace = true;
4739       peek++;
4740     }
4741 
4742   reg = arm_reg_parse_multi (&peek);
4743 
4744   if (!reg)
4745     {
4746       as_bad (_("register expected"));
4747       ignore_rest_of_line ();
4748       return;
4749     }
4750 
4751   switch (reg->type)
4752     {
4753     case REG_TYPE_FN:
4754       if (had_brace)
4755 	{
4756 	  as_bad (_("FPA .unwind_save does not take a register list"));
4757 	  ignore_rest_of_line ();
4758 	  return;
4759 	}
4760       input_line_pointer = peek;
4761       s_arm_unwind_save_fpa (reg->number);
4762       return;
4763 
4764     case REG_TYPE_RN:
4765       s_arm_unwind_save_core ();
4766       return;
4767 
4768     case REG_TYPE_VFD:
4769       if (arch_v6)
4770 	s_arm_unwind_save_vfp_armv6 ();
4771       else
4772 	s_arm_unwind_save_vfp ();
4773       return;
4774 
4775     case REG_TYPE_MMXWR:
4776       s_arm_unwind_save_mmxwr ();
4777       return;
4778 
4779     case REG_TYPE_MMXWCG:
4780       s_arm_unwind_save_mmxwcg ();
4781       return;
4782 
4783     default:
4784       as_bad (_(".unwind_save does not support this kind of register"));
4785       ignore_rest_of_line ();
4786     }
4787 }
4788 
4789 
/* Parse an unwind_movsp directive: "<reg>[, #<offset>]".  Records that
   sp has been restored from REG.  */

static void
s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
{
  int reg;
  valueT op;
  int offset;

  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
  if (reg == FAIL)
    {
      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
      ignore_rest_of_line ();
      return;
    }

  /* Optional constant.	 */
  if (skip_past_comma (&input_line_pointer) != FAIL)
    {
      if (immediate_for_directive (&offset) == FAIL)
	return;
    }
  else
    offset = 0;

  demand_empty_rest_of_line ();

  if (reg == REG_SP || reg == REG_PC)
    {
      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
      return;
    }

  if (unwind.fp_reg != REG_SP)
    as_bad (_("unexpected .unwind_movsp directive"));

  /* Generate opcode to restore the value.  */
  op = 0x90 | reg;
  add_unwind_opcode (op, 1);

  /* Record the information for later, so that .unwind_setfp and
     .unwind_save can account for the new frame base.  */
  unwind.fp_reg = reg;
  unwind.fp_offset = unwind.frame_size - offset;
  unwind.sp_restored = 1;
}
4839 
4840 /* Parse an unwind_pad directive.  */
4841 
4842 static void
s_arm_unwind_pad(int ignored ATTRIBUTE_UNUSED)4843 s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4844 {
4845   int offset;
4846 
4847   if (!unwind.proc_start)
4848     as_bad (MISSING_FNSTART);
4849 
4850   if (immediate_for_directive (&offset) == FAIL)
4851     return;
4852 
4853   if (offset & 3)
4854     {
4855       as_bad (_("stack increment must be multiple of 4"));
4856       ignore_rest_of_line ();
4857       return;
4858     }
4859 
4860   /* Don't generate any opcodes, just record the details for later.  */
4861   unwind.frame_size += offset;
4862   unwind.pending_offset += offset;
4863 
4864   demand_empty_rest_of_line ();
4865 }
4866 
4867 /* Parse an unwind_setfp directive.  */
4868 
4869 static void
s_arm_unwind_setfp(int ignored ATTRIBUTE_UNUSED)4870 s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4871 {
4872   int sp_reg;
4873   int fp_reg;
4874   int offset;
4875 
4876   if (!unwind.proc_start)
4877     as_bad (MISSING_FNSTART);
4878 
4879   fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4880   if (skip_past_comma (&input_line_pointer) == FAIL)
4881     sp_reg = FAIL;
4882   else
4883     sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4884 
4885   if (fp_reg == FAIL || sp_reg == FAIL)
4886     {
4887       as_bad (_("expected <reg>, <reg>"));
4888       ignore_rest_of_line ();
4889       return;
4890     }
4891 
4892   /* Optional constant.	 */
4893   if (skip_past_comma (&input_line_pointer) != FAIL)
4894     {
4895       if (immediate_for_directive (&offset) == FAIL)
4896 	return;
4897     }
4898   else
4899     offset = 0;
4900 
4901   demand_empty_rest_of_line ();
4902 
4903   if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4904     {
4905       as_bad (_("register must be either sp or set by a previous"
4906 		"unwind_movsp directive"));
4907       return;
4908     }
4909 
4910   /* Don't generate any opcodes, just record the information for later.	 */
4911   unwind.fp_reg = fp_reg;
4912   unwind.fp_used = 1;
4913   if (sp_reg == REG_SP)
4914     unwind.fp_offset = unwind.frame_size - offset;
4915   else
4916     unwind.fp_offset -= offset;
4917 }
4918 
4919 /* Parse an unwind_raw directive.  */
4920 
4921 static void
s_arm_unwind_raw(int ignored ATTRIBUTE_UNUSED)4922 s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4923 {
4924   expressionS exp;
4925   /* This is an arbitrary limit.	 */
4926   unsigned char op[16];
4927   int count;
4928 
4929   if (!unwind.proc_start)
4930     as_bad (MISSING_FNSTART);
4931 
4932   expression (&exp);
4933   if (exp.X_op == O_constant
4934       && skip_past_comma (&input_line_pointer) != FAIL)
4935     {
4936       unwind.frame_size += exp.X_add_number;
4937       expression (&exp);
4938     }
4939   else
4940     exp.X_op = O_illegal;
4941 
4942   if (exp.X_op != O_constant)
4943     {
4944       as_bad (_("expected <offset>, <opcode>"));
4945       ignore_rest_of_line ();
4946       return;
4947     }
4948 
4949   count = 0;
4950 
4951   /* Parse the opcode.	*/
4952   for (;;)
4953     {
4954       if (count >= 16)
4955 	{
4956 	  as_bad (_("unwind opcode too long"));
4957 	  ignore_rest_of_line ();
4958 	}
4959       if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4960 	{
4961 	  as_bad (_("invalid unwind opcode"));
4962 	  ignore_rest_of_line ();
4963 	  return;
4964 	}
4965       op[count++] = exp.X_add_number;
4966 
4967       /* Parse the next byte.  */
4968       if (skip_past_comma (&input_line_pointer) == FAIL)
4969 	break;
4970 
4971       expression (&exp);
4972     }
4973 
4974   /* Add the opcode bytes in reverse order.  */
4975   while (count--)
4976     add_unwind_opcode (op[count], 1);
4977 
4978   demand_empty_rest_of_line ();
4979 }
4980 
4981 
4982 /* Parse a .eabi_attribute directive.  */
4983 
4984 static void
s_arm_eabi_attribute(int ignored ATTRIBUTE_UNUSED)4985 s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4986 {
4987   int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4988 
4989   if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4990     attributes_set_explicitly[tag] = 1;
4991 }
4992 
/* Emit a tls fix for the symbol.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current output position; no bytes are emitted here, the
     fixup tags the code that follows.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
5016 #endif /* OBJ_ELF */
5017 
5018 static void s_arm_arch (int);
5019 static void s_arm_object_arch (int);
5020 static void s_arm_cpu (int);
5021 static void s_arm_fpu (int);
5022 static void s_arm_arch_extension (int);
5023 
5024 #ifdef TE_PE
5025 
5026 static void
pe_directive_secrel(int dummy ATTRIBUTE_UNUSED)5027 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5028 {
5029   expressionS exp;
5030 
5031   do
5032     {
5033       expression (&exp);
5034       if (exp.X_op == O_symbol)
5035 	exp.X_op = O_secrel;
5036 
5037       emit_expr (&exp, 4);
5038     }
5039   while (*input_line_pointer++ == ',');
5040 
5041   input_line_pointer--;
5042   demand_empty_rest_of_line ();
5043 }
5044 #endif /* TE_PE */
5045 
5046 int
arm_is_largest_exponent_ok(int precision)5047 arm_is_largest_exponent_ok (int precision)
5048 {
5049   /* precision == 1 ensures that this will only return
5050      true for 16 bit floats.  */
5051   return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5052 }
5053 
5054 static void
set_fp16_format(int dummy ATTRIBUTE_UNUSED)5055 set_fp16_format (int dummy ATTRIBUTE_UNUSED)
5056 {
5057   char saved_char;
5058   char* name;
5059   enum fp_16bit_format new_format;
5060 
5061   new_format = ARM_FP16_FORMAT_DEFAULT;
5062 
5063   name = input_line_pointer;
5064   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
5065     input_line_pointer++;
5066 
5067   saved_char = *input_line_pointer;
5068   *input_line_pointer = 0;
5069 
5070   if (strcasecmp (name, "ieee") == 0)
5071     new_format = ARM_FP16_FORMAT_IEEE;
5072   else if (strcasecmp (name, "alternative") == 0)
5073     new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5074   else
5075     {
5076       as_bad (_("unrecognised float16 format \"%s\""), name);
5077       goto cleanup;
5078     }
5079 
5080   /* Only set fp16_format if it is still the default (aka not already
5081      been set yet).  */
5082   if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5083     fp16_format = new_format;
5084   else
5085     {
5086       if (new_format != fp16_format)
5087 	as_warn (_("float16 format cannot be set more than once, ignoring."));
5088     }
5089 
5090  cleanup:
5091   *input_line_pointer = saved_char;
5092   ignore_rest_of_line ();
5093 }
5094 
5095 /* This table describes all the machine specific pseudo-ops the assembler
5096    has to support.  The fields are:
5097      pseudo-op name without dot
5098      function to call to execute this pseudo-op
5099      Integer arg to pass to the function.  */
5100 
const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  /* ARM/Thumb state and code-type switching directives.  */
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  /* ".pool" is a synonym for ".ltorg" (dump the literal pool).  */
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  /* Target-selection directives (CPU, architecture, FPU).  */
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	  0 },
  /* EHABI unwind-table directives.  */
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
  { "bfloat16",	   float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  /* Sentinel marking the end of the table.  */
  { 0, 0, 0 }
};
5178 
5179 /* Parser functions used exclusively in instruction operands.  */
5180 
5181 /* Generic immediate-value read function for use in insn parsing.
5182    STR points to the beginning of the immediate (the leading #);
5183    VAL receives the value; if the value is outside [MIN, MAX]
5184    issue an error.  PREFIX_OPT is true if the immediate prefix is
5185    optional.  */
5186 
5187 static int
parse_immediate(char ** str,int * val,int min,int max,bool prefix_opt)5188 parse_immediate (char **str, int *val, int min, int max,
5189 		 bool prefix_opt)
5190 {
5191   expressionS exp;
5192 
5193   my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5194   if (exp.X_op != O_constant)
5195     {
5196       inst.error = _("constant expression required");
5197       return FAIL;
5198     }
5199 
5200   if (exp.X_add_number < min || exp.X_add_number > max)
5201     {
5202       inst.error = _("immediate value out of range");
5203       return FAIL;
5204     }
5205 
5206   *val = exp.X_add_number;
5207   return SUCCESS;
5208 }
5209 
5210 /* Less-generic immediate-value read function with the possibility of loading a
5211    big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
5212    instructions. Puts the result directly in inst.operands[i].  */
5213 
static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bool allow_symbol_p)
{
  /* Parse into the caller-supplied expression when IN_EXP is non-NULL,
     otherwise into a local scratch expression.  */
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      /* Low 32 bits of the constant go into .imm.  */
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* PARTS is the number of littlenums needed to hold 32 bits.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  /* Every littlenum beyond the low 64 bits must repeat the
	     sign-extension pattern of its predecessor.  */
	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm ...  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      /* ... and the next 32 bits into .reg, flagged as an immediate.  */
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  /* Commit the parse position only on success.  */
  *str = ptr;

  return SUCCESS;
}
5282 
5283 /* Returns the pseudo-register number of an FPA immediate constant,
5284    or FAIL if there isn't a valid constant here.  */
5285 
static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    /* i + 8: FPA immediate constants appear to be numbered as
	       pseudo-registers starting at 8 — consistent with the other
	       returns below.  */
	    return i + 8;
	  /* Not followed by end-of-line: back out and keep trying.  */
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known FPA constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  /* Success: advance *str and restore input_line_pointer.  */
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  /* Failure: restore input_line_pointer before reporting.  */
  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5375 
5376 /* Returns 1 if a number has "quarter-precision" float format
5377    0baBbbbbbc defgh000 00000000 00000000.  */
5378 
/* Returns 1 if a number has "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000, i.e. the low 19 bits are
   zero and bits 25-30 hold the pattern implied by bit 29 (B must be
   the complement of the b-bits).  */
static int
is_quarter_float (unsigned imm)
{
  unsigned expected = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;

  if ((imm & 0x7ffff) != 0)
    return 0;

  return (imm & 0x7e000000) == expected;
}
5385 
5386 
5387 /* Detect the presence of a floating point or integer zero constant,
5388    i.e. #0.0 or #0.  */
5389 
5390 static bool
parse_ifimm_zero(char ** in)5391 parse_ifimm_zero (char **in)
5392 {
5393   int error_code;
5394 
5395   if (!is_immediate_prefix (**in))
5396     {
5397       /* In unified syntax, all prefixes are optional.  */
5398       if (!unified_syntax)
5399 	return false;
5400     }
5401   else
5402     ++*in;
5403 
5404   /* Accept #0x0 as a synonym for #0.  */
5405   if (startswith (*in, "0x"))
5406     {
5407       int val;
5408       if (parse_immediate (in, &val, 0, 0, true) == FAIL)
5409         return false;
5410       return true;
5411     }
5412 
5413   error_code = atof_generic (in, ".", EXP_CHARS,
5414                              &generic_floating_point_number);
5415 
5416   if (!error_code
5417       && generic_floating_point_number.sign == '+'
5418       && (generic_floating_point_number.low
5419           > generic_floating_point_number.leader))
5420     return true;
5421 
5422   return false;
5423 }
5424 
5425 /* Parse an 8-bit "quarter-precision" floating point number of the form:
5426    0baBbbbbbc defgh000 00000000 00000000.
5427    The zero and minus-zero cases need special handling, since they can't be
5428    encoded in the "quarter-precision" float format, but can nonetheless be
5429    loaded as integer constants.  */
5430 
static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  /* Hex literals are always integers, never floats.  */
  if (startswith (fpnum, "0x"))
    return FAIL;
  else
    {
      /* Scan up to the end of the token for a '.', 'e' or 'E' that marks
	 a floating-point spelling.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision values, plus +0.0/-0.0 which cannot be
	 encoded that way but can still be loaded as integer constants.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5488 
/* Shift operands.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};

/* Maps a shift mnemonic (as found in arm_shift_hsh) to its kind.  */
struct asm_shift_name
{
  const char	  *name;
  enum shift_kind  kind;
};

/* Third argument to parse_shift.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5511 
5512 /* Parse a <shift> specifier on an ARM data processing instruction.
5513    This has three forms:
5514 
5515      (LSL|LSR|ASL|ASR|ROR) Rs
5516      (LSL|LSR|ASL|ASR|ROR) #imm
5517      RRX
5518 
5519    Note that ASL is assimilated to LSL in the instruction encoding, and
5520    RRX to ROR #0 (which cannot be written as such).  */
5521 
5522 static int
parse_shift(char ** str,int i,enum parse_shift_mode mode)5523 parse_shift (char **str, int i, enum parse_shift_mode mode)
5524 {
5525   const struct asm_shift_name *shift_name;
5526   enum shift_kind shift;
5527   char *s = *str;
5528   char *p = s;
5529   int reg;
5530 
5531   for (p = *str; ISALPHA (*p); p++)
5532     ;
5533 
5534   if (p == *str)
5535     {
5536       inst.error = _("shift expression expected");
5537       return FAIL;
5538     }
5539 
5540   shift_name
5541     = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
5542 						       p - *str);
5543 
5544   if (shift_name == NULL)
5545     {
5546       inst.error = _("shift expression expected");
5547       return FAIL;
5548     }
5549 
5550   shift = shift_name->kind;
5551 
5552   switch (mode)
5553     {
5554     case NO_SHIFT_RESTRICT:
5555     case SHIFT_IMMEDIATE:
5556       if (shift == SHIFT_UXTW)
5557 	{
5558 	  inst.error = _("'UXTW' not allowed here");
5559 	  return FAIL;
5560 	}
5561       break;
5562 
5563     case SHIFT_LSL_OR_ASR_IMMEDIATE:
5564       if (shift != SHIFT_LSL && shift != SHIFT_ASR)
5565 	{
5566 	  inst.error = _("'LSL' or 'ASR' required");
5567 	  return FAIL;
5568 	}
5569       break;
5570 
5571     case SHIFT_LSL_IMMEDIATE:
5572       if (shift != SHIFT_LSL)
5573 	{
5574 	  inst.error = _("'LSL' required");
5575 	  return FAIL;
5576 	}
5577       break;
5578 
5579     case SHIFT_ASR_IMMEDIATE:
5580       if (shift != SHIFT_ASR)
5581 	{
5582 	  inst.error = _("'ASR' required");
5583 	  return FAIL;
5584 	}
5585       break;
5586     case SHIFT_UXTW_IMMEDIATE:
5587       if (shift != SHIFT_UXTW)
5588 	{
5589 	  inst.error = _("'UXTW' required");
5590 	  return FAIL;
5591 	}
5592       break;
5593 
5594     default: abort ();
5595     }
5596 
5597   if (shift != SHIFT_RRX)
5598     {
5599       /* Whitespace can appear here if the next thing is a bare digit.	*/
5600       skip_whitespace (p);
5601 
5602       if (mode == NO_SHIFT_RESTRICT
5603 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5604 	{
5605 	  inst.operands[i].imm = reg;
5606 	  inst.operands[i].immisreg = 1;
5607 	}
5608       else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
5609 	return FAIL;
5610     }
5611   inst.operands[i].shift_kind = shift;
5612   inst.operands[i].shifted = 1;
5613   *str = p;
5614   return SUCCESS;
5615 }
5616 
5617 /* Parse a <shifter_operand> for an ARM data processing instruction:
5618 
5619       #<immediate>
5620       #<immediate>, <rotate>
5621       <Rm>
5622       <Rm>, <shift>
5623 
5624    where <shift> is defined by parse_shift above, and <rotate> is a
5625    multiple of 2 between 0 and 30.  Validation of immediate operands
5626    is deferred to md_apply_fix.  */
5627 
static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  /* Register form: <Rm> or <Rm>, <shift>.  */
  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  /* Immediate form: parse the value into inst.relocs[0].exp.  */
  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even number in [0, 30].  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      /* The rotated constant itself must fit in 8 bits.  */
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain immediate with no rotation: leave validation (and encoding)
     to md_apply_fix via the ARM immediate relocation.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5687 
5688 /* Group relocation information.  Each entry in the table contains the
5689    textual name of the relocation as may appear in assembler source
5690    and must end with a colon.
5691    Along with this textual name are the relocation codes to be used if
5692    the corresponding instruction is an ALU instruction (ADD or SUB only),
5693    an LDR, an LDRS, or an LDC.  */
5694 
struct group_reloc_table_entry
{
  /* Relocation name as written in the source, without the trailing colon.  */
  const char *name;
  /* Relocation codes per instruction class; 0 means the combination is
     not allowed (see the "not allowed on this instruction" diagnostic
     in parse_address_main).  */
  int alu_code;
  int ldr_code;
  int ldrs_code;
  int ldc_code;
};

typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5713 
/* A zero in any column below means that relocation kind is not
   permitted for the corresponding instruction class.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5788 
5789 /* Given the address of a pointer pointing to the textual name of a group
5790    relocation as may appear in assembler source, attempt to find its details
5791    in group_reloc_table.  The pointer will be updated to the character after
5792    the trailing colon.  On failure, FAIL will be returned; SUCCESS
5793    otherwise.  On success, *entry will be updated to point at the relevant
5794    group_reloc_table entry. */
5795 
5796 static int
find_group_reloc_table_entry(char ** str,struct group_reloc_table_entry ** out)5797 find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5798 {
5799   unsigned int i;
5800   for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5801     {
5802       int length = strlen (group_reloc_table[i].name);
5803 
5804       if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5805 	  && (*str)[length] == ':')
5806 	{
5807 	  *out = &group_reloc_table[i];
5808 	  *str += (length + 1);
5809 	  return SUCCESS;
5810 	}
5811     }
5812 
5813   return FAIL;
5814 }
5815 
5816 /* Parse a <shifter_operand> for an ARM data processing instruction
5817    (as for parse_shifter_operand) where group relocations are allowed:
5818 
5819       #<immediate>
5820       #<immediate>, <rotate>
5821       #:<group_reloc>:<expression>
5822       <Rm>
5823       <Rm>, <shift>
5824 
5825    where <group_reloc> is one of the strings defined in group_reloc_table.
5826    The hashes are optional.
5827 
5828    Everything else is as for parse_shifter_operand.  */
5829 
static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the "#:" (two chars) or ":" (one char) introducer.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    /* No group relocation: fall back to the ordinary shifter-operand
       parser, translating its SUCCESS/FAIL into a parse_operand_result.  */
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5871 
5872 /* Parse a Neon alignment expression.  Information is written to
5873    inst.operands[i].  We assume the initial ':' has been skipped.
5874 
5875    align	.imm = align << 8, .immisalign=1, .preind=0  */
5876 static parse_operand_result
parse_neon_alignment(char ** str,int i)5877 parse_neon_alignment (char **str, int i)
5878 {
5879   char *p = *str;
5880   expressionS exp;
5881 
5882   my_get_expression (&exp, &p, GE_NO_PREFIX);
5883 
5884   if (exp.X_op != O_constant)
5885     {
5886       inst.error = _("alignment must be constant");
5887       return PARSE_OPERAND_FAIL;
5888     }
5889 
5890   inst.operands[i].imm = exp.X_add_number << 8;
5891   inst.operands[i].immisalign = 1;
5892   /* Alignments are not pre-indexes.  */
5893   inst.operands[i].preind = 0;
5894 
5895   *str = p;
5896   return PARSE_OPERAND_SUCCESS;
5897 }
5898 
5899 /* Parse all forms of an ARM address expression.  Information is written
5900    to inst.operands[i] and/or inst.relocs[0].
5901 
5902    Preindexed addressing (.preind=1):
5903 
5904    [Rn, #offset]       .reg=Rn .relocs[0].exp=offset
5905    [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5906    [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5907 		       .shift_kind=shift .relocs[0].exp=shift_imm
5908 
5909    These three may have a trailing ! which causes .writeback to be set also.
5910 
5911    Postindexed addressing (.postind=1, .writeback=1):
5912 
5913    [Rn], #offset       .reg=Rn .relocs[0].exp=offset
5914    [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5915    [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
5916 		       .shift_kind=shift .relocs[0].exp=shift_imm
5917 
5918    Unindexed addressing (.preind=0, .postind=0):
5919 
5920    [Rn], {option}      .reg=Rn .imm=option .immisreg=0
5921 
5922    Other:
5923 
5924    [Rn]{!}	       shorthand for [Rn,#0]{!}
5925    =immediate	       .isreg=0 .relocs[0].exp=immediate
5926    label	       .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label
5927 
5928   It is the caller's responsibility to check for addressing modes not
5929   supported by the instruction, and to set inst.relocs[0].type.  */
5930 
5931 static parse_operand_result
parse_address_main(char ** str,int i,int group_relocations,group_reloc_type group_type)5932 parse_address_main (char **str, int i, int group_relocations,
5933 		    group_reloc_type group_type)
5934 {
5935   char *p = *str;
5936   int reg;
5937 
5938   if (skip_past_char (&p, '[') == FAIL)
5939     {
5940       if (group_type == GROUP_MVE
5941 	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
5942 	{
5943 	  /* [r0-r15] expected as argument but receiving r0-r15 without
5944 	     [] brackets.  */
5945 	  inst.error = BAD_SYNTAX;
5946 	  return PARSE_OPERAND_FAIL;
5947 	}
5948       else if (skip_past_char (&p, '=') == FAIL)
5949 	{
5950 	  /* Bare address - translate to PC-relative offset.  */
5951 	  inst.relocs[0].pc_rel = 1;
5952 	  inst.operands[i].reg = REG_PC;
5953 	  inst.operands[i].isreg = 1;
5954 	  inst.operands[i].preind = 1;
5955 
5956 	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
5957 	    return PARSE_OPERAND_FAIL;
5958 	}
5959       else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
5960 				    /*allow_symbol_p=*/true))
5961 	return PARSE_OPERAND_FAIL;
5962 
5963       *str = p;
5964       return PARSE_OPERAND_SUCCESS;
5965     }
5966 
5967   /* PR gas/14887: Allow for whitespace after the opening bracket.  */
5968   skip_whitespace (p);
5969 
5970   if (group_type == GROUP_MVE)
5971     {
5972       enum arm_reg_type rtype = REG_TYPE_MQ;
5973       struct neon_type_el et;
5974       if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
5975 	{
5976 	  inst.operands[i].isquad = 1;
5977 	}
5978       else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5979 	{
5980 	  inst.error = BAD_ADDR_MODE;
5981 	  return PARSE_OPERAND_FAIL;
5982 	}
5983     }
5984   else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
5985     {
5986       if (group_type == GROUP_MVE)
5987 	inst.error = BAD_ADDR_MODE;
5988       else
5989 	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
5990       return PARSE_OPERAND_FAIL;
5991     }
5992   inst.operands[i].reg = reg;
5993   inst.operands[i].isreg = 1;
5994 
5995   if (skip_past_comma (&p) == SUCCESS)
5996     {
5997       inst.operands[i].preind = 1;
5998 
5999       if (*p == '+') p++;
6000       else if (*p == '-') p++, inst.operands[i].negative = 1;
6001 
6002       enum arm_reg_type rtype = REG_TYPE_MQ;
6003       struct neon_type_el et;
6004       if (group_type == GROUP_MVE
6005 	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
6006 	{
6007 	  inst.operands[i].immisreg = 2;
6008 	  inst.operands[i].imm = reg;
6009 
6010 	  if (skip_past_comma (&p) == SUCCESS)
6011 	    {
6012 	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
6013 		{
6014 		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
6015 		  inst.relocs[0].exp.X_add_number = 0;
6016 		}
6017 	      else
6018 		return PARSE_OPERAND_FAIL;
6019 	    }
6020 	}
6021       else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
6022 	{
6023 	  inst.operands[i].imm = reg;
6024 	  inst.operands[i].immisreg = 1;
6025 
6026 	  if (skip_past_comma (&p) == SUCCESS)
6027 	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
6028 	      return PARSE_OPERAND_FAIL;
6029 	}
6030       else if (skip_past_char (&p, ':') == SUCCESS)
6031 	{
6032 	  /* FIXME: '@' should be used here, but it's filtered out by generic
6033 	     code before we get to see it here. This may be subject to
6034 	     change.  */
6035 	  parse_operand_result result = parse_neon_alignment (&p, i);
6036 
6037 	  if (result != PARSE_OPERAND_SUCCESS)
6038 	    return result;
6039 	}
6040       else
6041 	{
6042 	  if (inst.operands[i].negative)
6043 	    {
6044 	      inst.operands[i].negative = 0;
6045 	      p--;
6046 	    }
6047 
6048 	  if (group_relocations
6049 	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
6050 	    {
6051 	      struct group_reloc_table_entry *entry;
6052 
6053 	      /* Skip over the #: or : sequence.  */
6054 	      if (*p == '#')
6055 		p += 2;
6056 	      else
6057 		p++;
6058 
6059 	      /* Try to parse a group relocation.  Anything else is an
6060 		 error.  */
6061 	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
6062 		{
6063 		  inst.error = _("unknown group relocation");
6064 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6065 		}
6066 
6067 	      /* We now have the group relocation table entry corresponding to
6068 		 the name in the assembler source.  Next, we parse the
6069 		 expression.  */
6070 	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6071 		return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6072 
6073 	      /* Record the relocation type.  */
6074 	      switch (group_type)
6075 		{
6076 		  case GROUP_LDR:
6077 		    inst.relocs[0].type
6078 			= (bfd_reloc_code_real_type) entry->ldr_code;
6079 		    break;
6080 
6081 		  case GROUP_LDRS:
6082 		    inst.relocs[0].type
6083 			= (bfd_reloc_code_real_type) entry->ldrs_code;
6084 		    break;
6085 
6086 		  case GROUP_LDC:
6087 		    inst.relocs[0].type
6088 			= (bfd_reloc_code_real_type) entry->ldc_code;
6089 		    break;
6090 
6091 		  default:
6092 		    gas_assert (0);
6093 		}
6094 
6095 	      if (inst.relocs[0].type == 0)
6096 		{
6097 		  inst.error = _("this group relocation is not allowed on this instruction");
6098 		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
6099 		}
6100 	    }
6101 	  else
6102 	    {
6103 	      char *q = p;
6104 
6105 	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
6106 		return PARSE_OPERAND_FAIL;
6107 	      /* If the offset is 0, find out if it's a +0 or -0.  */
6108 	      if (inst.relocs[0].exp.X_op == O_constant
6109 		  && inst.relocs[0].exp.X_add_number == 0)
6110 		{
6111 		  skip_whitespace (q);
6112 		  if (*q == '#')
6113 		    {
6114 		      q++;
6115 		      skip_whitespace (q);
6116 		    }
6117 		  if (*q == '-')
6118 		    inst.operands[i].negative = 1;
6119 		}
6120 	    }
6121 	}
6122     }
6123   else if (skip_past_char (&p, ':') == SUCCESS)
6124     {
6125       /* FIXME: '@' should be used here, but it's filtered out by generic code
6126 	 before we get to see it here. This may be subject to change.  */
6127       parse_operand_result result = parse_neon_alignment (&p, i);
6128 
6129       if (result != PARSE_OPERAND_SUCCESS)
6130 	return result;
6131     }
6132 
6133   if (skip_past_char (&p, ']') == FAIL)
6134     {
6135       inst.error = _("']' expected");
6136       return PARSE_OPERAND_FAIL;
6137     }
6138 
6139   if (skip_past_char (&p, '!') == SUCCESS)
6140     inst.operands[i].writeback = 1;
6141 
6142   else if (skip_past_comma (&p) == SUCCESS)
6143     {
6144       if (skip_past_char (&p, '{') == SUCCESS)
6145 	{
6146 	  /* [Rn], {expr} - unindexed, with option */
6147 	  if (parse_immediate (&p, &inst.operands[i].imm,
6148 			       0, 255, true) == FAIL)
6149 	    return PARSE_OPERAND_FAIL;
6150 
6151 	  if (skip_past_char (&p, '}') == FAIL)
6152 	    {
6153 	      inst.error = _("'}' expected at end of 'option' field");
6154 	      return PARSE_OPERAND_FAIL;
6155 	    }
6156 	  if (inst.operands[i].preind)
6157 	    {
6158 	      inst.error = _("cannot combine index with option");
6159 	      return PARSE_OPERAND_FAIL;
6160 	    }
6161 	  *str = p;
6162 	  return PARSE_OPERAND_SUCCESS;
6163 	}
6164       else
6165 	{
6166 	  inst.operands[i].postind = 1;
6167 	  inst.operands[i].writeback = 1;
6168 
6169 	  if (inst.operands[i].preind)
6170 	    {
6171 	      inst.error = _("cannot combine pre- and post-indexing");
6172 	      return PARSE_OPERAND_FAIL;
6173 	    }
6174 
6175 	  if (*p == '+') p++;
6176 	  else if (*p == '-') p++, inst.operands[i].negative = 1;
6177 
6178 	  enum arm_reg_type rtype = REG_TYPE_MQ;
6179 	  struct neon_type_el et;
6180 	  if (group_type == GROUP_MVE
6181 	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
6182 	    {
6183 	      inst.operands[i].immisreg = 2;
6184 	      inst.operands[i].imm = reg;
6185 	    }
6186 	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
6187 	    {
6188 	      /* We might be using the immediate for alignment already. If we
6189 		 are, OR the register number into the low-order bits.  */
6190 	      if (inst.operands[i].immisalign)
6191 		inst.operands[i].imm |= reg;
6192 	      else
6193 		inst.operands[i].imm = reg;
6194 	      inst.operands[i].immisreg = 1;
6195 
6196 	      if (skip_past_comma (&p) == SUCCESS)
6197 		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
6198 		  return PARSE_OPERAND_FAIL;
6199 	    }
6200 	  else
6201 	    {
6202 	      char *q = p;
6203 
6204 	      if (inst.operands[i].negative)
6205 		{
6206 		  inst.operands[i].negative = 0;
6207 		  p--;
6208 		}
6209 	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
6210 		return PARSE_OPERAND_FAIL;
6211 	      /* If the offset is 0, find out if it's a +0 or -0.  */
6212 	      if (inst.relocs[0].exp.X_op == O_constant
6213 		  && inst.relocs[0].exp.X_add_number == 0)
6214 		{
6215 		  skip_whitespace (q);
6216 		  if (*q == '#')
6217 		    {
6218 		      q++;
6219 		      skip_whitespace (q);
6220 		    }
6221 		  if (*q == '-')
6222 		    inst.operands[i].negative = 1;
6223 		}
6224 	    }
6225 	}
6226     }
6227 
6228   /* If at this point neither .preind nor .postind is set, we have a
6229      bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
6230   if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
6231     {
6232       inst.operands[i].preind = 1;
6233       inst.relocs[0].exp.X_op = O_constant;
6234       inst.relocs[0].exp.X_add_number = 0;
6235     }
6236   *str = p;
6237   return PARSE_OPERAND_SUCCESS;
6238 }
6239 
6240 static int
parse_address(char ** str,int i)6241 parse_address (char **str, int i)
6242 {
6243   return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6244 	 ? SUCCESS : FAIL;
6245 }
6246 
/* Parse an address operand into inst.operands[I], additionally accepting
   a group relocation of kind TYPE (GROUP_LDR/GROUP_LDRS/GROUP_LDC).
   Thin wrapper around parse_address_main; returns its full
   parse_operand_result so callers can distinguish no-backtrack
   failures.  */
static parse_operand_result
parse_address_group_reloc (char **str, int i, group_reloc_type type)
{
  return parse_address_main (str, i, 1, type);
}
6252 
6253 /* Parse an operand for a MOVW or MOVT instruction.  */
6254 static int
parse_half(char ** str)6255 parse_half (char **str)
6256 {
6257   char * p;
6258 
6259   p = *str;
6260   skip_past_char (&p, '#');
6261   if (strncasecmp (p, ":lower16:", 9) == 0)
6262     inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
6263   else if (strncasecmp (p, ":upper16:", 9) == 0)
6264     inst.relocs[0].type = BFD_RELOC_ARM_MOVT;
6265 
6266   if (inst.relocs[0].type != BFD_RELOC_UNUSED)
6267     {
6268       p += 9;
6269       skip_whitespace (p);
6270     }
6271 
6272   if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
6273     return FAIL;
6274 
6275   if (inst.relocs[0].type == BFD_RELOC_UNUSED)
6276     {
6277       if (inst.relocs[0].exp.X_op != O_constant)
6278 	{
6279 	  inst.error = _("constant expression expected");
6280 	  return FAIL;
6281 	}
6282       if (inst.relocs[0].exp.X_add_number < 0
6283 	  || inst.relocs[0].exp.X_add_number > 0xffff)
6284 	{
6285 	  inst.error = _("immediate value out of range");
6286 	  return FAIL;
6287 	}
6288     }
6289   *str = p;
6290   return SUCCESS;
6291 }
6292 
6293 /* Miscellaneous. */
6294 
6295 /* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
6296    or a bitmask suitable to be or-ed into the ARM msr instruction.  */
static int
parse_psr (char **str, bool lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bool is_apsr = false;
  /* M-profile cores name their special registers differently (xPSR,
     PRIMASK, ...) and reject CPSR/SPSR.  */
  bool m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = false;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = true;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile: collect the whole identifier and look it up in the
	 v7m special-register table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For the *apsr/*psr family, stop the name at the first 'r'/'R' so
	 that any APSR bitmask suffix (e.g. "iapsr_nzcvq") is left for the
	 suffix parser below.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
						      p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = true;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  /* Step over the four-character register name matched above.  */
  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* Each letter sets its own bit; a repeated letter sets 0x20
	     (0x2 for 'g'), which is rejected as a bad bitmask below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* Only the complete "nzcvq" set maps onto the PSR_f mask.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicates and partial nzcvq subsets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* CPSR/SPSR suffix: look the field name up in the PSR table.  */
	  psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
							  p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6491 
6492 static int
parse_sys_vldr_vstr(char ** str)6493 parse_sys_vldr_vstr (char **str)
6494 {
6495   unsigned i;
6496   int val = FAIL;
6497   struct {
6498     const char *name;
6499     int regl;
6500     int regh;
6501   } sysregs[] = {
6502     {"FPSCR",		0x1, 0x0},
6503     {"FPSCR_nzcvqc",	0x2, 0x0},
6504     {"VPR",		0x4, 0x1},
6505     {"P0",		0x5, 0x1},
6506     {"FPCXTNS",		0x6, 0x1},
6507     {"FPCXTS",		0x7, 0x1}
6508   };
6509   char *op_end = strchr (*str, ',');
6510   size_t op_strlen = op_end - *str;
6511 
6512   for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6513     {
6514       if (!strncmp (*str, sysregs[i].name, op_strlen))
6515 	{
6516 	  val = sysregs[i].regl | (sysregs[i].regh << 3);
6517 	  *str = op_end;
6518 	  break;
6519 	}
6520     }
6521 
6522   return val;
6523 }
6524 
6525 /* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
6526    value suitable for splatting into the AIF field of the instruction.	*/
6527 
6528 static int
parse_cps_flags(char ** str)6529 parse_cps_flags (char **str)
6530 {
6531   int val = 0;
6532   int saw_a_flag = 0;
6533   char *s = *str;
6534 
6535   for (;;)
6536     switch (*s++)
6537       {
6538       case '\0': case ',':
6539 	goto done;
6540 
6541       case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6542       case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6543       case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6544 
6545       default:
6546 	inst.error = _("unrecognized CPS flag");
6547 	return FAIL;
6548       }
6549 
6550  done:
6551   if (saw_a_flag == 0)
6552     {
6553       inst.error = _("missing CPS flags");
6554       return FAIL;
6555     }
6556 
6557   *str = s - 1;
6558   return val;
6559 }
6560 
6561 /* Parse an endian specifier ("BE" or "LE", case insensitive);
6562    returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
6563 
6564 static int
parse_endian_specifier(char ** str)6565 parse_endian_specifier (char **str)
6566 {
6567   int little_endian;
6568   char *s = *str;
6569 
6570   if (strncasecmp (s, "BE", 2))
6571     little_endian = 0;
6572   else if (strncasecmp (s, "LE", 2))
6573     little_endian = 1;
6574   else
6575     {
6576       inst.error = _("valid endian specifiers are be or le");
6577       return FAIL;
6578     }
6579 
6580   if (ISALNUM (s[2]) || s[2] == '_')
6581     {
6582       inst.error = _("valid endian specifiers are be or le");
6583       return FAIL;
6584     }
6585 
6586   *str = s + 2;
6587   return little_endian;
6588 }
6589 
6590 /* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
6591    value suitable for poking into the rotate field of an sxt or sxta
6592    instruction, or FAIL on error.  */
6593 
6594 static int
parse_ror(char ** str)6595 parse_ror (char **str)
6596 {
6597   int rot;
6598   char *s = *str;
6599 
6600   if (strncasecmp (s, "ROR", 3) == 0)
6601     s += 3;
6602   else
6603     {
6604       inst.error = _("missing rotation field after comma");
6605       return FAIL;
6606     }
6607 
6608   if (parse_immediate (&s, &rot, 0, 24, false) == FAIL)
6609     return FAIL;
6610 
6611   switch (rot)
6612     {
6613     case  0: *str = s; return 0x0;
6614     case  8: *str = s; return 0x1;
6615     case 16: *str = s; return 0x2;
6616     case 24: *str = s; return 0x3;
6617 
6618     default:
6619       inst.error = _("rotation can only be 0, 8, 16, or 24");
6620       return FAIL;
6621     }
6622 }
6623 
6624 /* Parse a conditional code (from conds[] below).  The value returned is in the
6625    range 0 .. 14, or FAIL.  */
6626 static int
parse_cond(char ** str)6627 parse_cond (char **str)
6628 {
6629   char *q;
6630   const struct asm_cond *c;
6631   int n;
6632   /* Condition codes are always 2 characters, so matching up to
6633      3 characters is sufficient.  */
6634   char cond[3];
6635 
6636   q = *str;
6637   n = 0;
6638   while (ISALPHA (*q) && n < 3)
6639     {
6640       cond[n] = TOLOWER (*q);
6641       q++;
6642       n++;
6643     }
6644 
6645   c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n);
6646   if (!c)
6647     {
6648       inst.error = _("condition required");
6649       return FAIL;
6650     }
6651 
6652   *str = q;
6653   return c->value;
6654 }
6655 
6656 /* Parse an option for a barrier instruction.  Returns the encoding for the
6657    option, or FAIL.  */
6658 static int
parse_barrier(char ** str)6659 parse_barrier (char **str)
6660 {
6661   char *p, *q;
6662   const struct asm_barrier_opt *o;
6663 
6664   p = q = *str;
6665   while (ISALPHA (*q))
6666     q++;
6667 
6668   o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p,
6669 							q - p);
6670   if (!o)
6671     return FAIL;
6672 
6673   if (!mark_feature_used (&o->arch))
6674     return FAIL;
6675 
6676   *str = q;
6677   return o->value;
6678 }
6679 
6680 /* Parse the operands of a table branch instruction.  Similar to a memory
6681    operand.  */
6682 static int
parse_tb(char ** str)6683 parse_tb (char **str)
6684 {
6685   char * p = *str;
6686   int reg;
6687 
6688   if (skip_past_char (&p, '[') == FAIL)
6689     {
6690       inst.error = _("'[' expected");
6691       return FAIL;
6692     }
6693 
6694   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6695     {
6696       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6697       return FAIL;
6698     }
6699   inst.operands[0].reg = reg;
6700 
6701   if (skip_past_comma (&p) == FAIL)
6702     {
6703       inst.error = _("',' expected");
6704       return FAIL;
6705     }
6706 
6707   if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6708     {
6709       inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6710       return FAIL;
6711     }
6712   inst.operands[0].imm = reg;
6713 
6714   if (skip_past_comma (&p) == SUCCESS)
6715     {
6716       if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6717 	return FAIL;
6718       if (inst.relocs[0].exp.X_add_number != 1)
6719 	{
6720 	  inst.error = _("invalid shift");
6721 	  return FAIL;
6722 	}
6723       inst.operands[0].shifted = 1;
6724     }
6725 
6726   if (skip_past_char (&p, ']') == FAIL)
6727     {
6728       inst.error = _("']' expected");
6729       return FAIL;
6730     }
6731   *str = p;
6732   return SUCCESS;
6733 }
6734 
6735 /* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
6736    information on the types the operands can take and how they are encoded.
6737    Up to four operands may be read; this function handles setting the
6738    ".present" field for each read operand itself.
6739    Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
6740    else returns FAIL.  */
6741 
static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes inst.operands[]; each successfully parsed operand bumps it.  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

  /* First operand is an MVE vector scalar (Q-reg element).  */
   if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
    {
      /* Cases 17 or 19.  */
      inst.operands[i].reg = val;
      inst.operands[i].isvec = 1;
      inst.operands[i].isscalar = 2;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  /* Two ARM core registers must follow.  */
	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected ARM or MVE vector register"));
	  return FAIL;
	}
    }
  /* First operand is a VFP D-register scalar.  */
   else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  /* First operand is a whole S/D/Q or MVE vector register.  */
  else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	    != FAIL)
	   || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
	       != FAIL))
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* D-register destination takes a second core register.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
		&optype)) != FAIL)
	       || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
		   &optype)) != FAIL))
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/false)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  /* First operand is an ARM core register.  */
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16, 18.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Two core registers so far; what follows decides the case.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      != FAIL)
	    {
	      /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;

	      if (rtype == REG_TYPE_VFS)
		{
		  /* Case 14.  */
		  i++;
		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;
		  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
						  &optype)) == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isreg = 1;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].issingle = 1;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	    }
	  else
	    {
	      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		       != FAIL)
		{
		  /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>  */
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i++].present = 1;

		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;

		  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		      == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	      else
		{
		  first_error (_("VFP single, double or MVE vector register"
			       " expected"));
		  return FAIL;
		}
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
7055 
7056 /* Use this macro when the operand constraints are different
7057    for ARM and THUMB (e.g. ldrd).  */
7058 #define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
7059 	((arm_operand) | ((thumb_operand) << 16))
7060 
/* Matcher codes for parse_operands.

   Two invariants on this enum, both relied on by parse_operands:

   - Every code must fit in 16 bits, so that an ARM-mode code and a
     Thumb-mode code can be packed into one pattern entry with
     MIX_ARM_THUMB_OPERANDS (parse_operands unpacks entries whose
     value is >= 1<<16).

   - Codes numerically >= OP_FIRST_OPTIONAL denote operands that may
     be omitted; parse_operands records a backtrack position when it
     sees one.  All OP_o* codes must therefore be declared at or
     after OP_oI7b below.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNSDMQR,    /* Neon single or double precision, MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDMQ,	/* Neon single, double or MVE vector register */
  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/SP)  */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I127,	/*		   0 .. 127 */
  OP_I255,	/*		   0 .. 255 */
  OP_I511,	/*		   0 .. 511 */
  OP_I4095,	/*		   0 .. 4095 */
  OP_I8191,	/*		   0 .. 8191 */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.  Everything from here down (up to the mixed
     ARM/THUMB entries) may be omitted in the source; parse_operands
     treats any code >= OP_FIRST_OPTIONAL as backtrackable, so new
     OP_o* codes must be added in this section.  */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oRNSDMQ,	 /* Optional single, double register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* Lowest-numbered optional operand code; parse_operands compares
     against this to decide whether it may backtrack past a failed
     operand.  Keep equal to the first OP_o* enumerator above.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7254 
7255 /* Generic instruction operand parser.	This does no encoding and no
7256    semantic validation; it merely squirrels values away in the inst
7257    structure.  Returns SUCCESS or FAIL depending on whether the
7258    specified grammar matched.  */
7259 static int
parse_operands(char * str,const unsigned int * pattern,bool thumb)7260 parse_operands (char *str, const unsigned int *pattern, bool thumb)
7261 {
7262   unsigned const int *upat = pattern;
7263   char *backtrack_pos = 0;
7264   const char *backtrack_error = 0;
7265   int i, val = 0, backtrack_index = 0;
7266   enum arm_reg_type rtype;
7267   parse_operand_result result;
7268   unsigned int op_parse_code;
7269   bool partial_match;
7270 
7271 #define po_char_or_fail(chr)			\
7272   do						\
7273     {						\
7274       if (skip_past_char (&str, chr) == FAIL)	\
7275 	goto bad_args;				\
7276     }						\
7277   while (0)
7278 
7279 #define po_reg_or_fail(regtype)					\
7280   do								\
7281     {								\
7282       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7283 				 & inst.operands[i].vectype);	\
7284       if (val == FAIL)						\
7285 	{							\
7286 	  first_error (_(reg_expected_msgs[regtype]));		\
7287 	  goto failure;						\
7288 	}							\
7289       inst.operands[i].reg = val;				\
7290       inst.operands[i].isreg = 1;				\
7291       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7292       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7293       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7294 			     || rtype == REG_TYPE_VFD		\
7295 			     || rtype == REG_TYPE_NQ);		\
7296       inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7297     }								\
7298   while (0)
7299 
7300 #define po_reg_or_goto(regtype, label)				\
7301   do								\
7302     {								\
7303       val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7304 				 & inst.operands[i].vectype);	\
7305       if (val == FAIL)						\
7306 	goto label;						\
7307 								\
7308       inst.operands[i].reg = val;				\
7309       inst.operands[i].isreg = 1;				\
7310       inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7311       inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7312       inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7313 			     || rtype == REG_TYPE_VFD		\
7314 			     || rtype == REG_TYPE_NQ);		\
7315       inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7316     }								\
7317   while (0)
7318 
7319 #define po_imm_or_fail(min, max, popt)				\
7320   do								\
7321     {								\
7322       if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
7323 	goto failure;						\
7324       inst.operands[i].imm = val;				\
7325     }								\
7326   while (0)
7327 
7328 #define po_imm1_or_imm2_or_fail(imm1, imm2, popt)		\
7329   do								\
7330     {								\
7331       expressionS exp;						\
7332       my_get_expression (&exp, &str, popt);			\
7333       if (exp.X_op != O_constant)				\
7334 	{							\
7335 	  inst.error = _("constant expression required");	\
7336 	  goto failure;						\
7337 	}							\
7338       if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7339 	{							\
7340 	  inst.error = _("immediate value 48 or 64 expected");	\
7341 	  goto failure;						\
7342 	}							\
7343       inst.operands[i].imm = exp.X_add_number;			\
7344     }								\
7345   while (0)
7346 
7347 #define po_scalar_or_goto(elsz, label, reg_type)			\
7348   do									\
7349     {									\
7350       val = parse_scalar (& str, elsz, & inst.operands[i].vectype,	\
7351 			  reg_type);					\
7352       if (val == FAIL)							\
7353 	goto label;							\
7354       inst.operands[i].reg = val;					\
7355       inst.operands[i].isscalar = 1;					\
7356     }									\
7357   while (0)
7358 
7359 #define po_misc_or_fail(expr)			\
7360   do						\
7361     {						\
7362       if (expr)					\
7363 	goto failure;				\
7364     }						\
7365   while (0)
7366 
7367 #define po_misc_or_fail_no_backtrack(expr)		\
7368   do							\
7369     {							\
7370       result = expr;					\
7371       if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
7372 	backtrack_pos = 0;				\
7373       if (result != PARSE_OPERAND_SUCCESS)		\
7374 	goto failure;					\
7375     }							\
7376   while (0)
7377 
7378 #define po_barrier_or_imm(str)				   \
7379   do							   \
7380     {						 	   \
7381       val = parse_barrier (&str);			   \
7382       if (val == FAIL && ! ISALPHA (*str))		   \
7383 	goto immediate;					   \
7384       if (val == FAIL					   \
7385 	  /* ISB can only take SY as an option.  */	   \
7386 	  || ((inst.instruction & 0xf0) == 0x60		   \
7387 	       && val != 0xf))				   \
7388 	{						   \
7389 	   inst.error = _("invalid barrier type");	   \
7390 	   backtrack_pos = 0;				   \
7391 	   goto failure;				   \
7392 	}						   \
7393     }							   \
7394   while (0)
7395 
7396   skip_whitespace (str);
7397 
7398   for (i = 0; upat[i] != OP_stop; i++)
7399     {
7400       op_parse_code = upat[i];
7401       if (op_parse_code >= 1<<16)
7402 	op_parse_code = thumb ? (op_parse_code >> 16)
7403 				: (op_parse_code & ((1<<16)-1));
7404 
7405       if (op_parse_code >= OP_FIRST_OPTIONAL)
7406 	{
7407 	  /* Remember where we are in case we need to backtrack.  */
7408 	  backtrack_pos = str;
7409 	  backtrack_error = inst.error;
7410 	  backtrack_index = i;
7411 	}
7412 
7413       if (i > 0 && (i > 1 || inst.operands[0].present))
7414 	po_char_or_fail (',');
7415 
7416       switch (op_parse_code)
7417 	{
7418 	  /* Registers */
7419 	case OP_oRRnpc:
7420 	case OP_oRRnpcsp:
7421 	case OP_RRnpc:
7422 	case OP_RRnpcsp:
7423 	case OP_oRR:
7424 	case OP_RRe:
7425 	case OP_RRo:
7426 	case OP_LR:
7427 	case OP_oLR:
7428 	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
7429 	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
7430 	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
7431 	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
7432 	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
7433 	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7434 	case OP_oRND:
7435 	case OP_RNSDMQR:
7436 	  po_reg_or_goto (REG_TYPE_VFS, try_rndmqr);
7437 	  break;
7438 	try_rndmqr:
7439 	case OP_RNDMQR:
7440 	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7441 	  break;
7442 	try_rndmq:
7443 	case OP_RNDMQ:
7444 	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7445 	  break;
7446 	try_rnd:
7447 	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7448 	case OP_RVC:
7449 	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7450 	  break;
7451 	  /* Also accept generic coprocessor regs for unknown registers.  */
7452 	  coproc_reg:
7453 	  po_reg_or_goto (REG_TYPE_CN, vpr_po);
7454 	  break;
7455 	  /* Also accept P0 or p0 for VPR.P0.  Since P0 is already an
7456 	     existing register with a value of 0, this seems like the
7457 	     best way to parse P0.  */
7458 	  vpr_po:
7459 	  if (strncasecmp (str, "P0", 2) == 0)
7460 	    {
7461 	      str += 2;
7462 	      inst.operands[i].isreg = 1;
7463 	      inst.operands[i].reg = 13;
7464 	    }
7465 	  else
7466 	    goto failure;
7467 	  break;
7468 	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
7469 	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
7470 	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
7471 	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
7472 	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
7473 	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
7474 	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
7475 	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
7476 	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
7477 	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
7478 	case OP_oRNQ:
7479 	case OP_RNQMQ:
7480 	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
7481 	  break;
7482 	try_nq:
7483 	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
7484 	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
7485 	case OP_RNDQMQR:
7486 	  po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7487 	  break;
7488 	try_rndqmq:
7489 	case OP_oRNDQMQ:
7490 	case OP_RNDQMQ:
7491 	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7492 	  break;
7493 	try_rndq:
7494 	case OP_oRNDQ:
7495 	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
7496 	case OP_RVSDMQ:
7497 	  po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7498 	  break;
7499 	try_rvsd:
7500 	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
7501 	case OP_RVSD_COND:
7502 	  po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7503 	  break;
7504 	case OP_oRNSDMQ:
7505 	case OP_RNSDMQ:
7506 	  po_reg_or_goto (REG_TYPE_NSD, try_mq2);
7507 	  break;
7508 	  try_mq2:
7509 	  po_reg_or_fail (REG_TYPE_MQ);
7510 	  break;
7511 	case OP_oRNSDQ:
7512 	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
7513 	case OP_RNSDQMQR:
7514 	  po_reg_or_goto (REG_TYPE_RN, try_mq);
7515 	  break;
7516 	  try_mq:
7517 	case OP_oRNSDQMQ:
7518 	case OP_RNSDQMQ:
7519 	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7520 	  break;
7521 	  try_nsdq2:
7522 	  po_reg_or_fail (REG_TYPE_NSDQ);
7523 	  inst.error = 0;
7524 	  break;
7525 	case OP_RMQRR:
7526 	  po_reg_or_goto (REG_TYPE_RN, try_rmq);
7527 	  break;
7528 	try_rmq:
7529 	case OP_RMQ:
7530 	  po_reg_or_fail (REG_TYPE_MQ);
7531 	  break;
7532 	/* Neon scalar. Using an element size of 8 means that some invalid
7533 	   scalars are accepted here, so deal with those in later code.  */
7534 	case OP_RNSC:  po_scalar_or_goto (8, failure, REG_TYPE_VFD);    break;
7535 
7536 	case OP_RNDQ_I0:
7537 	  {
7538 	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7539 	    break;
7540 	    try_imm0:
7541 	    po_imm_or_fail (0, 0, true);
7542 	  }
7543 	  break;
7544 
7545 	case OP_RVSD_I0:
7546 	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7547 	  break;
7548 
7549 	case OP_RSVDMQ_FI0:
7550 	  po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7551 	  break;
7552 	try_rsvd_fi0:
7553 	case OP_RSVD_FI0:
7554 	  {
7555 	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7556 	    break;
7557 	    try_ifimm0:
7558 	    if (parse_ifimm_zero (&str))
7559 	      inst.operands[i].imm = 0;
7560 	    else
7561 	    {
7562 	      inst.error
7563 	        = _("only floating point zero is allowed as immediate value");
7564 	      goto failure;
7565 	    }
7566 	  }
7567 	  break;
7568 
7569 	case OP_RR_RNSC:
7570 	  {
7571 	    po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7572 	    break;
7573 	    try_rr:
7574 	    po_reg_or_fail (REG_TYPE_RN);
7575 	  }
7576 	  break;
7577 
7578 	case OP_RNSDQ_RNSC_MQ_RR:
7579 	  po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7580 	  break;
7581 	try_rnsdq_rnsc_mq:
7582 	case OP_RNSDQ_RNSC_MQ:
7583 	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7584 	  break;
7585 	try_rnsdq_rnsc:
7586 	case OP_RNSDQ_RNSC:
7587 	  {
7588 	    po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7589 	    inst.error = 0;
7590 	    break;
7591 	    try_nsdq:
7592 	    po_reg_or_fail (REG_TYPE_NSDQ);
7593 	    inst.error = 0;
7594 	  }
7595 	  break;
7596 
7597 	case OP_RNSD_RNSC:
7598 	  {
7599 	    po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7600 	    break;
7601 	    try_s_scalar:
7602 	    po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7603 	    break;
7604 	    try_nsd:
7605 	    po_reg_or_fail (REG_TYPE_NSD);
7606 	  }
7607 	  break;
7608 
7609 	case OP_RNDQMQ_RNSC_RR:
7610 	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7611 	  break;
7612 	try_rndq_rnsc_rr:
7613 	case OP_RNDQ_RNSC_RR:
7614 	  po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7615 	  break;
7616 	case OP_RNDQMQ_RNSC:
7617 	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7618 	  break;
7619 	try_rndq_rnsc:
7620 	case OP_RNDQ_RNSC:
7621 	  {
7622 	    po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7623 	    break;
7624 	    try_ndq:
7625 	    po_reg_or_fail (REG_TYPE_NDQ);
7626 	  }
7627 	  break;
7628 
7629 	case OP_RND_RNSC:
7630 	  {
7631 	    po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7632 	    break;
7633 	    try_vfd:
7634 	    po_reg_or_fail (REG_TYPE_VFD);
7635 	  }
7636 	  break;
7637 
7638 	case OP_VMOV:
7639 	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7640 	     not careful then bad things might happen.  */
7641 	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7642 	  break;
7643 
7644 	case OP_RNDQMQ_Ibig:
7645 	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7646 	  break;
7647 	try_rndq_ibig:
7648 	case OP_RNDQ_Ibig:
7649 	  {
7650 	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7651 	    break;
7652 	    try_immbig:
7653 	    /* There's a possibility of getting a 64-bit immediate here, so
7654 	       we need special handling.  */
7655 	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/false)
7656 		== FAIL)
7657 	      {
7658 		inst.error = _("immediate value is out of range");
7659 		goto failure;
7660 	      }
7661 	  }
7662 	  break;
7663 
7664 	case OP_RNDQMQ_I63b_RR:
7665 	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7666 	  break;
7667 	try_rndq_i63b_rr:
7668 	  po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7669 	  break;
7670 	try_rndq_i63b:
7671 	case OP_RNDQ_I63b:
7672 	  {
7673 	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7674 	    break;
7675 	    try_shimm:
7676 	    po_imm_or_fail (0, 63, true);
7677 	  }
7678 	  break;
7679 
7680 	case OP_RRnpcb:
7681 	  po_char_or_fail ('[');
7682 	  po_reg_or_fail  (REG_TYPE_RN);
7683 	  po_char_or_fail (']');
7684 	  break;
7685 
7686 	case OP_RRnpctw:
7687 	case OP_RRw:
7688 	case OP_oRRw:
7689 	  po_reg_or_fail (REG_TYPE_RN);
7690 	  if (skip_past_char (&str, '!') == SUCCESS)
7691 	    inst.operands[i].writeback = 1;
7692 	  break;
7693 
7694 	  /* Immediates */
7695 	case OP_I7:	 po_imm_or_fail (  0,	   7, false);	break;
7696 	case OP_I15:	 po_imm_or_fail (  0,	  15, false);	break;
7697 	case OP_I16:	 po_imm_or_fail (  1,	  16, false);	break;
7698 	case OP_I16z:	 po_imm_or_fail (  0,     16, false);   break;
7699 	case OP_I31:	 po_imm_or_fail (  0,	  31, false);	break;
7700 	case OP_I32:	 po_imm_or_fail (  1,	  32, false);	break;
7701 	case OP_I32z:	 po_imm_or_fail (  0,     32, false);   break;
7702 	case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, false); break;
7703 	case OP_I63s:	 po_imm_or_fail (-64,	  63, false);	break;
7704 	case OP_I63:	 po_imm_or_fail (  0,     63, false);   break;
7705 	case OP_I64:	 po_imm_or_fail (  1,     64, false);   break;
7706 	case OP_I64z:	 po_imm_or_fail (  0,     64, false);   break;
7707 	case OP_I127:	 po_imm_or_fail (  0,	 127, false);	break;
7708 	case OP_I255:	 po_imm_or_fail (  0,	 255, false);	break;
7709 	case OP_I511:	 po_imm_or_fail (  0,	 511, false);	break;
7710 	case OP_I4095:	 po_imm_or_fail (  0,	 4095, false);	break;
7711 	case OP_I8191:   po_imm_or_fail (  0,	 8191, false);	break;
7712 	case OP_I4b:	 po_imm_or_fail (  1,	   4, true);	break;
7713 	case OP_oI7b:
7714 	case OP_I7b:	 po_imm_or_fail (  0,	   7, true);	break;
7715 	case OP_I15b:	 po_imm_or_fail (  0,	  15, true);	break;
7716 	case OP_oI31b:
7717 	case OP_I31b:	 po_imm_or_fail (  0,	  31, true);	break;
7718 	case OP_oI32b:   po_imm_or_fail (  1,     32, true);    break;
7719 	case OP_oI32z:   po_imm_or_fail (  0,     32, true);    break;
7720 	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, true);	break;
7721 
7722 	  /* Immediate variants */
7723 	case OP_oI255c:
7724 	  po_char_or_fail ('{');
7725 	  po_imm_or_fail (0, 255, true);
7726 	  po_char_or_fail ('}');
7727 	  break;
7728 
7729 	case OP_I31w:
7730 	  /* The expression parser chokes on a trailing !, so we have
7731 	     to find it first and zap it.  */
7732 	  {
7733 	    char *s = str;
7734 	    while (*s && *s != ',')
7735 	      s++;
7736 	    if (s[-1] == '!')
7737 	      {
7738 		s[-1] = '\0';
7739 		inst.operands[i].writeback = 1;
7740 	      }
7741 	    po_imm_or_fail (0, 31, true);
7742 	    if (str == s - 1)
7743 	      str = s;
7744 	  }
7745 	  break;
7746 
7747 	  /* Expressions */
7748 	case OP_EXPi:	EXPi:
7749 	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7750 					      GE_OPT_PREFIX));
7751 	  break;
7752 
7753 	case OP_EXP:
7754 	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7755 					      GE_NO_PREFIX));
7756 	  break;
7757 
7758 	case OP_EXPr:	EXPr:
7759 	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7760 					      GE_NO_PREFIX));
7761 	  if (inst.relocs[0].exp.X_op == O_symbol)
7762 	    {
7763 	      val = parse_reloc (&str);
7764 	      if (val == -1)
7765 		{
7766 		  inst.error = _("unrecognized relocation suffix");
7767 		  goto failure;
7768 		}
7769 	      else if (val != BFD_RELOC_UNUSED)
7770 		{
7771 		  inst.operands[i].imm = val;
7772 		  inst.operands[i].hasreloc = 1;
7773 		}
7774 	    }
7775 	  break;
7776 
7777 	case OP_EXPs:
7778 	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7779 					      GE_NO_PREFIX));
7780 	  if (inst.relocs[i].exp.X_op == O_symbol)
7781 	    {
7782 	      inst.operands[i].hasreloc = 1;
7783 	    }
7784 	  else if (inst.relocs[i].exp.X_op == O_constant)
7785 	    {
7786 	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7787 	      inst.operands[i].hasreloc = 0;
7788 	    }
7789 	  break;
7790 
7791 	  /* Operand for MOVW or MOVT.  */
7792 	case OP_HALF:
7793 	  po_misc_or_fail (parse_half (&str));
7794 	  break;
7795 
7796 	  /* Register or expression.  */
7797 	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7798 	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7799 
7800 	  /* Register or immediate.  */
7801 	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
7802 	I0:		  po_imm_or_fail (0, 0, false);	      break;
7803 
7804 	case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32);	break;
7805 	I32:		     po_imm_or_fail (1, 32, false);	break;
7806 
7807 	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
7808 	IF:
7809 	  if (!is_immediate_prefix (*str))
7810 	    goto bad_args;
7811 	  str++;
7812 	  val = parse_fpa_immediate (&str);
7813 	  if (val == FAIL)
7814 	    goto failure;
7815 	  /* FPA immediates are encoded as registers 8-15.
7816 	     parse_fpa_immediate has already applied the offset.  */
7817 	  inst.operands[i].reg = val;
7818 	  inst.operands[i].isreg = 1;
7819 	  break;
7820 
7821 	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7822 	I32z:		  po_imm_or_fail (0, 32, false);	  break;
7823 
7824 	  /* Two kinds of register.  */
7825 	case OP_RIWR_RIWC:
7826 	  {
7827 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7828 	    if (!rege
7829 		|| (rege->type != REG_TYPE_MMXWR
7830 		    && rege->type != REG_TYPE_MMXWC
7831 		    && rege->type != REG_TYPE_MMXWCG))
7832 	      {
7833 		inst.error = _("iWMMXt data or control register expected");
7834 		goto failure;
7835 	      }
7836 	    inst.operands[i].reg = rege->number;
7837 	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7838 	  }
7839 	  break;
7840 
7841 	case OP_RIWC_RIWG:
7842 	  {
7843 	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7844 	    if (!rege
7845 		|| (rege->type != REG_TYPE_MMXWC
7846 		    && rege->type != REG_TYPE_MMXWCG))
7847 	      {
7848 		inst.error = _("iWMMXt control register expected");
7849 		goto failure;
7850 	      }
7851 	    inst.operands[i].reg = rege->number;
7852 	    inst.operands[i].isreg = 1;
7853 	  }
7854 	  break;
7855 
7856 	  /* Misc */
7857 	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
7858 	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
7859 	case OP_oROR:	 val = parse_ror (&str);		break;
7860 	try_cond:
7861 	case OP_COND:	 val = parse_cond (&str);		break;
7862 	case OP_oBARRIER_I15:
7863 	  po_barrier_or_imm (str); break;
7864 	  immediate:
7865 	  if (parse_immediate (&str, &val, 0, 15, true) == FAIL)
7866 	    goto failure;
7867 	  break;
7868 
7869 	case OP_wPSR:
7870 	case OP_rPSR:
7871 	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
7872 	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7873 	    {
7874 	      inst.error = _("Banked registers are not available with this "
7875 			     "architecture.");
7876 	      goto failure;
7877 	    }
7878 	  break;
7879 	  try_psr:
7880 	  val = parse_psr (&str, op_parse_code == OP_wPSR);
7881 	  break;
7882 
7883 	case OP_VLDR:
7884 	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7885 	  break;
7886 	try_sysreg:
7887 	  val = parse_sys_vldr_vstr (&str);
7888 	  break;
7889 
7890 	case OP_APSR_RR:
7891 	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
7892 	  break;
7893 	  try_apsr:
7894 	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7895 	     instruction).  */
7896 	  if (strncasecmp (str, "APSR_", 5) == 0)
7897 	    {
7898 	      unsigned found = 0;
7899 	      str += 5;
7900 	      while (found < 15)
7901 		switch (*str++)
7902 		  {
7903 		  case 'c': found = (found & 1) ? 16 : found | 1; break;
7904 		  case 'n': found = (found & 2) ? 16 : found | 2; break;
7905 		  case 'z': found = (found & 4) ? 16 : found | 4; break;
7906 		  case 'v': found = (found & 8) ? 16 : found | 8; break;
7907 		  default: found = 16;
7908 		  }
7909 	      if (found != 15)
7910 		goto failure;
7911 	      inst.operands[i].isvec = 1;
7912 	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
7913 	      inst.operands[i].reg = REG_PC;
7914 	    }
7915 	  else
7916 	    goto failure;
7917 	  break;
7918 
7919 	case OP_TB:
7920 	  po_misc_or_fail (parse_tb (&str));
7921 	  break;
7922 
7923 	  /* Register lists.  */
7924 	case OP_REGLST:
7925 	  val = parse_reg_list (&str, REGLIST_RN);
7926 	  if (*str == '^')
7927 	    {
7928 	      inst.operands[i].writeback = 1;
7929 	      str++;
7930 	    }
7931 	  break;
7932 
7933 	case OP_CLRMLST:
7934 	  val = parse_reg_list (&str, REGLIST_CLRM);
7935 	  break;
7936 
7937 	case OP_VRSLST:
7938 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7939 				    &partial_match);
7940 	  break;
7941 
7942 	case OP_VRDLST:
7943 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7944 				    &partial_match);
7945 	  break;
7946 
7947 	case OP_VRSDLST:
7948 	  /* Allow Q registers too.  */
7949 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7950 				    REGLIST_NEON_D, &partial_match);
7951 	  if (val == FAIL)
7952 	    {
7953 	      inst.error = NULL;
7954 	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7955 					REGLIST_VFP_S, &partial_match);
7956 	      inst.operands[i].issingle = 1;
7957 	    }
7958 	  break;
7959 
7960 	case OP_VRSDVLST:
7961 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7962 				    REGLIST_VFP_D_VPR, &partial_match);
7963 	  if (val == FAIL && !partial_match)
7964 	    {
7965 	      inst.error = NULL;
7966 	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7967 					REGLIST_VFP_S_VPR, &partial_match);
7968 	      inst.operands[i].issingle = 1;
7969 	    }
7970 	  break;
7971 
7972 	case OP_NRDLST:
7973 	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7974 				    REGLIST_NEON_D, &partial_match);
7975 	  break;
7976 
7977 	case OP_MSTRLST4:
7978 	case OP_MSTRLST2:
7979 	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7980 					   1, &inst.operands[i].vectype);
7981 	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7982 	    goto failure;
7983 	  break;
7984 	case OP_NSTRLST:
7985 	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7986 					   0, &inst.operands[i].vectype);
7987 	  break;
7988 
7989 	  /* Addressing modes */
7990 	case OP_ADDRMVE:
7991 	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7992 	  break;
7993 
7994 	case OP_ADDR:
7995 	  po_misc_or_fail (parse_address (&str, i));
7996 	  break;
7997 
7998 	case OP_ADDRGLDR:
7999 	  po_misc_or_fail_no_backtrack (
8000 	    parse_address_group_reloc (&str, i, GROUP_LDR));
8001 	  break;
8002 
8003 	case OP_ADDRGLDRS:
8004 	  po_misc_or_fail_no_backtrack (
8005 	    parse_address_group_reloc (&str, i, GROUP_LDRS));
8006 	  break;
8007 
8008 	case OP_ADDRGLDC:
8009 	  po_misc_or_fail_no_backtrack (
8010 	    parse_address_group_reloc (&str, i, GROUP_LDC));
8011 	  break;
8012 
8013 	case OP_SH:
8014 	  po_misc_or_fail (parse_shifter_operand (&str, i));
8015 	  break;
8016 
8017 	case OP_SHG:
8018 	  po_misc_or_fail_no_backtrack (
8019 	    parse_shifter_operand_group_reloc (&str, i));
8020 	  break;
8021 
8022 	case OP_oSHll:
8023 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
8024 	  break;
8025 
8026 	case OP_oSHar:
8027 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
8028 	  break;
8029 
8030 	case OP_oSHllar:
8031 	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
8032 	  break;
8033 
8034 	case OP_RMQRZ:
8035 	case OP_oRMQRZ:
8036 	  po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
8037 	  break;
8038 
8039 	case OP_RR_ZR:
8040 	try_rr_zr:
8041 	  po_reg_or_goto (REG_TYPE_RN, ZR);
8042 	  break;
8043 	ZR:
8044 	  po_reg_or_fail (REG_TYPE_ZR);
8045 	  break;
8046 
8047 	default:
8048 	  as_fatal (_("unhandled operand code %d"), op_parse_code);
8049 	}
8050 
8051       /* Various value-based sanity checks and shared operations.  We
8052 	 do not signal immediate failures for the register constraints;
8053 	 this allows a syntax error to take precedence.	 */
8054       switch (op_parse_code)
8055 	{
8056 	case OP_oRRnpc:
8057 	case OP_RRnpc:
8058 	case OP_RRnpcb:
8059 	case OP_RRw:
8060 	case OP_oRRw:
8061 	case OP_RRnpc_I0:
8062 	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8063 	    inst.error = BAD_PC;
8064 	  break;
8065 
8066 	case OP_oRRnpcsp:
8067 	case OP_RRnpcsp:
8068 	case OP_RRnpcsp_I32:
8069 	  if (inst.operands[i].isreg)
8070 	    {
8071 	      if (inst.operands[i].reg == REG_PC)
8072 		inst.error = BAD_PC;
8073 	      else if (inst.operands[i].reg == REG_SP
8074 		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8075 			  relaxed since ARMv8-A.  */
8076 		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8077 		{
8078 		  gas_assert (thumb);
8079 		  inst.error = BAD_SP;
8080 		}
8081 	    }
8082 	  break;
8083 
8084 	case OP_RRnpctw:
8085 	  if (inst.operands[i].isreg
8086 	      && inst.operands[i].reg == REG_PC
8087 	      && (inst.operands[i].writeback || thumb))
8088 	    inst.error = BAD_PC;
8089 	  break;
8090 
8091 	case OP_RVSD_COND:
8092 	case OP_VLDR:
8093 	  if (inst.operands[i].isreg)
8094 	    break;
8095 	/* fall through.  */
8096 
8097 	case OP_CPSF:
8098 	case OP_ENDI:
8099 	case OP_oROR:
8100 	case OP_wPSR:
8101 	case OP_rPSR:
8102 	case OP_COND:
8103 	case OP_oBARRIER_I15:
8104 	case OP_REGLST:
8105 	case OP_CLRMLST:
8106 	case OP_VRSLST:
8107 	case OP_VRDLST:
8108 	case OP_VRSDLST:
8109 	case OP_VRSDVLST:
8110 	case OP_NRDLST:
8111 	case OP_NSTRLST:
8112 	case OP_MSTRLST2:
8113 	case OP_MSTRLST4:
8114 	  if (val == FAIL)
8115 	    goto failure;
8116 	  inst.operands[i].imm = val;
8117 	  break;
8118 
8119 	case OP_LR:
8120 	case OP_oLR:
8121 	  if (inst.operands[i].reg != REG_LR)
8122 	    inst.error = _("operand must be LR register");
8123 	  break;
8124 
8125 	case OP_RMQRZ:
8126 	case OP_oRMQRZ:
8127 	case OP_RR_ZR:
8128 	  if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8129 	    inst.error = BAD_PC;
8130 	  break;
8131 
8132 	case OP_RRe:
8133 	  if (inst.operands[i].isreg
8134 	      && (inst.operands[i].reg & 0x00000001) != 0)
8135 	    inst.error = BAD_ODD;
8136 	  break;
8137 
8138 	case OP_RRo:
8139 	  if (inst.operands[i].isreg)
8140 	    {
8141 	      if ((inst.operands[i].reg & 0x00000001) != 1)
8142 		inst.error = BAD_EVEN;
8143 	      else if (inst.operands[i].reg == REG_SP)
8144 		as_tsktsk (MVE_BAD_SP);
8145 	      else if (inst.operands[i].reg == REG_PC)
8146 		inst.error = BAD_PC;
8147 	    }
8148 	  break;
8149 
8150 	default:
8151 	  break;
8152 	}
8153 
8154       /* If we get here, this operand was successfully parsed.	*/
8155       inst.operands[i].present = 1;
8156       continue;
8157 
8158     bad_args:
8159       inst.error = BAD_ARGS;
8160 
8161     failure:
8162       if (!backtrack_pos)
8163 	{
8164 	  /* The parse routine should already have set inst.error, but set a
8165 	     default here just in case.  */
8166 	  if (!inst.error)
8167 	    inst.error = BAD_SYNTAX;
8168 	  return FAIL;
8169 	}
8170 
8171       /* Do not backtrack over a trailing optional argument that
8172 	 absorbed some text.  We will only fail again, with the
8173 	 'garbage following instruction' error message, which is
8174 	 probably less helpful than the current one.  */
8175       if (backtrack_index == i && backtrack_pos != str
8176 	  && upat[i+1] == OP_stop)
8177 	{
8178 	  if (!inst.error)
8179 	    inst.error = BAD_SYNTAX;
8180 	  return FAIL;
8181 	}
8182 
8183       /* Try again, skipping the optional argument at backtrack_pos.  */
8184       str = backtrack_pos;
8185       inst.error = backtrack_error;
8186       inst.operands[backtrack_index].present = 0;
8187       i = backtrack_index;
8188       backtrack_pos = 0;
8189     }
8190 
8191   /* Check that we have parsed all the arguments.  */
8192   if (*str != '\0' && !inst.error)
8193     inst.error = _("garbage following instruction");
8194 
8195   return inst.error ? FAIL : SUCCESS;
8196 }
8197 
8198 #undef po_char_or_fail
8199 #undef po_reg_or_fail
8200 #undef po_reg_or_goto
8201 #undef po_imm_or_fail
8202 #undef po_scalar_or_fail
8203 #undef po_barrier_or_imm
8204 
/* Shorthand macro for instruction encoding functions issuing errors:
   if EXPR is true, record ERR in inst.error and return from the
   *enclosing* function.  Only usable where a bare "return;" is legal,
   i.e. in functions returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8216 
/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
   instructions are unpredictable if these registers are used.  This
   is the BadReg predicate in ARM's Thumb-2 documentation.

   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
   places, while the restriction on REG_SP was relaxed since ARMv8-A.

   Like the constraint macro, this returns from the enclosing (void)
   function on failure.  */
#define reject_bad_reg(reg)					\
  do								\
   if (reg == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if (reg == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
8237 
/* If REG is R13 (the stack pointer), warn that its use is
   deprecated.  Only fires when the user asked for deprecation
   warnings (warn_on_deprecated).  */
#define warn_deprecated_sp(reg)			\
  do						\
    if (warn_on_deprecated && reg == REG_SP)	\
       as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8245 
/* Functions for operand encoding.  ARM, then Thumb.  */

/* Rotate the 32-bit value V left by N bits.  Both shift counts are
   masked to the range 0-31, so N == 0 does not produce an undefined
   shift by 32.  */
#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
8249 
/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.

   The only binary encoding difference is the Coprocessor number.  Coprocessor
   9 is used for half-precision calculations or conversions.  The format of the
   instruction is the same as the equivalent Coprocessor 10 instruction that
   exists for Single-Precision operation.  */

static void
do_scalar_fp16_v82_encode (void)
{
  /* Conditional execution of these instructions is UNPREDICTABLE;
     diagnose it, but only as a warning.  */
  if (inst.cond < COND_ALWAYS)
    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
	       " the behaviour is UNPREDICTABLE"));
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
	      _(BAD_FP16));

  /* Replace the coprocessor field (bits [11:8]) with 9.  */
  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
  mark_feature_used (&arm_ext_fp16);
}
8269 
8270 /* If VAL can be encoded in the immediate field of an ARM instruction,
8271    return the encoded form.  Otherwise, return FAIL.  */
8272 
8273 static unsigned int
encode_arm_immediate(unsigned int val)8274 encode_arm_immediate (unsigned int val)
8275 {
8276   unsigned int a, i;
8277 
8278   if (val <= 0xff)
8279     return val;
8280 
8281   for (i = 2; i < 32; i += 2)
8282     if ((a = rotate_left (val, i)) <= 0xff)
8283       return a | (i << 7); /* 12-bit pack: [shift-cnt,const].  */
8284 
8285   return FAIL;
8286 }
8287 
8288 /* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8289    return the encoded form.  Otherwise, return FAIL.  */
8290 static unsigned int
encode_thumb32_immediate(unsigned int val)8291 encode_thumb32_immediate (unsigned int val)
8292 {
8293   unsigned int a, i;
8294 
8295   if (val <= 0xff)
8296     return val;
8297 
8298   for (i = 1; i <= 24; i++)
8299     {
8300       a = val >> i;
8301       if ((val & ~(0xffU << i)) == 0)
8302 	return ((val >> i) & 0x7f) | ((32 - i) << 7);
8303     }
8304 
8305   a = val & 0xff;
8306   if (val == ((a << 16) | a))
8307     return 0x100 | a;
8308   if (val == ((a << 24) | (a << 16) | (a << 8) | a))
8309     return 0x300 | a;
8310 
8311   a = val & 0xff00;
8312   if (val == ((a << 16) | a))
8313     return 0x200 | (a >> 8);
8314 
8315   return FAIL;
8316 }
/* Encode a VFP SP or DP register number into inst.instruction.
   POS says which operand slot (and hence which bit positions) the
   register occupies.  */

static void
encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
{
  /* D16-D31 require the D32 register bank: record the feature as used,
     or reject the register if the selected FPU lacks it.  */
  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
      && reg > 15)
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	{
	  first_error (_("D register out of range for selected VFP version"));
	  return;
	}
    }

  /* Single-precision numbers split as (reg >> 1):(reg & 1); double
     precision as (reg & 15):(reg >> 4).  The field positions depend
     on the operand slot.  */
  switch (pos)
    {
    case VFP_REG_Sd:
      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
      break;

    case VFP_REG_Sn:
      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
      break;

    case VFP_REG_Sm:
      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
      break;

    case VFP_REG_Dd:
      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
      break;

    case VFP_REG_Dn:
      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
      break;

    case VFP_REG_Dm:
      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
      break;

    default:
      abort ();
    }
}
8371 
/* Encode a <shift> in an ARM-format instruction.  The immediate,
   if any, is handled by md_apply_fix.	 */
static void
encode_arm_shift (int i)
{
  /* register-shifted register.  */
  if (inst.operands[i].immisreg)
    {
      int op_index;
      /* r15 anywhere in a register-shifted-register operand is
	 unpredictable; warn for each register operand seen so far.  */
      for (op_index = 0; op_index <= i; ++op_index)
	{
	  /* Check the operand only when it's presented.  In pre-UAL syntax,
	     if the destination register is the same as the first operand, two
	     register form of the instruction can be used.  */
	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
	      && inst.operands[op_index].reg == REG_PC)
	    as_warn (UNPRED_REG ("r15"));
	}

      /* The shift-count register itself.  */
      if (inst.operands[i].imm == REG_PC)
	as_warn (UNPRED_REG ("r15"));
    }

  /* RRX is encoded with the ROR shift kind and no shift amount.  */
  if (inst.operands[i].shift_kind == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 5;
  else
    {
      inst.instruction |= inst.operands[i].shift_kind << 5;
      if (inst.operands[i].immisreg)
	{
	  inst.instruction |= SHIFT_BY_REG;
	  inst.instruction |= inst.operands[i].imm << 8;
	}
      else
	/* Immediate shift amount: filled in later by md_apply_fix.  */
	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
    }
}
8409 
8410 static void
encode_arm_shifter_operand(int i)8411 encode_arm_shifter_operand (int i)
8412 {
8413   if (inst.operands[i].isreg)
8414     {
8415       inst.instruction |= inst.operands[i].reg;
8416       encode_arm_shift (i);
8417     }
8418   else
8419     {
8420       inst.instruction |= INST_IMMEDIATE;
8421       if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
8422 	inst.instruction |= inst.operands[i].imm;
8423     }
8424 }
8425 
/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.
   Inserts the base register and the pre-index/write-back bits common
   to both addressing modes.  IS_T marks the user-mode ("T") forms,
   which only accept post-indexed addressing.  */
static void
encode_arm_addr_mode_common (int i, bool is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register Rn in bits [19:16].  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Transfer register equal to a written-back (or post-indexed) base
     register: warn, but still assemble.  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8468 
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.	 If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bool is_t)
{
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset, possibly with an immediate shift.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX uses the ROR shift kind with no shift amount; any
	     other shift amount is fixed up by md_apply_fix.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bool is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8528 
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.	 Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bool is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is malformed.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset.  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Mode 3 carries an 8-bit immediate (BFD_RELOC_ARM_OFFSET_IMM8).  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8572 
8573 /* Write immediate bits [7:0] to the following locations:
8574 
8575   |28/24|23     19|18 16|15                    4|3     0|
8576   |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8577 
8578   This function is used by VMOV/VMVN/VORR/VBIC.  */
8579 
8580 static void
neon_write_immbits(unsigned immbits)8581 neon_write_immbits (unsigned immbits)
8582 {
8583   inst.instruction |= immbits & 0xf;
8584   inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8585   inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8586 }
8587 
/* Invert the low-order SIZE bits of XHI:XLO.  Either pointer may be
   NULL, in which case that half is ignored.  */

static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned mask;

  switch (size)
    {
    case 8:
      mask = 0xff;
      break;

    case 16:
      mask = 0xffff;
      break;

    case 32:
    case 64:
      mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  if (xlo)
    *xlo = ~*xlo & mask;

  /* Only a 64-bit invert touches the high word.  */
  if (size == 64 && xhi)
    *xhi = ~*xhi & 0xffffffff;
}
8624 
/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
   A, B, C, D: i.e. every byte is either 0x00 or 0xff.  */

static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = imm & (0xffu << (byte * 8));

      if (field != 0 && field != (0xffu << (byte * 8)))
	return 0;
    }

  return 1;
}
8636 
/* For immediate of above form, return 0bABCD: the low bit of each
   byte, packed into a nibble.  */

static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned nibble = 0;
  int byte;

  for (byte = 0; byte < 4; byte++)
    nibble |= ((imm >> (byte * 8)) & 1u) << byte;

  return nibble;
}
8645 
/* Compress quarter-float representation to 0b...000 abcdefgh: the
   sign bit (bit 31) moves to bit 7, bits [25:19] supply bits [6:0].  */

static unsigned
neon_qfloat_bits (unsigned imm)
{
  unsigned low7 = (imm >> 19) & 0x7f;
  unsigned sign = (imm >> 24) & 0x80;

  return sign | low7;
}
8653 
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.  Returns FAIL when the constant cannot be
   represented as a modified immediate at any element size.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Quarter-float immediate (cmode 0xf): only for 32-bit float moves,
     never for the MVN (*op == 1) form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* 64-bit value whose bytes are each all-zeros or all-ones
	 (cmode 0xe with op forced to 1).  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise only representable if both halves match; retry the
	 low half as a 32-bit pattern.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* Single non-zero byte within the 32-bit word.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* Byte followed by trailing 0xff bytes (cmodes 0xc/0xd).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit pattern if the two halfwords match.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* Single non-zero byte within the halfword.  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit pattern if both bytes match.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8763 
8764 #if defined BFD_HOST_64_BIT
8765 /* Returns TRUE if double precision value V may be cast
8766    to single precision without loss of accuracy.  */
8767 
8768 static bool
is_double_a_single(bfd_uint64_t v)8769 is_double_a_single (bfd_uint64_t v)
8770 {
8771   int exp = (v >> 52) & 0x7FF;
8772   bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
8773 
8774   return ((exp == 0 || exp == 0x7FF
8775 	   || (exp >= 1023 - 126 && exp <= 1023 + 127))
8776 	  && (mantissa & 0x1FFFFFFFL) == 0);
8777 }
8778 
8779 /* Returns a double precision value casted to single precision
8780    (ignoring the least significant bits in exponent and mantissa).  */
8781 
8782 static int
double_to_single(bfd_uint64_t v)8783 double_to_single (bfd_uint64_t v)
8784 {
8785   unsigned int sign = (v >> 63) & 1;
8786   int exp = (v >> 52) & 0x7FF;
8787   bfd_uint64_t mantissa = v & 0xFFFFFFFFFFFFFULL;
8788 
8789   if (exp == 0x7FF)
8790     exp = 0xFF;
8791   else
8792     {
8793       exp = exp - 1023 + 127;
8794       if (exp >= 0xFF)
8795 	{
8796 	  /* Infinity.  */
8797 	  exp = 0x7F;
8798 	  mantissa = 0;
8799 	}
8800       else if (exp < 0)
8801 	{
8802 	  /* No denormalized numbers.  */
8803 	  exp = 0;
8804 	  mantissa = 0;
8805 	}
8806     }
8807   mantissa >>= 29;
8808   return (sign << 31) | (exp << 23) | mantissa;
8809 }
8810 #endif /* BFD_HOST_64_BIT */
8811 
/* Kind of constant an "=expr" pseudo-load produces; selects the
   instruction set / register bank in move_or_literal_pool below.  */
enum lit_type
{
  CONST_THUMB,	/* Thumb / Thumb-2 integer load.  */
  CONST_ARM,	/* ARM-state integer load.  */
  CONST_VEC	/* VFP / Neon load.  */
};

/* Declared here for use by move_or_literal_pool below.  */
static void do_vfp_nsyn_opcode (const char *);
8820 
/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
   Determine whether it can be performed with a move instruction; if
   it can, convert inst.instruction to that move instruction and
   return true; if it can't, convert inst.instruction to a literal-pool
   load and return FALSE.  If this is not a valid thing to do in the
   current context, set inst.error and return TRUE.

   inst.operands[i] describes the destination register.	 */

static bool
move_or_literal_pool (int i, enum lit_type t, bool mode_3)
{
  unsigned long tbit;
  bool thumb_p = (t == CONST_THUMB);
  bool arm_p   = (t == CONST_ARM);

  /* Pick the load bit for the target instruction set; only load
     instructions may use the "=expr" syntax.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return true;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return true;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_uint64_t v;
#else
      valueT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  /* Gather the low littlenums of the bignum (or of the flonum
	     converted to a bignum) into V.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v = l[3] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[2] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[1] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[0] & LITTLENUM_MASK;
#else
	  v = l[1] & LITTLENUM_MASK;
	  v <<= LITTLENUM_NUMBER_OF_BITS;
	  v |= l[0] & LITTLENUM_MASK;
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* LDR via "=expr" should not lead to a flag-setting
		 instruction being chosen, so we do not check whether a
		 Thumb-1 MOVS could encode the value.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bool isNegated = false;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm == (unsigned int) FAIL)
		    {
		      /* The value itself is not encodable; its
			 complement may be (use MVN).  */
		      newimm = encode_thumb32_immediate (~v);
		      isNegated = true;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return true;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /*  In case this replacement is being done on Armv8-M
			  Baseline we need to make sure to disable the
			  instruction size check, as otherwise GAS will reject
			  the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return true;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return true;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Try to rewrite a 64-bit vldr as a vmov with a Neon
		 modified immediate, possibly via the inverted value.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      /* High word: second register of a regisimm pair, or the
		 sign-/zero-extension of the low word.  */
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, false, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return true;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return true;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return true;
		}
	    }
#endif
	}
    }

  /* No move possible: queue the constant in the literal pool and
     rewrite the operand as a PC-relative reference to it.  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return true;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return false;
}
9067 
9068 /* inst.operands[i] was set up by parse_address.  Encode it into an
9069    ARM-format instruction.  Reject all forms which cannot be encoded
9070    into a coprocessor load/store instruction.  If wb_ok is false,
9071    reject use of writeback; if unind_ok is false, reject use of
9072    unindexed addressing.  If reloc_override is not 0, use it instead
9073    of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9074    (in which case it is preserved).  */
9075 
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  /* A non-register operand here is a literal (e.g. "vldr d0, =imm");
     that is only acceptable when the destination is a vector register,
     in which case try to convert it to a vmov or literal-pool load.  */
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/false))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;

  /* Pre-indexed and post-indexed addressing are mutually exclusive.  */
  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Option value goes in the low bits; the U bit must be set.  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the relocation for the offset: an explicit override wins;
     otherwise preserve a group relocation (or LDR_PC_G0) selected
     during parsing; otherwise use the generic coprocessor-offset
     relocation for the current instruction set.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9144 
9145 /* Functions for instruction encoding, sorted by sub-architecture.
9146    First some generics; their names are taken from the conventional
9147    bit positions for register arguments in ARM format instructions.  */
9148 
/* Encoder for instructions with no operands: the opcode table entry is
   already complete, so there is nothing to add.  */
static void
do_noargs (void)
{
}
9153 
9154 static void
do_rd(void)9155 do_rd (void)
9156 {
9157   inst.instruction |= inst.operands[0].reg << 12;
9158 }
9159 
9160 static void
do_rn(void)9161 do_rn (void)
9162 {
9163   inst.instruction |= inst.operands[0].reg << 16;
9164 }
9165 
9166 static void
do_rd_rm(void)9167 do_rd_rm (void)
9168 {
9169   inst.instruction |= inst.operands[0].reg << 12;
9170   inst.instruction |= inst.operands[1].reg;
9171 }
9172 
9173 static void
do_rm_rn(void)9174 do_rm_rn (void)
9175 {
9176   inst.instruction |= inst.operands[0].reg;
9177   inst.instruction |= inst.operands[1].reg << 16;
9178 }
9179 
9180 static void
do_rd_rn(void)9181 do_rd_rn (void)
9182 {
9183   inst.instruction |= inst.operands[0].reg << 12;
9184   inst.instruction |= inst.operands[1].reg << 16;
9185 }
9186 
9187 static void
do_rn_rd(void)9188 do_rn_rd (void)
9189 {
9190   inst.instruction |= inst.operands[0].reg << 16;
9191   inst.instruction |= inst.operands[1].reg << 12;
9192 }
9193 
9194 static void
do_tt(void)9195 do_tt (void)
9196 {
9197   inst.instruction |= inst.operands[0].reg << 8;
9198   inst.instruction |= inst.operands[1].reg << 16;
9199 }
9200 
9201 static bool
check_obsolete(const arm_feature_set * feature,const char * msg)9202 check_obsolete (const arm_feature_set *feature, const char *msg)
9203 {
9204   if (ARM_CPU_IS_ANY (cpu_variant))
9205     {
9206       as_tsktsk ("%s", msg);
9207       return true;
9208     }
9209   else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9210     {
9211       as_bad ("%s", msg);
9212       return true;
9213     }
9214 
9215   return false;
9216 }
9217 
/* Encode Rd (bits 15:12), Rm (bits 3:0) and Rn (bits 19:16), with
   extra diagnostics for the SWP/SWPB instructions.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      /* SWP{b} Rd, Rm, [Rn] is UNPREDICTABLE when Rn overlaps Rd or Rm.  */
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9241 
9242 static void
do_rd_rn_rm(void)9243 do_rd_rn_rm (void)
9244 {
9245   inst.instruction |= inst.operands[0].reg << 12;
9246   inst.instruction |= inst.operands[1].reg << 16;
9247   inst.instruction |= inst.operands[2].reg;
9248 }
9249 
/* Encode Rm (bits 3:0), Rd (bits 15:12) and Rn (bits 19:16); any
   offset expression attached to the address operand must be zero.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* The parsed address may carry an offset expression; only a literal
     zero (or no offset at all, which parses as O_illegal) is valid.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9262 
9263 static void
do_imm0(void)9264 do_imm0 (void)
9265 {
9266   inst.instruction |= inst.operands[0].imm;
9267 }
9268 
9269 static void
do_rd_cpaddr(void)9270 do_rd_cpaddr (void)
9271 {
9272   inst.instruction |= inst.operands[0].reg << 12;
9273   encode_arm_cp_address (1, true, true, 0);
9274 }
9275 
9276 /* ARM instructions, in alphabetical order by function name (except
9277    that wrapper functions appear immediately after the function they
9278    wrap).  */
9279 
9280 /* This is a pseudo-op of the form "adr rd, label" to be converted
9281    into a relative address of the form "add rd, pc, #label-.-8".  */
9282 
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, a defined Thumb function gets the low bit of its
     address set so the result is a valid Thumb entry point.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9301 
9302 /* This is a pseudo-op of the form "adrl rd, label" to be converted
9303    into a relative address of the form:
9304    add rd, pc, #low(label-.-8)"
9305    add rd, rd, #high(label-.-8)"  */
9306 
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel	       = 1;
  /* The pseudo-op expands to a two-instruction sequence.  */
  inst.size		       = INSN_SIZE * 2;
  /* Compensate for the PC reading 8 bytes ahead in ARM state.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* When interworking, a defined Thumb function gets the low bit of its
     address set so the result is a valid Thumb entry point.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9326 
9327 static void
do_arit(void)9328 do_arit (void)
9329 {
9330   constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
9331 	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
9332 	      THUMB1_RELOC_ONLY);
9333   if (!inst.operands[1].present)
9334     inst.operands[1].reg = inst.operands[0].reg;
9335   inst.instruction |= inst.operands[0].reg << 12;
9336   inst.instruction |= inst.operands[1].reg << 16;
9337   encode_arm_shifter_operand (2);
9338 }
9339 
9340 static void
do_barrier(void)9341 do_barrier (void)
9342 {
9343   if (inst.operands[0].present)
9344     inst.instruction |= inst.operands[0].imm;
9345   else
9346     inst.instruction |= 0xf;
9347 }
9348 
9349 static void
do_bfc(void)9350 do_bfc (void)
9351 {
9352   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
9353   constraint (msb > 32, _("bit-field extends past end of register"));
9354   /* The instruction encoding stores the LSB and MSB,
9355      not the LSB and width.  */
9356   inst.instruction |= inst.operands[0].reg << 12;
9357   inst.instruction |= inst.operands[1].imm << 7;
9358   inst.instruction |= (msb - 1) << 16;
9359 }
9360 
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  /* msb = lsb + width; the field must fit in the 32-bit register.  */
  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9380 
9381 static void
do_bfx(void)9382 do_bfx (void)
9383 {
9384   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
9385 	      _("bit-field extends past end of register"));
9386   inst.instruction |= inst.operands[0].reg << 12;
9387   inst.instruction |= inst.operands[1].reg;
9388   inst.instruction |= inst.operands[2].imm << 7;
9389   inst.instruction |= (inst.operands[3].imm - 1) << 16;
9390 }
9391 
9392 /* ARM V5 breakpoint instruction (argument parse)
9393      BKPT <16 bit unsigned immediate>
9394      Instruction is not conditional.
9395 	The bit pattern given in insns[] has the COND_ALWAYS condition,
9396 	and it is an error if the caller tried to override that.  */
9397 
9398 static void
do_bkpt(void)9399 do_bkpt (void)
9400 {
9401   /* Top 12 of 16 bits to bits 19:8.  */
9402   inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9403 
9404   /* Bottom 4 of 16 bits to bits 3:0.  */
9405   inst.instruction |= inst.operands[0].imm & 0xf;
9406 }
9407 
/* Set up the relocation for a branch instruction.  DEFAULT_RELOC is
   used unless the operand carried an explicit (plt) or (tlscall)
   suffix, which selects the corresponding special relocation.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      /* (tlscall) maps to the Thumb or ARM variant of the TLS reloc
	 depending on the current instruction set.  */
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9424 
/* Encode a B{cond} instruction, selecting the EABI-specific jump
   relocation for EABI v4 and later objects.  */
static void
do_branch (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9435 
/* Encode a BL{cond} instruction.  For EABI v4+ an unconditional BL
   gets the call relocation; a conditional BL gets the plain jump
   relocation instead.  */
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9451 
9452 /* ARM V5 branch-link-exchange instruction (argument parse)
9453      BLX <target_addr>		ie BLX(1)
9454      BLX{<condition>} <Rm>	ie BLX(2)
9455    Unfortunately, there are two different opcodes for this mnemonic.
9456    So, the insns[].value is not used, and the code here zaps values
9457 	into inst.instruction.
9458    Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
9459 
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      /* BLX(1) has its own fixed opcode, distinct from the insns[]
	 value used for the register form.  */
      inst.instruction = 0xfa000000;
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9483 
static void
do_bx (void)
{
  bool want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
      want_reloc = true;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    /* Pre-v4 EABI (and non-ELF) objects never get the V4BX reloc.  */
    want_reloc = false;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9508 
9509 
9510 /* ARM v5TEJ.  Jump to Jazelle code.  */
9511 
9512 static void
do_bxj(void)9513 do_bxj (void)
9514 {
9515   if (inst.operands[0].reg == REG_PC)
9516     as_tsktsk (_("use of r15 in bxj is not really useful"));
9517 
9518   inst.instruction |= inst.operands[0].reg;
9519 }
9520 
9521 /* Co-processor data operation:
9522       CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9523       CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
static void
do_cdp (void)
{
  inst.instruction |= inst.operands[0].reg << 8;   /* coproc.  */
  inst.instruction |= inst.operands[1].imm << 20;  /* opcode_1.  */
  inst.instruction |= inst.operands[2].reg << 12;  /* CRd.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;	   /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opcode_2.  */
}
9534 
9535 static void
do_cmp(void)9536 do_cmp (void)
9537 {
9538   inst.instruction |= inst.operands[0].reg << 16;
9539   encode_arm_shifter_operand (1);
9540 }
9541 
9542 /* Transfer between coprocessor and ARM registers.
9543    MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9544    MRC2
9545    MCR{cond}
9546    MCR2
9547 
9548    No special properties.  */
9549 
/* Description of a coprocessor register whose access via MRC/MCR is
   deprecated or obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the encoding.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Architectures where use is deprecated.  */
  arm_feature_set obsoleted;	/* Architectures where use is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for deprecated use.  */
  const char *obs_msg;		/* Diagnostic for obsoleted use.  */
};
9562 
#define DEPR_ACCESS_V8 \
  N_("This coprocessor register access is deprecated in ARMv8")

/* Table of all deprecated coprocessor registers.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};

#undef DEPR_ACCESS_V8

/* Number of entries in the table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9590 
/* Encode an MRC/MRC2/MCR/MCR2 coprocessor register transfer, checking
   register restrictions and warning about deprecated register
   accesses.  */
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

    /* Scan the table of deprecated coprocessor registers and warn if
       this access matches one of them.  */
    for (i = 0; i < deprecated_coproc_reg_count; ++i)
      {
	const struct deprecated_coproc_regs_s *r =
	  deprecated_coproc_regs + i;

	if (inst.operands[0].reg == r->cp
	    && inst.operands[1].imm == r->opc1
	    && inst.operands[3].reg == r->crn
	    && inst.operands[4].reg == r->crm
	    && inst.operands[5].imm == r->opc2)
	  {
	    if (! ARM_CPU_IS_ANY (cpu_variant)
		&& warn_on_deprecated
		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	      as_tsktsk ("%s", r->dep_msg);
	  }
      }

  inst.instruction |= inst.operands[0].reg << 8;   /* coproc.  */
  inst.instruction |= inst.operands[1].imm << 21;  /* opc1.  */
  inst.instruction |= Rd << 12;			   /* Rd/Rt.  */
  inst.instruction |= inst.operands[3].reg << 16;  /* CRn.  */
  inst.instruction |= inst.operands[4].reg;	   /* CRm.  */
  inst.instruction |= inst.operands[5].imm << 5;   /* opc2.  */
}
9640 
9641 /* Transfer between coprocessor register and pair of ARM registers.
9642    MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
9643    MCRR2
9644    MRRC{cond}
9645    MRRC2
9646 
9647    Two XScale instructions are special cases of these:
9648 
9649      MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
9650      MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0
9651 
9652    Result unpredictable if Rd or Rn is R15.  */
9653 
/* Encode an MCRR/MCRR2/MRRC/MRRC2 two-register coprocessor transfer,
   rejecting register combinations that are unpredictable.  */
static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;  /* coproc.  */
  inst.instruction |= inst.operands[1].imm << 4;  /* opc1.  */
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;	  /* CRm.  */
}
9687 
9688 static void
do_cpsi(void)9689 do_cpsi (void)
9690 {
9691   inst.instruction |= inst.operands[0].imm << 6;
9692   if (inst.operands[1].present)
9693     {
9694       inst.instruction |= CPSI_MMOD;
9695       inst.instruction |= inst.operands[1].imm;
9696     }
9697 }
9698 
9699 static void
do_dbg(void)9700 do_dbg (void)
9701 {
9702   inst.instruction |= inst.operands[0].imm;
9703 }
9704 
9705 static void
do_div(void)9706 do_div (void)
9707 {
9708   unsigned Rd, Rn, Rm;
9709 
9710   Rd = inst.operands[0].reg;
9711   Rn = (inst.operands[1].present
9712 	? inst.operands[1].reg : Rd);
9713   Rm = inst.operands[2].reg;
9714 
9715   constraint ((Rd == REG_PC), BAD_PC);
9716   constraint ((Rn == REG_PC), BAD_PC);
9717   constraint ((Rm == REG_PC), BAD_PC);
9718 
9719   inst.instruction |= Rd << 16;
9720   inst.instruction |= Rn << 0;
9721   inst.instruction |= Rm << 8;
9722 }
9723 
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit nothing; this pseudo-instruction only updates the IT state.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* The low four opcode bits hold the then/else mask; 0x10 marks
	 the end-of-block sentinel position.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9740 
/* If there is only one register in the register list, then return its
   register number.  Otherwise return -1.  An empty list also yields -1;
   previously range == 0 made ffs return 0 and the code then evaluated
   1 << -1, which is undefined behaviour.  */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns the 1-based index of the lowest set bit, or 0 when no
     bit is set.  */
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;

  /* The list holds exactly one register iff clearing that bit empties
     it.  */
  return range == (1 << i) ? i : -1;
}
9749 
/* Encode an LDM/STM (or PUSH/POP when FROM_PUSH_POP_MNEM) register-list
   instruction, warning about unpredictable uses of writeback.  A
   PUSH/POP of a single register is rewritten to the single-register
   (A2) STR/LDR encoding.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.	 */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      /* Keep only the condition field and rebuild as STR/LDR.  */
      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9805 
/* Encoder for the plain LDM/STM mnemonics (not PUSH/POP).  */
static void
do_ldmstm (void)
{
  encode_ldmstm (/*from_push_pop_mnem=*/false);
}
9811 
9812 /* ARMv5TE load-consecutive (argument parse)
9813    Mode is like LDRH.
9814 
9815      LDRccD R, mode
9816      STRccD R, mode.  */
9817 
static void
do_ldrd (void)
{
  /* The register pair must be an even register followed by the next
     odd one, and r14/r15 cannot form a valid pair.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* The second register may be omitted; it is implicitly Rt + 1.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/false);
}
9853 
/* Encode LDREX Rt, [Rn]: only a plain register base with zero offset
   is a valid addressing form.  */
static void
do_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset has been fully consumed; no fixup is needed.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9885 
/* Encode LDREXD Rt, {Rt2,} [Rn]: Rt must be even and Rt2, when given,
   must be Rt + 1.  */
static void
do_ldrexd (void)
{
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  /* The second transfer register is implicitly Rt + 1, so only Rt and
     the base register (operand 2) are encoded.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9901 
9902 /* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
9903    which is not a multiple of four is UNPREDICTABLE.  */
9904 static void
check_ldr_r15_aligned(void)9905 check_ldr_r15_aligned (void)
9906 {
9907   constraint (!(inst.operands[1].immisreg)
9908 	      && (inst.operands[0].reg == REG_PC
9909 	      && inst.operands[1].reg == REG_PC
9910 	      && (inst.relocs[0].exp.X_add_number & 0x3)),
9911 	      _("ldr to register 15 must be 4-byte aligned"));
9912 }
9913 
9914 static void
do_ldst(void)9915 do_ldst (void)
9916 {
9917   inst.instruction |= inst.operands[0].reg << 12;
9918   if (!inst.operands[1].isreg)
9919     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/false))
9920       return;
9921   encode_arm_addr_mode_2 (1, /*is_t=*/false);
9922   check_ldr_r15_aligned ();
9923 }
9924 
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only tolerated when its offset is a
	 literal zero, i.e. it is really just [Rn].  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/true);
}
9943 
9944 /* Halfword and signed-byte load/store operations.  */
9945 
9946 static void
do_ldstv4(void)9947 do_ldstv4 (void)
9948 {
9949   constraint (inst.operands[0].reg == REG_PC, BAD_PC);
9950   inst.instruction |= inst.operands[0].reg << 12;
9951   if (!inst.operands[1].isreg)
9952     if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/true))
9953       return;
9954   encode_arm_addr_mode_3 (1, /*is_t=*/false);
9955 }
9956 
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* A pre-indexed form is only tolerated when its offset is a
	 literal zero, i.e. it is really just [Rn].  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/true);
}
9975 
9976 /* Co-processor register load/store.
9977    Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
9978 static void
do_lstc(void)9979 do_lstc (void)
9980 {
9981   inst.instruction |= inst.operands[0].reg << 8;
9982   inst.instruction |= inst.operands[1].reg << 12;
9983   encode_arm_cp_address (2, true, true, 0);
9984 }
9985 
static void
do_mlas (void)
{
  /* MLA-family encode: Rd in 16-19, Rm in 0-3, Rs in 8-11, Rn in 12-15.  */
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))	/* Bit 22 set => mls.  */
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
10000 
static void
do_mov (void)
{
  /* MOV with a shifter operand: Rd in bits 12-15.  The Thumb-1-only
     ALU group relocations are rejected in ARM state.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
10010 
10011 /* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
static void
do_mov16 (void)
{
  /* MOVW/MOVT Rd, #imm16.  Bit 22 distinguishes MOVT (top) from MOVW.  */
  bfd_vma imm;
  bool top;

  top = (inst.instruction & 0x00400000) != 0;
  /* :lower16:/:upper16: operators must match the mnemonic used.  */
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  /* With no relocation pending, encode the immediate directly.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
10032 
static int
do_vfp_nsyn_mrs (void)
{
  /* Handle the VFP-syntax forms of MRS.  Returns SUCCESS if the
     instruction was dispatched to a VFP opcode here, FAIL if the
     caller should encode it as a core MRS.  */
  if (inst.operands[0].isvec)
    {
      /* APSR_nzcv destination: only FPSCR (spec reg 1) is legal, and the
	 whole thing becomes fmstat.  */
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
10051 
10052 static int
do_vfp_nsyn_msr(void)10053 do_vfp_nsyn_msr (void)
10054 {
10055   if (inst.operands[0].isvec)
10056     do_vfp_nsyn_opcode ("fmxr");
10057   else
10058     return FAIL;
10059 
10060   return SUCCESS;
10061 }
10062 
static void
do_vmrs (void)
{
  /* VMRS Rt, <spec_reg>: validate the Rt/spec-reg combination for the
     selected architecture, then pack spec reg into 16-19 and Rt into
     12-15.  */
  unsigned Rt = inst.operands[0].reg;

  /* SP is never a valid Rt in Thumb state.  */
  if (thumb_mode && Rt == REG_SP)
    {
      inst.error = BAD_SP;
      return;
    }

  switch (inst.operands[1].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case 1: /* fpscr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* vpr/p0 without MVE only get a warning, not an error.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* APSR_ sets isvec. All other refs to PC are illegal.  */
  if (!inst.operands[0].isvec && Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[1].reg << 16);
  inst.instruction |= (Rt << 12);
}
10123 
static void
do_vmsr (void)
{
  /* VMSR <spec_reg>, Rt: mirror of do_vmrs — validate the combination,
     then pack spec reg into 16-19 and Rt into 12-15.  */
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case  1: /* fpcr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* vpr/p0 without MVE only get a warning, not an error.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10179 
static void
do_mrs (void)
{
  /* MRS Rd, <psr>.  The VFP pseudo-syntax form is tried first; if it
     consumed the instruction there is nothing more to do.  */
  unsigned br;

  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser-supplied value must either have
	 bit 9 set or select the 0xf0000 group.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10208 
10209 /* Two possible forms:
10210       "{C|S}PSR_<field>, Rm",
10211       "{C|S}PSR_f, #expression".  */
10212 
static void
do_msr (void)
{
  /* MSR <psr_fields>, Rm  or  MSR <psr_fields>, #imm.  The VFP
     pseudo-syntax form is tried first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0 carries the already-encoded PSR field mask.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: mark the I bit and defer the rotated-immediate
	 encoding to the relocation machinery.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10229 
static void
do_mul (void)
{
  /* MUL Rd, Rm {, Rs}: Rd in 16-19, Rm in 0-3, Rs in 8-11.  */
  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  /* Two-operand form defaults Rs to Rd.  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;

  /* Rd == Rm is only discouraged before v6.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("Rd and Rm should be different in mul"));
}
10245 
10246 /* Long Multiply Parser
10247    UMULL RdLo, RdHi, Rm, Rs
10248    SMULL RdLo, RdHi, Rm, Rs
10249    UMLAL RdLo, RdHi, Rm, Rs
10250    SMLAL RdLo, RdHi, Rm, Rs.  */
10251 
static void
do_mull (void)
{
  /* Long multiply: RdLo in 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* rdhi and rdlo must be different.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));

  /* rdhi, rdlo and rm must all be different before armv6.  */
  if ((inst.operands[0].reg == inst.operands[2].reg
      || inst.operands[1].reg == inst.operands[2].reg)
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
}
10270 
static void
do_nop (void)
{
  /* NOP {#imm}: on v6K and later (or when an operand is given) encode a
     real hint; otherwise leave the pre-selected encoding alone.  */
  if (inst.operands[0].present
      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
    {
      /* Architectural NOP hints are CPSR sets with no bits selected.  */
      inst.instruction &= 0xf0000000;	/* Keep only the condition.  */
      inst.instruction |= 0x0320f000;
      if (inst.operands[0].present)
	inst.instruction |= inst.operands[0].imm;
    }
}
10284 
10285 /* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10286    PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10287    Condition defaults to COND_ALWAYS.
10288    Error if Rd, Rn or Rm are R15.  */
10289 
10290 static void
do_pkhbt(void)10291 do_pkhbt (void)
10292 {
10293   inst.instruction |= inst.operands[0].reg << 12;
10294   inst.instruction |= inst.operands[1].reg << 16;
10295   inst.instruction |= inst.operands[2].reg;
10296   if (inst.operands[3].present)
10297     encode_arm_shift (3);
10298 }
10299 
10300 /* ARM V6 PKHTB (Argument Parse).  */
10301 
static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn. */
      inst.instruction &= 0xfff00010;	/* Clear register and shift fields.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      /* Normal PKHTB: Rd in 12-15, Rn in 16-19, Rm in 0-3 plus shift.  */
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      encode_arm_shift (3);
    }
}
10322 
10323 /* ARMv5TE: Preload-Cache
10324    MP Extensions: Preload for write
10325 
10326     PLD(W) <addr_mode>
10327 
10328   Syntactically, like LDR with B=1, W=0, L=1.  */
10329 
static void
do_pld (void)
{
  /* PLD(W) <addr_mode>: only a plain pre-indexed register address is
     allowed.  The constraint order determines which diagnostic fires.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
}
10343 
10344 /* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* PLI <addr_mode>: same addressing restrictions as PLD, but the
     encoding clears the P bit afterwards.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/false);
  inst.instruction &= ~PRE_INDEX;
}
10359 
static void
do_push_pop (void)
{
  /* PUSH/POP {reglist}: rewrite as LDM/STM SP!, {reglist}.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Shift the register list to operand 1 and synthesize SP-with-writeback
     as the base register in operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/true);
}
10372 
10373 /* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10374    word at the specified address and the following word
10375    respectively.
10376    Unconditionally executed.
10377    Error if Rn is R15.	*/
10378 
10379 static void
do_rfe(void)10380 do_rfe (void)
10381 {
10382   inst.instruction |= inst.operands[0].reg << 16;
10383   if (inst.operands[0].writeback)
10384     inst.instruction |= WRITE_BACK;
10385 }
10386 
10387 /* ARM V6 ssat (argument parse).  */
10388 
10389 static void
do_ssat(void)10390 do_ssat (void)
10391 {
10392   inst.instruction |= inst.operands[0].reg << 12;
10393   inst.instruction |= (inst.operands[1].imm - 1) << 16;
10394   inst.instruction |= inst.operands[2].reg;
10395 
10396   if (inst.operands[3].present)
10397     encode_arm_shift (3);
10398 }
10399 
10400 /* ARM V6 usat (argument parse).  */
10401 
10402 static void
do_usat(void)10403 do_usat (void)
10404 {
10405   inst.instruction |= inst.operands[0].reg << 12;
10406   inst.instruction |= inst.operands[1].imm << 16;
10407   inst.instruction |= inst.operands[2].reg;
10408 
10409   if (inst.operands[3].present)
10410     encode_arm_shift (3);
10411 }
10412 
10413 /* ARM V6 ssat16 (argument parse).  */
10414 
10415 static void
do_ssat16(void)10416 do_ssat16 (void)
10417 {
10418   inst.instruction |= inst.operands[0].reg << 12;
10419   inst.instruction |= ((inst.operands[1].imm - 1) << 16);
10420   inst.instruction |= inst.operands[2].reg;
10421 }
10422 
10423 static void
do_usat16(void)10424 do_usat16 (void)
10425 {
10426   inst.instruction |= inst.operands[0].reg << 12;
10427   inst.instruction |= inst.operands[1].imm << 16;
10428   inst.instruction |= inst.operands[2].reg;
10429 }
10430 
10431 /* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
10432    preserving the other bits.
10433 
10434    setend <endian_specifier>, where <endian_specifier> is either
10435    BE or LE.  */
10436 
static void
do_setend (void)
{
  /* SETEND BE|LE: deprecated from ARMv8 onwards.  */
  if (warn_on_deprecated
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
      as_tsktsk (_("setend use is deprecated for ARMv8"));

  /* A non-zero operand selects BE via the E bit (bit 9).  */
  if (inst.operands[0].imm)
    inst.instruction |= 0x200;
}
10447 
static void
do_shift (void)
{
  /* Shift pseudo-ops (LSL/LSR/ASR/ROR): two-operand form reuses Rd as
     the source register.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount is resolved via a relocation.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10468 
static void
do_smc (void)
{
  /* SMC #imm4: the immediate is limited to 4 bits and carried by a
     dedicated relocation.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
  inst.relocs[0].pc_rel = 0;
}
10478 
10479 static void
do_hvc(void)10480 do_hvc (void)
10481 {
10482   inst.relocs[0].type = BFD_RELOC_ARM_HVC;
10483   inst.relocs[0].pc_rel = 0;
10484 }
10485 
10486 static void
do_swi(void)10487 do_swi (void)
10488 {
10489   inst.relocs[0].type = BFD_RELOC_ARM_SWI;
10490   inst.relocs[0].pc_rel = 0;
10491 }
10492 
static void
do_setpan (void)
{
  /* ARM-state SETPAN #imm1: requires the PAN extension; the single
     immediate bit goes into bit 9.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
10501 
static void
do_t_setpan (void)
{
  /* Thumb-state SETPAN #imm1: same feature check, but the immediate
     goes into bit 3 of the Thumb encoding.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10510 
10511 /* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10512    SMLAxy{cond} Rd,Rm,Rs,Rn
10513    SMLAWy{cond} Rd,Rm,Rs,Rn
10514    Error if any register is R15.  */
10515 
10516 static void
do_smla(void)10517 do_smla (void)
10518 {
10519   inst.instruction |= inst.operands[0].reg << 16;
10520   inst.instruction |= inst.operands[1].reg;
10521   inst.instruction |= inst.operands[2].reg << 8;
10522   inst.instruction |= inst.operands[3].reg << 12;
10523 }
10524 
10525 /* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10526    SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10527    Error if any register is R15.
10528    Warning if Rdlo == Rdhi.  */
10529 
static void
do_smlal (void)
{
  /* SMLALxy RdLo, RdHi, Rm, Rs: RdLo in 12-15, RdHi in 16-19,
     Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* RdLo == RdHi draws a warning, not an error.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10541 
10542 /* ARM V5E (El Segundo) signed-multiply (argument parse)
10543    SMULxy{cond} Rd,Rm,Rs
10544    Error if any register is R15.  */
10545 
10546 static void
do_smul(void)10547 do_smul (void)
10548 {
10549   inst.instruction |= inst.operands[0].reg << 16;
10550   inst.instruction |= inst.operands[1].reg;
10551   inst.instruction |= inst.operands[2].reg << 8;
10552 }
10553 
10554 /* ARM V6 srs (argument parse).  The variable fields in the encoding are
10555    the same for both ARM and Thumb-2.  */
10556 
static void
do_srs (void)
{
  /* SRS {SP{!},} #mode{!}: the base register defaults to SP and must
     be SP when given explicitly.  */
  int reg;

  if (inst.operands[0].present)
    {
      reg = inst.operands[0].reg;
      constraint (reg != REG_SP, _("SRS base register must be r13"));
    }
  else
    reg = REG_SP;

  inst.instruction |= reg << 16;
  inst.instruction |= inst.operands[1].imm;	/* Target mode number.  */
  /* Writeback may be flagged on either operand form.  */
  if (inst.operands[0].writeback || inst.operands[1].writeback)
    inst.instruction |= WRITE_BACK;
}
10575 
10576 /* ARM V6 strex (argument parse).  */
10577 
static void
do_strex (void)
{
  /* STREX Rd, Rt, [Rn]: only a plain register address with zero offset
     is legal.  Rd in 12-15, Rt in 0-3, Rn in 16-19.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must not overlap Rt or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The zero offset needs no relocation.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10601 
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: plain register address only, and the status
     register must not overlap Rt or Rn.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10616 
static void
do_strexd (void)
{
  /* STREXD Rd, Rt, {Rt2,} [Rn]: Rt must be even, Rt2 (if given) must be
     Rt+1, and neither r14 nor the PC is usable for the pair.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* The status register must not overlap Rt, Rt+1 or Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10638 
10639 /* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* ARM-state STLEX: the status register must not overlap Rt or Rn,
     then encode as Rd/Rm/Rn.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10648 
static void
do_t_stlex (void)
{
  /* Thumb-state STLEX: same overlap rule, but the Thumb field layout
     (Rm/Rd/Rn) is used.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10657 
10658 /* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10659    extends it to 32-bits, and adds the result to a value in another
10660    register.  You can specify a rotation by 0, 8, 16, or 24 bits
10661    before extracting the 16-bit value.
10662    SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10663    Condition defaults to COND_ALWAYS.
10664    Error if any register uses R15.  */
10665 
10666 static void
do_sxtah(void)10667 do_sxtah (void)
10668 {
10669   inst.instruction |= inst.operands[0].reg << 12;
10670   inst.instruction |= inst.operands[1].reg << 16;
10671   inst.instruction |= inst.operands[2].reg;
10672   inst.instruction |= inst.operands[3].imm << 10;
10673 }
10674 
10675 /* ARM V6 SXTH.
10676 
10677    SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10678    Condition defaults to COND_ALWAYS.
10679    Error if any register uses R15.  */
10680 
10681 static void
do_sxth(void)10682 do_sxth (void)
10683 {
10684   inst.instruction |= inst.operands[0].reg << 12;
10685   inst.instruction |= inst.operands[1].reg;
10686   inst.instruction |= inst.operands[2].imm << 10;
10687 }
10688 
10689 /* VFP instructions.  In a logical order: SP variant first, monad
10690    before dyad, arithmetic then move then load/store.  */
10691 
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision one-operand VFP op: Sd, Sm.  Needs at least
     single-precision VFP or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10702 
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision two-operand VFP op: Sd, Sn, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10710 
static void
do_vfp_sp_compare_z (void)
{
  /* Single-precision compare with zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10716 
static void
do_vfp_dp_sp_cvt (void)
{
  /* Convert with a double-precision destination and single source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10723 
static void
do_vfp_sp_dp_cvt (void)
{
  /* Convert with a single-precision destination and double source.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10730 
static void
do_vfp_reg_from_sp (void)
{
  /* Core register from VFP single: Rt in bits 12-15, Sn as the source.
     Needs single-precision VFP or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	     _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
10741 
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Two core registers from a pair of VFP singles.  The parser reports
     the pair length in operand 2's imm; exactly two is required.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10751 
static void
do_vfp_sp_from_reg (void)
{
  /* VFP single from core register: Sn as destination, Rt in 12-15.
     Needs single-precision VFP or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	     _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10762 
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Pair of VFP singles from two core registers; exactly two
     consecutive S registers must have been written.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10772 
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision VLDR/VSTR: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, false, true, 0);
}
10779 
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision VLDR/VSTR: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, false, true, 0);
}
10786 
10787 
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for single-precision FLDM/FSTM variants.  Base
     register in 16-19, first S register via Sd, register count in the
     low bits (from the parser's imm).  */
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10800 
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  /* Common encoder for double-precision / FPA-extended FLDM/FSTM.  The
     transfer length is in words: two per D register, plus one extra for
     the X (extended) forms.  */
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA/IAX forms may omit writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10821 
static void
do_vfp_sp_ldstmia (void)
{
  /* Single-precision load/store multiple, increment-after.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10827 
static void
do_vfp_sp_ldstmdb (void)
{
  /* Single-precision load/store multiple, decrement-before.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10833 
static void
do_vfp_dp_ldstmia (void)
{
  /* Double-precision load/store multiple, increment-after.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10839 
static void
do_vfp_dp_ldstmdb (void)
{
  /* Double-precision load/store multiple, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10845 
static void
do_vfp_xp_ldstmia (void)
{
  /* Extended-precision load/store multiple, increment-after.  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10851 
static void
do_vfp_xp_ldstmdb (void)
{
  /* Extended-precision load/store multiple, decrement-before.  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10857 
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision two-register op: Dd, Dm.  Needs VFPv1 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10868 
static void
do_vfp_dp_rn_rd (void)
{
  /* Double-precision: operand 0 into the Dn field, operand 1 into Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10875 
static void
do_vfp_dp_rd_rn (void)
{
  /* Double-precision: operand 0 into the Dd field, operand 1 into Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10882 
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision three-register op: Dd, Dn, Dm.  Needs VFPv2 or
     MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10894 
static void
do_vfp_dp_rd (void)
{
  /* Double-precision op with only a destination: Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10900 
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Double-precision three-register op with operands in Dm, Dd, Dn
     order.  Needs VFPv2 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10912 
10913 /* VFPv3 instructions.  */
10914 static void
do_vfp_sp_const(void)10915 do_vfp_sp_const (void)
10916 {
10917   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
10918   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10919   inst.instruction |= (inst.operands[1].imm & 0x0f);
10920 }
10921 
10922 static void
do_vfp_dp_const(void)10923 do_vfp_dp_const (void)
10924 {
10925   encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
10926   inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
10927   inst.instruction |= (inst.operands[1].imm & 0x0f);
10928 }
10929 
/* Encode the fraction-bits field of a VCVT between floating-point and
   fixed-point.  SRCSIZE is the width of the fixed-point operand (16 or
   32); the number of fraction bits is SRCSIZE minus the parsed
   immediate.  The result is split across bit 5 (lsb) and an upper
   field at bits 0-3 of the opcode.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  /* Low bit of immbits -> opcode bit 5, remaining bits -> bits 0-3.  */
  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10953 
/* VCVT, single-precision register, 16-bit fixed-point operand.  */
static void
do_vfp_sp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10960 
/* VCVT, double-precision register, 16-bit fixed-point operand.  */
static void
do_vfp_dp_conv_16 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10967 
/* VCVT, single-precision register, 32-bit fixed-point operand.  */
static void
do_vfp_sp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10974 
/* VCVT, double-precision register, 32-bit fixed-point operand.  */
static void
do_vfp_dp_conv_32 (void)
{
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10981 
10982 /* FPA instructions.  Also in a logical order.	*/
10983 
10984 static void
do_fpa_cmp(void)10985 do_fpa_cmp (void)
10986 {
10987   inst.instruction |= inst.operands[0].reg << 16;
10988   inst.instruction |= inst.operands[1].reg;
10989 }
10990 
/* FPA LFM/SFM.  Operand 0 is the first FP register, operand 1 the
   register count (1-4, encoded in the CP_T_X/CP_T_Y bits), operand 2
   the memory address.  The stack-addressing mnemonic variants ("ea",
   "fd") are emulated with explicit offsets since the hardware has no
   real stacking modes.  */
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Register count: 4 is encoded as 0b00.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* Each transferred register occupies 12 bytes in memory.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending stack with writeback becomes post-indexed.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, true, true, 0);
}
11029 
11030 /* iWMMXt instructions: strictly in alphabetical order.	 */
11031 
static void
do_iwmmxt_tandorc (void)
{
  /* These flag-transfer instructions accept only r15 as the core
     register operand; nothing beyond the base opcode is encoded.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
11037 
11038 static void
do_iwmmxt_textrc(void)11039 do_iwmmxt_textrc (void)
11040 {
11041   inst.instruction |= inst.operands[0].reg << 12;
11042   inst.instruction |= inst.operands[1].imm;
11043 }
11044 
11045 static void
do_iwmmxt_textrm(void)11046 do_iwmmxt_textrm (void)
11047 {
11048   inst.instruction |= inst.operands[0].reg << 12;
11049   inst.instruction |= inst.operands[1].reg << 16;
11050   inst.instruction |= inst.operands[2].imm;
11051 }
11052 
11053 static void
do_iwmmxt_tinsr(void)11054 do_iwmmxt_tinsr (void)
11055 {
11056   inst.instruction |= inst.operands[0].reg << 16;
11057   inst.instruction |= inst.operands[1].reg << 12;
11058   inst.instruction |= inst.operands[2].imm;
11059 }
11060 
11061 static void
do_iwmmxt_tmia(void)11062 do_iwmmxt_tmia (void)
11063 {
11064   inst.instruction |= inst.operands[0].reg << 5;
11065   inst.instruction |= inst.operands[1].reg;
11066   inst.instruction |= inst.operands[2].reg << 12;
11067 }
11068 
11069 static void
do_iwmmxt_waligni(void)11070 do_iwmmxt_waligni (void)
11071 {
11072   inst.instruction |= inst.operands[0].reg << 12;
11073   inst.instruction |= inst.operands[1].reg << 16;
11074   inst.instruction |= inst.operands[2].reg;
11075   inst.instruction |= inst.operands[3].imm << 20;
11076 }
11077 
11078 static void
do_iwmmxt_wmerge(void)11079 do_iwmmxt_wmerge (void)
11080 {
11081   inst.instruction |= inst.operands[0].reg << 12;
11082   inst.instruction |= inst.operands[1].reg << 16;
11083   inst.instruction |= inst.operands[2].reg;
11084   inst.instruction |= inst.operands[3].imm << 21;
11085 }
11086 
11087 static void
do_iwmmxt_wmov(void)11088 do_iwmmxt_wmov (void)
11089 {
11090   /* WMOV rD, rN is an alias for WOR rD, rN, rN.  */
11091   inst.instruction |= inst.operands[0].reg << 12;
11092   inst.instruction |= inst.operands[1].reg << 16;
11093   inst.instruction |= inst.operands[1].reg;
11094 }
11095 
11096 static void
do_iwmmxt_wldstbh(void)11097 do_iwmmxt_wldstbh (void)
11098 {
11099   int reloc;
11100   inst.instruction |= inst.operands[0].reg << 12;
11101   if (thumb_mode)
11102     reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
11103   else
11104     reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
11105   encode_arm_cp_address (1, true, false, reloc);
11106 }
11107 
11108 static void
do_iwmmxt_wldstw(void)11109 do_iwmmxt_wldstw (void)
11110 {
11111   /* RIWR_RIWC clears .isreg for a control register.  */
11112   if (!inst.operands[0].isreg)
11113     {
11114       constraint (inst.cond != COND_ALWAYS, BAD_COND);
11115       inst.instruction |= 0xf0000000;
11116     }
11117 
11118   inst.instruction |= inst.operands[0].reg << 12;
11119   encode_arm_cp_address (1, true, true, 0);
11120 }
11121 
/* WLDRD/WSTRD.  On iWMMXt2 a register-offset addressing form exists;
   it uses a different (unconditional) encoding which is constructed
   here by rewriting the opcode bits directly.  Otherwise fall back to
   the generic coprocessor-address encoder.  */
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      /* Clear the fields used by the immediate-offset form, then force
	 the unconditional (0xf) condition.  */
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      /* Scale factor goes in bits 4-7, index register in bits 0-3.  */
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, true, false, 0);
}
11144 
11145 static void
do_iwmmxt_wshufh(void)11146 do_iwmmxt_wshufh (void)
11147 {
11148   inst.instruction |= inst.operands[0].reg << 12;
11149   inst.instruction |= inst.operands[1].reg << 16;
11150   inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
11151   inst.instruction |= (inst.operands[2].imm & 0x0f);
11152 }
11153 
11154 static void
do_iwmmxt_wzero(void)11155 do_iwmmxt_wzero (void)
11156 {
11157   /* WZERO reg is an alias for WANDN reg, reg, reg.  */
11158   inst.instruction |= inst.operands[0].reg;
11159   inst.instruction |= inst.operands[0].reg << 12;
11160   inst.instruction |= inst.operands[0].reg << 16;
11161 }
11162 
/* iWMMXt shift instructions that take either a register or (on
   iWMMXt2) a 5-bit immediate third operand.  A #0 shift does not
   exist in the encoding, so it is rewritten as an equivalent full
   rotation (or, for double-width ops, as WOR with itself).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    if (inst.operands[2].imm == 0)
      {
	/* Bits 20-23 identify the operand width/opcode class.  */
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    /* Immediate form is unconditional; imm bit 4 -> opcode bit 8.  */
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11212 
11213 /* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
11214    operations first, then control, shift, and load/store.  */
11215 
11216 /* Insns like "foo X,Y,Z".  */
11217 
11218 static void
do_mav_triple(void)11219 do_mav_triple (void)
11220 {
11221   inst.instruction |= inst.operands[0].reg << 16;
11222   inst.instruction |= inst.operands[1].reg;
11223   inst.instruction |= inst.operands[2].reg << 12;
11224 }
11225 
11226 /* Insns like "foo W,X,Y,Z".
11227     where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
11228 
11229 static void
do_mav_quad(void)11230 do_mav_quad (void)
11231 {
11232   inst.instruction |= inst.operands[0].reg << 5;
11233   inst.instruction |= inst.operands[1].reg << 12;
11234   inst.instruction |= inst.operands[2].reg << 16;
11235   inst.instruction |= inst.operands[3].reg;
11236 }
11237 
11238 /* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
11239 static void
do_mav_dspsc(void)11240 do_mav_dspsc (void)
11241 {
11242   inst.instruction |= inst.operands[1].reg << 12;
11243 }
11244 
11245 /* Maverick shift immediate instructions.
11246    cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11247    cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
11248 
11249 static void
do_mav_shift(void)11250 do_mav_shift (void)
11251 {
11252   int imm = inst.operands[2].imm;
11253 
11254   inst.instruction |= inst.operands[0].reg << 12;
11255   inst.instruction |= inst.operands[1].reg << 16;
11256 
11257   /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
11258      Bits 5-7 of the insn should have bits 4-6 of the immediate.
11259      Bit 4 should be 0.	 */
11260   imm = (imm & 0xf) | ((imm & 0x70) << 1);
11261 
11262   inst.instruction |= imm;
11263 }
11264 
11265 /* XScale instructions.	 Also sorted arithmetic before move.  */
11266 
11267 /* Xscale multiply-accumulate (argument parse)
11268      MIAcc   acc0,Rm,Rs
11269      MIAPHcc acc0,Rm,Rs
11270      MIAxycc acc0,Rm,Rs.  */
11271 
11272 static void
do_xsc_mia(void)11273 do_xsc_mia (void)
11274 {
11275   inst.instruction |= inst.operands[1].reg;
11276   inst.instruction |= inst.operands[2].reg << 12;
11277 }
11278 
11279 /* Xscale move-accumulator-register (argument parse)
11280 
11281      MARcc   acc0,RdLo,RdHi.  */
11282 
11283 static void
do_xsc_mar(void)11284 do_xsc_mar (void)
11285 {
11286   inst.instruction |= inst.operands[1].reg << 12;
11287   inst.instruction |= inst.operands[2].reg << 16;
11288 }
11289 
11290 /* Xscale move-register-accumulator (argument parse)
11291 
11292      MRAcc   RdLo,RdHi,acc0.  */
11293 
11294 static void
do_xsc_mra(void)11295 do_xsc_mra (void)
11296 {
11297   constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
11298   inst.instruction |= inst.operands[0].reg << 12;
11299   inst.instruction |= inst.operands[1].reg << 16;
11300 }
11301 
11302 /* Encoding functions relevant only to Thumb.  */
11303 
11304 /* inst.operands[i] is a shifted-register operand; encode
11305    it into inst.instruction in the format used by Thumb32.  */
11306 
static void
encode_thumb32_shifted_operand (int i)
{
  /* Shift amount comes from the pending reloc expression; kind from
     the parsed operand.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  if (shift == SHIFT_RRX)
    /* RRX is encoded as ROR with a zero shift amount.  */
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      /* ASR/LSR allow a 32-bit shift (encoded as 0); LSL and ROR
	 do not.  */
      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift kind -> bits 4-5; amount split: bits 2-4 of the amount
	 -> imm3 (bits 12-14), bits 0-1 -> imm2 (bits 6-7).  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11338 
11339 
11340 /* inst.operands[i] was set up by parse_address.  Encode it into a
11341    Thumb32 format load or store instruction.  Reject forms that cannot
11342    be used with such instructions.  If is_t is true, reject forms that
11343    cannot be used with a T instruction; if is_d is true, reject forms
11344    that cannot be used with a D instruction.  If it is a store insn,
11345    reject PC in Rn.  */
11346 
static void
encode_thumb32_addr_mode (int i, bool is_t, bool is_d)
{
  const bool is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] register-offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0-3 is representable (bits 4-5).  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #off] or [Rn, #off]! pre-indexed immediate form.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      /* PC-relative is only valid for loads.  */
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  /* Doubleword form: set the U bit, and W for writeback.  */
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  /* Single-register form: P and U bits, plus W for writeback.  */
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #off post-indexed form; writeback is implied.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11418 
11419 /* Table of Thumb instructions which exist in 16- and/or 32-bit
11420    encodings (the latter only in post-V6T2 cores).  The index is the
11421    value used in the insns table below.  When there is more than one
11422    possible 16-bit encoding for the instruction, this table always
11423    holds variant (1).
11424    Also contains several pseudo-instructions used during relaxation.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),                    \
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)

/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion: each X(a,b,c) becomes an enumerator T_MNEM_a.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode of each entry, indexed by its
   T_MNEM code.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the corresponding 32-bit opcode.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11545 
11546 /* Thumb instruction encoders, in alphabetical order.  */
11547 
11548 /* ADDW or SUBW.  */
11549 
11550 static void
do_t_add_sub_w(void)11551 do_t_add_sub_w (void)
11552 {
11553   int Rd, Rn;
11554 
11555   Rd = inst.operands[0].reg;
11556   Rn = inst.operands[1].reg;
11557 
11558   /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11559      is the SP-{plus,minus}-immediate form of the instruction.  */
11560   if (Rn == REG_SP)
11561     constraint (Rd == REG_PC, BAD_PC);
11562   else
11563     reject_bad_reg (Rd);
11564 
11565   inst.instruction |= (Rn << 16) | (Rd << 8);
11566   inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11567 }
11568 
11569 /* Parse an add or subtract instruction.  We get here with inst.instruction
11570    equaling any of THUMB_OPCODE_add, adds, sub, or subs.  */
11571 
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bool flags;
      bool narrow;
      int opcode;

      /* Flag-setting forms are narrow outside a predication block;
	 non flag-setting forms are narrow inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* Thumb-1 ALU_ABS group relocs must stay as parsed;
		     anything else either gets the Thumb ADD reloc (when
		     a 16-bit insn was requested) or is left for
		     relaxation.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, _("cannot honor width suffix"));
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Generate a 32-bit encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* Only the exception-return idiom SUBS PC, LR, #imm8
		     is permitted with PC as destination.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = false;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* ADD_HI encodes Rd and one source; make Rn the
			 "other" register.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Non-unified (divided) syntax: flag-setting mnemonics are not
	 available, and only the classic Thumb-1 forms exist.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.	 */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11792 
/* Thumb ADR: choose between a relaxable 16-bit form, a 32-bit form,
   and a fixed 16-bit form depending on syntax mode, size request, and
   destination register.  */
static void
do_t_adr (void)
{
  unsigned Rd;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);

  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
    {
      /* Defer to section relaxation.  */
      inst.relax = inst.instruction;
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd << 4;
    }
  else if (unified_syntax && inst.size_req != 2)
    {
      /* Generate a 32-bit opcode.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
      inst.relocs[0].pc_rel = 1;
    }
  else
    {
      /* Generate a 16-bit opcode.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
      inst.relocs[0].pc_rel = 1;
      inst.instruction |= Rd << 4;
    }

  /* Taking the address of a defined Thumb function yields the address
     with the low bit set (interworking).  */
  if (inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number += 1;
}
11832 
11833 /* Arithmetic instructions for which there is just one 16-bit
11834    instruction encoding, and it allows only two low registers.
11835    For maximal compatibility with ARM syntax, we allow three register
11836    operands even when Thumb-32 instructions are not available, as long
11837    as the first two are identical.  For instance, both "sbc r0,r1" and
11838    "sbc r0,r0,r1" are allowed.  */
/* Encoder for the non-commutative two-low-register arithmetic group
   (see comment above): pick the 16-bit form when registers, shifts,
   IT-block state and any explicit size request permit, else the
   32-bit Thumb-2 form.  */
static void
do_t_arit3 (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow form is only usable when its flag-setting
	     behaviour matches the predication state.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  /* Narrow also requires low registers, an unshifted operand,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  /* Non-commutative: only Rd == Rs folds to the 2-reg form.  */
	  if (narrow
	      && Rd == Rs)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rd;
	      inst.instruction |= Rn << 3;
	      return;
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
      constraint (Rd != Rs,
		  _("dest and source1 must be the same register"));

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rn << 3;
    }
}
11921 
11922 /* Similarly, but for instructions where the arithmetic operation is
11923    commutative, so we can allow either of them to be different from
11924    the destination operand in a 16-bit instruction.  For instance, all
11925    three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11926    accepted.  */
/* Commutative counterpart of do_t_arit3 (see comment above): either
   source may coincide with the destination for the 16-bit form.  */
static void
do_t_arit3c (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (Rn);

  if (unified_syntax)
    {
      if (!inst.operands[2].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  bool narrow;

	  /* See if we can do this with a 16-bit instruction.
	     The narrow form is only usable when its flag-setting
	     behaviour matches the predication state.  */
	  if (THUMB_SETS_FLAGS (inst.instruction))
	    narrow = !in_pred_block ();
	  else
	    narrow = in_pred_block ();

	  /* Narrow also requires low registers, an unshifted operand,
	     and no explicit ".w" size request.  */
	  if (Rd > 7 || Rn > 7 || Rs > 7)
	    narrow = false;
	  if (inst.operands[2].shifted)
	    narrow = false;
	  if (inst.size_req == 4)
	    narrow = false;

	  if (narrow)
	    {
	      /* Commutative, so the destination may overlap either
		 source; swap the sources when it overlaps Rn.  */
	      if (Rd == Rs)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rn << 3;
		  return;
		}
	      if (Rd == Rn)
		{
		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= Rd;
		  inst.instruction |= Rs << 3;
		  return;
		}
	    }

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted
		      && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* On its face this is a lie - the instruction does set the
	 flags.  However, the only supported mnemonic in this mode
	 says it doesn't.  */
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
		  _("unshifted register required"));
      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      if (Rd == Rs)
	inst.instruction |= Rn << 3;
      else if (Rd == Rn)
	inst.instruction |= Rs << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
}
12022 
12023 static void
do_t_bfc(void)12024 do_t_bfc (void)
12025 {
12026   unsigned Rd;
12027   unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
12028   constraint (msb > 32, _("bit-field extends past end of register"));
12029   /* The instruction encoding stores the LSB and MSB,
12030      not the LSB and width.  */
12031   Rd = inst.operands[0].reg;
12032   reject_bad_reg (Rd);
12033   inst.instruction |= Rd << 8;
12034   inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
12035   inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
12036   inst.instruction |= msb - 1;
12037 }
12038 
12039 static void
do_t_bfi(void)12040 do_t_bfi (void)
12041 {
12042   int Rd, Rn;
12043   unsigned int msb;
12044 
12045   Rd = inst.operands[0].reg;
12046   reject_bad_reg (Rd);
12047 
12048   /* #0 in second position is alternative syntax for bfc, which is
12049      the same instruction but with REG_PC in the Rm field.  */
12050   if (!inst.operands[1].isreg)
12051     Rn = REG_PC;
12052   else
12053     {
12054       Rn = inst.operands[1].reg;
12055       reject_bad_reg (Rn);
12056     }
12057 
12058   msb = inst.operands[2].imm + inst.operands[3].imm;
12059   constraint (msb > 32, _("bit-field extends past end of register"));
12060   /* The instruction encoding stores the LSB and MSB,
12061      not the LSB and width.  */
12062   inst.instruction |= Rd << 8;
12063   inst.instruction |= Rn << 16;
12064   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12065   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12066   inst.instruction |= msb - 1;
12067 }
12068 
12069 static void
do_t_bfx(void)12070 do_t_bfx (void)
12071 {
12072   unsigned Rd, Rn;
12073 
12074   Rd = inst.operands[0].reg;
12075   Rn = inst.operands[1].reg;
12076 
12077   reject_bad_reg (Rd);
12078   reject_bad_reg (Rn);
12079 
12080   constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12081 	      _("bit-field extends past end of register"));
12082   inst.instruction |= Rd << 8;
12083   inst.instruction |= Rn << 16;
12084   inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12085   inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12086   inst.instruction |= inst.operands[3].imm - 1;
12087 }
12088 
12089 /* ARM V5 Thumb BLX (argument parse)
12090 	BLX <target_addr>	which is BLX(1)
12091 	BLX <Rm>		which is BLX(2)
12092    Unfortunately, there are two different opcodes for this mnemonic.
12093    So, the insns[].value is not used, and the code here zaps values
12094 	into inst.instruction.
12095 
12096    ??? How to take advantage of the additional two bits of displacement
12097    available in Thumb32 mode?  Need new relocation?  */
12098 
12099 static void
do_t_blx(void)12100 do_t_blx (void)
12101 {
12102   set_pred_insn_type_last ();
12103 
12104   if (inst.operands[0].isreg)
12105     {
12106       constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12107       /* We have a register, so this is BLX(2).  */
12108       inst.instruction |= inst.operands[0].reg << 3;
12109     }
12110   else
12111     {
12112       /* No register.  This must be BLX(1).  */
12113       inst.instruction = 0xf000e800;
12114       encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12115     }
12116 }
12117 
/* Encode a Thumb conditional or unconditional branch, choosing the
   16-bit, 32-bit, or relaxable encoding and the matching PC-relative
   relocation.  */
static void
do_t_branch (void)
{
  int opcode;
  int cond;
  bfd_reloc_code_real_type reloc;

  cond = inst.cond;
  /* A branch may only appear as the last instruction of an IT block.  */
  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);

  if (in_pred_block ())
    {
      /* Conditional branches inside IT blocks are encoded as unconditional
	 branches.  */
      cond = COND_ALWAYS;
    }
  else
    cond = inst.cond;

  if (cond != COND_ALWAYS)
    opcode = T_MNEM_bcond;
  else
    opcode = inst.instruction;

  /* Use the 32-bit form when ".w" was requested, or when no ".n" was
     requested and the target expression already carries a reloc or is
     a constant.  */
  if (unified_syntax
      && (inst.size_req == 4
	  || (inst.size_req != 2
	      && (inst.operands[0].hasreloc
		  || inst.relocs[0].exp.X_op == O_constant))))
    {
      inst.instruction = THUMB_OP32(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
      else
	{
	  /* Wide conditional branches only exist from ARMv6T2 on.  */
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
		      _("selected architecture does not support "
			"wide conditional branch instruction"));

	  gas_assert (cond != 0xF);
	  inst.instruction |= cond << 22;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
    }
  else
    {
      inst.instruction = THUMB_OP16(opcode);
      if (cond == COND_ALWAYS)
	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
      else
	{
	  inst.instruction |= cond << 8;
	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
	}
      /* Allow section relaxation.  */
      if (unified_syntax && inst.size_req != 2)
	inst.relax = opcode;
    }
  inst.relocs[0].type = reloc;
  inst.relocs[0].pc_rel = 1;
}
12179 
12180 /* Actually do the work for Thumb state bkpt and hlt.  The only difference
12181    between the two is the maximum immediate allowed - which is passed in
12182    RANGE.  */
12183 static void
do_t_bkpt_hlt1(int range)12184 do_t_bkpt_hlt1 (int range)
12185 {
12186   constraint (inst.cond != COND_ALWAYS,
12187 	      _("instruction is always unconditional"));
12188   if (inst.operands[0].present)
12189     {
12190       constraint (inst.operands[0].imm > range,
12191 		  _("immediate value out of range"));
12192       inst.instruction |= inst.operands[0].imm;
12193     }
12194 
12195   set_pred_insn_type (NEUTRAL_IT_INSN);
12196 }
12197 
/* Thumb HLT: immediate limited to 0-63.  */
static void
do_t_hlt (void)
{
  do_t_bkpt_hlt1 (63);
}
12203 
/* Thumb BKPT: immediate limited to 0-255.  */
static void
do_t_bkpt (void)
{
  do_t_bkpt_hlt1 (255);
}
12209 
/* Encode a Thumb branch using the 23-bit PC-relative relocation
   (the BL-style call form), including PLT and COFF interworking
   special cases.  */
static void
do_t_branch23 (void)
{
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12237 
/* Encode BX <Rm>: the register goes in bits 3-6.  */
static void
do_t_bx (void)
{
  /* A branch must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  inst.instruction |= inst.operands[0].reg << 3;
  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
     should cause the alignment to be checked once it is known.	 This is
     because BX PC only works if the instruction is word aligned.  */
}
12247 
12248 static void
do_t_bxj(void)12249 do_t_bxj (void)
12250 {
12251   int Rm;
12252 
12253   set_pred_insn_type_last ();
12254   Rm = inst.operands[0].reg;
12255   reject_bad_reg (Rm);
12256   inst.instruction |= Rm << 16;
12257 }
12258 
12259 static void
do_t_clz(void)12260 do_t_clz (void)
12261 {
12262   unsigned Rd;
12263   unsigned Rm;
12264 
12265   Rd = inst.operands[0].reg;
12266   Rm = inst.operands[1].reg;
12267 
12268   reject_bad_reg (Rd);
12269   reject_bad_reg (Rm);
12270 
12271   inst.instruction |= Rd << 8;
12272   inst.instruction |= Rm << 16;
12273   inst.instruction |= Rm;
12274 }
12275 
12276 /* For the Armv8.1-M conditional instructions.  */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  /* Map each mnemonic onto the underlying CSEL-family operand layout:
     the aliases with fewer operands reuse a source register (or the
     PC) for the missing fields and invert the condition.  */
  switch (inst.instruction)
    {
      case T_MNEM_csinc:
      case T_MNEM_csinv:
      case T_MNEM_csneg:
      case T_MNEM_csel:
	/* Full three-register form with an explicit condition.  */
	Rn = inst.operands[1].reg;
	Rm = inst.operands[2].reg;
	cond = inst.operands[3].imm;
	constraint (Rn == REG_SP, BAD_SP);
	constraint (Rm == REG_SP, BAD_SP);
	break;

      case T_MNEM_cinc:
      case T_MNEM_cinv:
      case T_MNEM_cneg:
	/* One source register, used for both Rn and Rm.  */
	Rn = inst.operands[1].reg;
	cond = inst.operands[2].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	constraint (Rn == REG_SP, BAD_SP);
	Rm = Rn;
	break;

      case T_MNEM_csetm:
      case T_MNEM_cset:
	/* No source registers; both fields hold the PC.  */
	cond = inst.operands[1].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	Rn = REG_PC;
	Rm = REG_PC;
	break;

      default: abort ();
    }

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12329 
/* Encode CSDB: nothing to add to the opcode, but it may not be
   predicated.  */
static void
do_t_csdb (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12335 
/* Encode the simple CPS form: OR the immediate operand into the
   opcode.  May not appear inside an IT block.  */
static void
do_t_cps (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
12342 
/* Encode CPSIE/CPSID with interrupt flags and an optional mode
   operand.  The two-operand form requires the 32-bit encoding.  */
static void
do_t_cpsi (void)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  /* Use the 32-bit form when a mode operand or ".w" was given and the
     architecture has it.  */
  if (unified_syntax
      && (inst.operands[1].present || inst.size_req == 4)
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
    {
      /* Carry the enable/disable (imod) bits over from the table
	 opcode into the T32 encoding.  */
      unsigned int imod = (inst.instruction & 0x0030) >> 4;
      inst.instruction = 0xf3af8000;
      inst.instruction |= imod << 9;
      inst.instruction |= inst.operands[0].imm << 5;
      if (inst.operands[1].present)
	inst.instruction |= 0x100 | inst.operands[1].imm;
    }
  else
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
		  && (inst.operands[0].imm & 4),
		  _("selected processor does not support 'A' form "
		    "of this instruction"));
      constraint (inst.operands[1].present || inst.size_req == 4,
		  _("Thumb does not support the 2-argument "
		    "form of this instruction"));
      inst.instruction |= inst.operands[0].imm;
    }
}
12370 
12371 /* THUMB CPY instruction (argument parse).  */
12372 
12373 static void
do_t_cpy(void)12374 do_t_cpy (void)
12375 {
12376   if (inst.size_req == 4)
12377     {
12378       inst.instruction = THUMB_OP32 (T_MNEM_mov);
12379       inst.instruction |= inst.operands[0].reg << 8;
12380       inst.instruction |= inst.operands[1].reg;
12381     }
12382   else
12383     {
12384       inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12385       inst.instruction |= (inst.operands[0].reg & 0x7);
12386       inst.instruction |= inst.operands[1].reg << 3;
12387     }
12388 }
12389 
12390 static void
do_t_cbz(void)12391 do_t_cbz (void)
12392 {
12393   set_pred_insn_type (OUTSIDE_PRED_INSN);
12394   constraint (inst.operands[0].reg > 7, BAD_HIREG);
12395   inst.instruction |= inst.operands[0].reg;
12396   inst.relocs[0].pc_rel = 1;
12397   inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
12398 }
12399 
/* Encode DBG: the hint option goes in the low bits of the opcode.  */
static void
do_t_dbg (void)
{
  inst.instruction |= inst.operands[0].imm;
}
12405 
12406 static void
do_t_div(void)12407 do_t_div (void)
12408 {
12409   unsigned Rd, Rn, Rm;
12410 
12411   Rd = inst.operands[0].reg;
12412   Rn = (inst.operands[1].present
12413 	? inst.operands[1].reg : Rd);
12414   Rm = inst.operands[2].reg;
12415 
12416   reject_bad_reg (Rd);
12417   reject_bad_reg (Rn);
12418   reject_bad_reg (Rm);
12419 
12420   inst.instruction |= Rd << 8;
12421   inst.instruction |= Rn << 16;
12422   inst.instruction |= Rm;
12423 }
12424 
12425 static void
do_t_hint(void)12426 do_t_hint (void)
12427 {
12428   if (unified_syntax && inst.size_req == 4)
12429     inst.instruction = THUMB_OP32 (inst.instruction);
12430   else
12431     inst.instruction = THUMB_OP16 (inst.instruction);
12432 }
12433 
/* Encode an IT instruction and record the predication state for the
   instructions that follow.  */
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  set_pred_insn_type (IT_INSN);
  /* The low four opcode bits are the then/else mask.  */
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = false;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The position of the lowest set bit determines the block
	 length; the bits above it encode then/else and must be
	 flipped when the condition's low bit is clear.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12477 
12478 /* Helper function used for both push/pop and ldm/stm.  */
/* Shared encoder for push/pop and ldm/stm.  DO_IO is true when the
   instruction transfers a register list to/from memory through BASE
   (which must then be a valid register number); MASK is the register
   list; WRITEBACK requests base-register writeback.  Diagnoses
   UNPREDICTABLE list combinations and rewrites single-register
   transfers as str/ldr.  */
static void
encode_thumb2_multi (bool do_io, int base, unsigned mask,
		     bool writeback)
{
  bool load, store;

  gas_assert (base != -1 || !do_io);
  /* Bit 20 of the opcode distinguishes load from store.  */
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  /* SP may never appear in the register list.  */
  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      /* Loading the PC is a branch, so it must end any IT block;
	 loading both LR and PC is rejected.  */
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  /* (mask & (mask - 1)) == 0 means at most one bit is set.  */
  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single register into the str/ldr Rt field.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12545 
/* Encode Thumb LDM/STM.  In unified syntax, search for a 16-bit
   encoding (ldmia/stmia with a low base, single-register lists that
   become ldr/str, SP-based forms that become push/pop or SP-relative
   ldr/str) before falling back to the 32-bit form via
   encode_thumb2_multi.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bool narrow;
      unsigned mask;

      narrow = false;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  /* MASK is the bit for the base register.  */
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit form always writes back; for stmia that
		 must have been requested, for ldmia writeback must
		 match whether the base is absent from the list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr. The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = true;
		}
	    }
	  else if (inst.operands[0].reg == REG_SP)
	    {
	      /* SP-based forms map to push/pop (with writeback) or an
		 SP-relative str/ldr (single register, no writeback).  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = true;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = true;
		}
	    }
	}

      if (!narrow)
	{
	  /* Fall back to the 32-bit encoding.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (true /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Non-unified syntax: only the 16-bit ldmia/stmia forms exist.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12674 
/* Encode LDREX.  Only a plain pre-indexed [Rn {, #imm}] address is
   accepted: no writeback, post-index, register offset, shift, or
   negative offset, and the base may not be the PC.  */
static void
do_t_ldrex (void)
{
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative,
	      BAD_ADDR_MODE);

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
12690 
12691 static void
do_t_ldrexd(void)12692 do_t_ldrexd (void)
12693 {
12694   if (!inst.operands[1].present)
12695     {
12696       constraint (inst.operands[0].reg == REG_LR,
12697 		  _("r14 not allowed as first register "
12698 		    "when second register is omitted"));
12699       inst.operands[1].reg = inst.operands[0].reg + 1;
12700     }
12701   constraint (inst.operands[0].reg == inst.operands[1].reg,
12702 	      BAD_OVERLAP);
12703 
12704   inst.instruction |= inst.operands[0].reg << 12;
12705   inst.instruction |= inst.operands[1].reg << 8;
12706   inst.instruction |= inst.operands[2].reg << 16;
12707 }
12708 
12709 static void
do_t_ldst(void)12710 do_t_ldst (void)
12711 {
12712   unsigned long opcode;
12713   int Rn;
12714 
12715   if (inst.operands[0].isreg
12716       && !inst.operands[0].preind
12717       && inst.operands[0].reg == REG_PC)
12718     set_pred_insn_type_last ();
12719 
12720   opcode = inst.instruction;
12721   if (unified_syntax)
12722     {
12723       if (!inst.operands[1].isreg)
12724 	{
12725 	  if (opcode <= 0xffff)
12726 	    inst.instruction = THUMB_OP32 (opcode);
12727 	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
12728 	    return;
12729 	}
12730       if (inst.operands[1].isreg
12731 	  && !inst.operands[1].writeback
12732 	  && !inst.operands[1].shifted && !inst.operands[1].postind
12733 	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
12734 	  && opcode <= 0xffff
12735 	  && inst.size_req != 4)
12736 	{
12737 	  /* Insn may have a 16-bit form.  */
12738 	  Rn = inst.operands[1].reg;
12739 	  if (inst.operands[1].immisreg)
12740 	    {
12741 	      inst.instruction = THUMB_OP16 (opcode);
12742 	      /* [Rn, Rik] */
12743 	      if (Rn <= 7 && inst.operands[1].imm <= 7)
12744 		goto op16;
12745 	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
12746 		reject_bad_reg (inst.operands[1].imm);
12747 	    }
12748 	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
12749 		    && opcode != T_MNEM_ldrsb)
12750 		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
12751 		   || (Rn == REG_SP && opcode == T_MNEM_str))
12752 	    {
12753 	      /* [Rn, #const] */
12754 	      if (Rn > 7)
12755 		{
12756 		  if (Rn == REG_PC)
12757 		    {
12758 		      if (inst.relocs[0].pc_rel)
12759 			opcode = T_MNEM_ldr_pc2;
12760 		      else
12761 			opcode = T_MNEM_ldr_pc;
12762 		    }
12763 		  else
12764 		    {
12765 		      if (opcode == T_MNEM_ldr)
12766 			opcode = T_MNEM_ldr_sp;
12767 		      else
12768 			opcode = T_MNEM_str_sp;
12769 		    }
12770 		  inst.instruction = inst.operands[0].reg << 8;
12771 		}
12772 	      else
12773 		{
12774 		  inst.instruction = inst.operands[0].reg;
12775 		  inst.instruction |= inst.operands[1].reg << 3;
12776 		}
12777 	      inst.instruction |= THUMB_OP16 (opcode);
12778 	      if (inst.size_req == 2)
12779 		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12780 	      else
12781 		inst.relax = opcode;
12782 	      return;
12783 	    }
12784 	}
12785       /* Definitely a 32-bit variant.  */
12786 
12787       /* Warning for Erratum 752419.  */
12788       if (opcode == T_MNEM_ldr
12789 	  && inst.operands[0].reg == REG_SP
12790 	  && inst.operands[1].writeback == 1
12791 	  && !inst.operands[1].immisreg)
12792 	{
12793 	  if (no_cpu_selected ()
12794 	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
12795 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
12796 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
12797 	    as_warn (_("This instruction may be unpredictable "
12798 		       "if executed on M-profile cores "
12799 		       "with interrupts enabled."));
12800 	}
12801 
12802       /* Do some validations regarding addressing modes.  */
12803       if (inst.operands[1].immisreg)
12804 	reject_bad_reg (inst.operands[1].imm);
12805 
12806       constraint (inst.operands[1].writeback == 1
12807 		  && inst.operands[0].reg == inst.operands[1].reg,
12808 		  BAD_OVERLAP);
12809 
12810       inst.instruction = THUMB_OP32 (opcode);
12811       inst.instruction |= inst.operands[0].reg << 12;
12812       encode_thumb32_addr_mode (1, /*is_t=*/false, /*is_d=*/false);
12813       check_ldr_r15_aligned ();
12814       return;
12815     }
12816 
12817   constraint (inst.operands[0].reg > 7, BAD_HIREG);
12818 
12819   if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
12820     {
12821       /* Only [Rn,Rm] is acceptable.  */
12822       constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
12823       constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
12824 		  || inst.operands[1].postind || inst.operands[1].shifted
12825 		  || inst.operands[1].negative,
12826 		  _("Thumb does not support this addressing mode"));
12827       inst.instruction = THUMB_OP16 (inst.instruction);
12828       goto op16;
12829     }
12830 
12831   inst.instruction = THUMB_OP16 (inst.instruction);
12832   if (!inst.operands[1].isreg)
12833     if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/false))
12834       return;
12835 
12836   constraint (!inst.operands[1].preind
12837 	      || inst.operands[1].shifted
12838 	      || inst.operands[1].writeback,
12839 	      _("Thumb does not support this addressing mode"));
12840   if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
12841     {
12842       constraint (inst.instruction & 0x0600,
12843 		  _("byte or halfword not valid for base register"));
12844       constraint (inst.operands[1].reg == REG_PC
12845 		  && !(inst.instruction & THUMB_LOAD_BIT),
12846 		  _("r15 based store not allowed"));
12847       constraint (inst.operands[1].immisreg,
12848 		  _("invalid base register for register offset"));
12849 
12850       if (inst.operands[1].reg == REG_PC)
12851 	inst.instruction = T_OPCODE_LDR_PC;
12852       else if (inst.instruction & THUMB_LOAD_BIT)
12853 	inst.instruction = T_OPCODE_LDR_SP;
12854       else
12855 	inst.instruction = T_OPCODE_STR_SP;
12856 
12857       inst.instruction |= inst.operands[0].reg << 8;
12858       inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12859       return;
12860     }
12861 
12862   constraint (inst.operands[1].reg > 7, BAD_HIREG);
12863   if (!inst.operands[1].immisreg)
12864     {
12865       /* Immediate offset.  */
12866       inst.instruction |= inst.operands[0].reg;
12867       inst.instruction |= inst.operands[1].reg << 3;
12868       inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
12869       return;
12870     }
12871 
12872   /* Register offset.  */
12873   constraint (inst.operands[1].imm > 7, BAD_HIREG);
12874   constraint (inst.operands[1].negative,
12875 	      _("Thumb does not support this addressing mode"));
12876 
12877  op16:
12878   switch (inst.instruction)
12879     {
12880     case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
12881     case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
12882     case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
12883     case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
12884     case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
12885     case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
12886     case 0x5600 /* ldrsb */:
12887     case 0x5e00 /* ldrsh */: break;
12888     default: abort ();
12889     }
12890 
12891   inst.instruction |= inst.operands[0].reg;
12892   inst.instruction |= inst.operands[1].reg << 3;
12893   inst.instruction |= inst.operands[1].imm << 6;
12894 }
12895 
12896 static void
do_t_ldstd(void)12897 do_t_ldstd (void)
12898 {
12899   if (!inst.operands[1].present)
12900     {
12901       inst.operands[1].reg = inst.operands[0].reg + 1;
12902       constraint (inst.operands[0].reg == REG_LR,
12903 		  _("r14 not allowed here"));
12904       constraint (inst.operands[0].reg == REG_R12,
12905 		  _("r12 not allowed here"));
12906     }
12907 
12908   if (inst.operands[2].writeback
12909       && (inst.operands[0].reg == inst.operands[2].reg
12910       || inst.operands[1].reg == inst.operands[2].reg))
12911     as_warn (_("base register written back, and overlaps "
12912 	       "one of transfer registers"));
12913 
12914   inst.instruction |= inst.operands[0].reg << 12;
12915   inst.instruction |= inst.operands[1].reg << 8;
12916   encode_thumb32_addr_mode (2, /*is_t=*/false, /*is_d=*/true);
12917 }
12918 
12919 static void
do_t_ldstt(void)12920 do_t_ldstt (void)
12921 {
12922   inst.instruction |= inst.operands[0].reg << 12;
12923   encode_thumb32_addr_mode (1, /*is_t=*/true, /*is_d=*/false);
12924 }
12925 
12926 static void
do_t_mla(void)12927 do_t_mla (void)
12928 {
12929   unsigned Rd, Rn, Rm, Ra;
12930 
12931   Rd = inst.operands[0].reg;
12932   Rn = inst.operands[1].reg;
12933   Rm = inst.operands[2].reg;
12934   Ra = inst.operands[3].reg;
12935 
12936   reject_bad_reg (Rd);
12937   reject_bad_reg (Rn);
12938   reject_bad_reg (Rm);
12939   reject_bad_reg (Ra);
12940 
12941   inst.instruction |= Rd << 8;
12942   inst.instruction |= Rn << 16;
12943   inst.instruction |= Rm;
12944   inst.instruction |= Ra << 12;
12945 }
12946 
12947 static void
do_t_mlal(void)12948 do_t_mlal (void)
12949 {
12950   unsigned RdLo, RdHi, Rn, Rm;
12951 
12952   RdLo = inst.operands[0].reg;
12953   RdHi = inst.operands[1].reg;
12954   Rn = inst.operands[2].reg;
12955   Rm = inst.operands[3].reg;
12956 
12957   reject_bad_reg (RdLo);
12958   reject_bad_reg (RdHi);
12959   reject_bad_reg (Rn);
12960   reject_bad_reg (Rm);
12961 
12962   inst.instruction |= RdLo << 12;
12963   inst.instruction |= RdHi << 8;
12964   inst.instruction |= Rn << 16;
12965   inst.instruction |= Rm;
12966 }
12967 
/* Encode Thumb MOV, MOVS and CMP (register, immediate and shifted
   forms).  Chooses between 16-bit and 32-bit encodings, rewrites
   register-shifted MOV{S} as the equivalent shift instruction, and
   applies the architectural restrictions on SP/PC operands.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  /* Rn is the destination (MOV) or first compare operand (CMP).  */
  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A write to PC must be the last instruction of an IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Destination field position: bits 8-11 for MOV/MOVS, 16-19
	 otherwise (CMP's Rn field).  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bool narrow;
      bool low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Inside an IT block the narrow flag-setting MOVS is not
	 allowed; outside, MOVS may be narrow only with low regs.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = false;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  /* Leave the instruction relaxable unless .n forced
		     the 16-bit encoding.  */
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      /* The ALU_ABS group relocations only fit the 16-bit
		 encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      /* Switch to the modified-immediate form of the opcode.  */
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bool flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = false;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = false;

	  if (Rn != Rm)
	    narrow = false;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = false; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	/* Narrow encoding of the plain-register forms.  */
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results. Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Divided (pre-unified) syntax from here on.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13267 
13268 static void
do_t_mov16(void)13269 do_t_mov16 (void)
13270 {
13271   unsigned Rd;
13272   bfd_vma imm;
13273   bool top;
13274 
13275   top = (inst.instruction & 0x00800000) != 0;
13276   if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
13277     {
13278       constraint (top, _(":lower16: not allowed in this instruction"));
13279       inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
13280     }
13281   else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
13282     {
13283       constraint (!top, _(":upper16: not allowed in this instruction"));
13284       inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
13285     }
13286 
13287   Rd = inst.operands[0].reg;
13288   reject_bad_reg (Rd);
13289 
13290   inst.instruction |= Rd << 8;
13291   if (inst.relocs[0].type == BFD_RELOC_UNUSED)
13292     {
13293       imm = inst.relocs[0].exp.X_add_number;
13294       inst.instruction |= (imm & 0xf000) << 4;
13295       inst.instruction |= (imm & 0x0800) << 15;
13296       inst.instruction |= (imm & 0x0700) << 4;
13297       inst.instruction |= (imm & 0x00ff);
13298     }
13299 }
13300 
/* Encode the Thumb single-source ALU/test operations routed here by
   the opcode table (MVN{S} and the compare/test forms such as TST and
   CMN).  r0off below places the register in bits 8-11 for MVN{S} and
   bits 16-19 for the others.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* CMP/CMN permit SP as the first operand; the others reject both
     SP and PC.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bool narrow;

      /* Decide whether a 16-bit encoding is possible: needs low
	 registers, an unshifted operand, a 16-bit opcode template and
	 no .w qualifier; flag-setting forms further depend on IT
	 context.  */
      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = false;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = true;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  /* Switch to the modified-immediate form of the opcode.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Divided syntax: only the 16-bit low-register form with an
	 unshifted register operand exists.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13380 
/* Encode Thumb MRS (read a status/special register into Rd).
   Handles the VFP pseudo forms, banked registers and the PSR/
   special-purpose-register immediates.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VMRS-style aliases are diverted to the VFP encoder.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* Banked register: the parser packs the SYSm/R selector bits
	 into the register number.  NOTE(review): this bit layout must
	 stay in sync with the operand parser's banked-register
	 encoding — verify against the parsing code.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      /* PSR flag-suffix bits requested by the user, if any.  */
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bool m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13428 
/* Encode Thumb MSR (write Rn into a status/special register).
   Immediate source operands are not available in Thumb state.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VMSR-style aliases are diverted to the VFP encoder.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 is either a PSR-plus-mask immediate or a banked
     register packed into a register number by the parser.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bool m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension _s and _f masks are allowed; without
	 it only the flags field may be written.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	   && (bits & ~(PSR_s | PSR_f)) != 0)
	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	      && bits != PSR_f)) && m_profile,
	  _("selected processor does not support requested special "
	    "purpose register"));
    }
  else
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
		 "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  /* Scatter the mask/SYSm selector bits into the encoding.  */
  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13475 
13476 static void
do_t_mul(void)13477 do_t_mul (void)
13478 {
13479   bool narrow;
13480   unsigned Rd, Rn, Rm;
13481 
13482   if (!inst.operands[2].present)
13483     inst.operands[2].reg = inst.operands[0].reg;
13484 
13485   Rd = inst.operands[0].reg;
13486   Rn = inst.operands[1].reg;
13487   Rm = inst.operands[2].reg;
13488 
13489   if (unified_syntax)
13490     {
13491       if (inst.size_req == 4
13492 	  || (Rd != Rn
13493 	      && Rd != Rm)
13494 	  || Rn > 7
13495 	  || Rm > 7)
13496 	narrow = false;
13497       else if (inst.instruction == T_MNEM_muls)
13498 	narrow = !in_pred_block ();
13499       else
13500 	narrow = in_pred_block ();
13501     }
13502   else
13503     {
13504       constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
13505       constraint (Rn > 7 || Rm > 7,
13506 		  BAD_HIREG);
13507       narrow = true;
13508     }
13509 
13510   if (narrow)
13511     {
13512       /* 16-bit MULS/Conditional MUL.  */
13513       inst.instruction = THUMB_OP16 (inst.instruction);
13514       inst.instruction |= Rd;
13515 
13516       if (Rd == Rn)
13517 	inst.instruction |= Rm << 3;
13518       else if (Rd == Rm)
13519 	inst.instruction |= Rn << 3;
13520       else
13521 	constraint (1, _("dest must overlap one source register"));
13522     }
13523   else
13524     {
13525       constraint (inst.instruction != T_MNEM_mul,
13526 		  _("Thumb-2 MUL must not set flags"));
13527       /* 32-bit MUL.  */
13528       inst.instruction = THUMB_OP32 (inst.instruction);
13529       inst.instruction |= Rd << 8;
13530       inst.instruction |= Rn << 16;
13531       inst.instruction |= Rm << 0;
13532 
13533       reject_bad_reg (Rd);
13534       reject_bad_reg (Rn);
13535       reject_bad_reg (Rm);
13536     }
13537 }
13538 
13539 static void
do_t_mull(void)13540 do_t_mull (void)
13541 {
13542   unsigned RdLo, RdHi, Rn, Rm;
13543 
13544   RdLo = inst.operands[0].reg;
13545   RdHi = inst.operands[1].reg;
13546   Rn = inst.operands[2].reg;
13547   Rm = inst.operands[3].reg;
13548 
13549   reject_bad_reg (RdLo);
13550   reject_bad_reg (RdHi);
13551   reject_bad_reg (Rn);
13552   reject_bad_reg (Rm);
13553 
13554   inst.instruction |= RdLo << 12;
13555   inst.instruction |= RdHi << 8;
13556   inst.instruction |= Rn << 16;
13557   inst.instruction |= Rm;
13558 
13559  if (RdLo == RdHi)
13560     as_tsktsk (_("rdhi and rdlo must be different"));
13561 }
13562 
13563 static void
do_t_nop(void)13564 do_t_nop (void)
13565 {
13566   set_pred_insn_type (NEUTRAL_IT_INSN);
13567 
13568   if (unified_syntax)
13569     {
13570       if (inst.size_req == 4 || inst.operands[0].imm > 15)
13571 	{
13572 	  inst.instruction = THUMB_OP32 (inst.instruction);
13573 	  inst.instruction |= inst.operands[0].imm;
13574 	}
13575       else
13576 	{
13577 	  /* PR9722: Check for Thumb2 availability before
13578 	     generating a thumb2 nop instruction.  */
13579 	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
13580 	    {
13581 	      inst.instruction = THUMB_OP16 (inst.instruction);
13582 	      inst.instruction |= inst.operands[0].imm << 4;
13583 	    }
13584 	  else
13585 	    inst.instruction = 0x46c0;
13586 	}
13587     }
13588   else
13589     {
13590       constraint (inst.operands[0].present,
13591 		  _("Thumb does not support NOP with hints"));
13592       inst.instruction = 0x46c0;
13593     }
13594 }
13595 
13596 static void
do_t_neg(void)13597 do_t_neg (void)
13598 {
13599   if (unified_syntax)
13600     {
13601       bool narrow;
13602 
13603       if (THUMB_SETS_FLAGS (inst.instruction))
13604 	narrow = !in_pred_block ();
13605       else
13606 	narrow = in_pred_block ();
13607       if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13608 	narrow = false;
13609       if (inst.size_req == 4)
13610 	narrow = false;
13611 
13612       if (!narrow)
13613 	{
13614 	  inst.instruction = THUMB_OP32 (inst.instruction);
13615 	  inst.instruction |= inst.operands[0].reg << 8;
13616 	  inst.instruction |= inst.operands[1].reg << 16;
13617 	}
13618       else
13619 	{
13620 	  inst.instruction = THUMB_OP16 (inst.instruction);
13621 	  inst.instruction |= inst.operands[0].reg;
13622 	  inst.instruction |= inst.operands[1].reg << 3;
13623 	}
13624     }
13625   else
13626     {
13627       constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13628 		  BAD_HIREG);
13629       constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13630 
13631       inst.instruction = THUMB_OP16 (inst.instruction);
13632       inst.instruction |= inst.operands[0].reg;
13633       inst.instruction |= inst.operands[1].reg << 3;
13634     }
13635 }
13636 
13637 static void
do_t_orn(void)13638 do_t_orn (void)
13639 {
13640   unsigned Rd, Rn;
13641 
13642   Rd = inst.operands[0].reg;
13643   Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
13644 
13645   reject_bad_reg (Rd);
13646   /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
13647   reject_bad_reg (Rn);
13648 
13649   inst.instruction |= Rd << 8;
13650   inst.instruction |= Rn << 16;
13651 
13652   if (!inst.operands[2].isreg)
13653     {
13654       inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13655       inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13656     }
13657   else
13658     {
13659       unsigned Rm;
13660 
13661       Rm = inst.operands[2].reg;
13662       reject_bad_reg (Rm);
13663 
13664       constraint (inst.operands[2].shifted
13665 		  && inst.operands[2].immisreg,
13666 		  _("shift must be constant"));
13667       encode_thumb32_shifted_operand (2);
13668     }
13669 }
13670 
13671 static void
do_t_pkhbt(void)13672 do_t_pkhbt (void)
13673 {
13674   unsigned Rd, Rn, Rm;
13675 
13676   Rd = inst.operands[0].reg;
13677   Rn = inst.operands[1].reg;
13678   Rm = inst.operands[2].reg;
13679 
13680   reject_bad_reg (Rd);
13681   reject_bad_reg (Rn);
13682   reject_bad_reg (Rm);
13683 
13684   inst.instruction |= Rd << 8;
13685   inst.instruction |= Rn << 16;
13686   inst.instruction |= Rm;
13687   if (inst.operands[3].present)
13688     {
13689       unsigned int val = inst.relocs[0].exp.X_add_number;
13690       constraint (inst.relocs[0].exp.X_op != O_constant,
13691 		  _("expression too complex"));
13692       inst.instruction |= (val & 0x1c) << 10;
13693       inst.instruction |= (val & 0x03) << 6;
13694     }
13695 }
13696 
13697 static void
do_t_pkhtb(void)13698 do_t_pkhtb (void)
13699 {
13700   if (!inst.operands[3].present)
13701     {
13702       unsigned Rtmp;
13703 
13704       inst.instruction &= ~0x00000020;
13705 
13706       /* PR 10168.  Swap the Rm and Rn registers.  */
13707       Rtmp = inst.operands[1].reg;
13708       inst.operands[1].reg = inst.operands[2].reg;
13709       inst.operands[2].reg = Rtmp;
13710     }
13711   do_t_pkhbt ();
13712 }
13713 
13714 static void
do_t_pld(void)13715 do_t_pld (void)
13716 {
13717   if (inst.operands[0].immisreg)
13718     reject_bad_reg (inst.operands[0].imm);
13719 
13720   encode_thumb32_addr_mode (0, /*is_t=*/false, /*is_d=*/false);
13721 }
13722 
13723 static void
do_t_push_pop(void)13724 do_t_push_pop (void)
13725 {
13726   unsigned mask;
13727 
13728   constraint (inst.operands[0].writeback,
13729 	      _("push/pop do not support {reglist}^"));
13730   constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
13731 	      _("expression too complex"));
13732 
13733   mask = inst.operands[0].imm;
13734   if (inst.size_req != 4 && (mask & ~0xff) == 0)
13735     inst.instruction = THUMB_OP16 (inst.instruction) | mask;
13736   else if (inst.size_req != 4
13737 	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
13738 				       ? REG_LR : REG_PC)))
13739     {
13740       inst.instruction = THUMB_OP16 (inst.instruction);
13741       inst.instruction |= THUMB_PP_PC_LR;
13742       inst.instruction |= mask & 0xff;
13743     }
13744   else if (unified_syntax)
13745     {
13746       inst.instruction = THUMB_OP32 (inst.instruction);
13747       encode_thumb2_multi (true /* do_io */, 13, mask, true);
13748     }
13749   else
13750     {
13751       inst.error = _("invalid register list to push/pop instruction");
13752       return;
13753     }
13754 }
13755 
13756 static void
do_t_clrm(void)13757 do_t_clrm (void)
13758 {
13759   if (unified_syntax)
13760     encode_thumb2_multi (false /* do_io */, -1, inst.operands[0].imm, false);
13761   else
13762     {
13763       inst.error = _("invalid register list to push/pop instruction");
13764       return;
13765     }
13766 }
13767 
static void
do_t_vscclrm (void)
{
  /* Encode the VSCCLRM register list.  Single- and double-precision
     lists place the base register and count in different fields.  */
  if (inst.operands[0].issingle)
    {
      /* Single precision: low bit of the register number goes into
	 bit 22, the remaining bits into bits 12-15; imm is the
	 register count.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* Double precision: bit 4 of the register number goes into
	 bit 22, the low four bits into bits 12-15; the count is
	 doubled (imm << 1) and bit 8 marks the D variant.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13785 
13786 static void
do_t_rbit(void)13787 do_t_rbit (void)
13788 {
13789   unsigned Rd, Rm;
13790 
13791   Rd = inst.operands[0].reg;
13792   Rm = inst.operands[1].reg;
13793 
13794   reject_bad_reg (Rd);
13795   reject_bad_reg (Rm);
13796 
13797   inst.instruction |= Rd << 8;
13798   inst.instruction |= Rm << 16;
13799   inst.instruction |= Rm;
13800 }
13801 
13802 static void
do_t_rev(void)13803 do_t_rev (void)
13804 {
13805   unsigned Rd, Rm;
13806 
13807   Rd = inst.operands[0].reg;
13808   Rm = inst.operands[1].reg;
13809 
13810   reject_bad_reg (Rd);
13811   reject_bad_reg (Rm);
13812 
13813   if (Rd <= 7 && Rm <= 7
13814       && inst.size_req != 4)
13815     {
13816       inst.instruction = THUMB_OP16 (inst.instruction);
13817       inst.instruction |= Rd;
13818       inst.instruction |= Rm << 3;
13819     }
13820   else if (unified_syntax)
13821     {
13822       inst.instruction = THUMB_OP32 (inst.instruction);
13823       inst.instruction |= Rd << 8;
13824       inst.instruction |= Rm << 16;
13825       inst.instruction |= Rm;
13826     }
13827   else
13828     inst.error = BAD_HIREG;
13829 }
13830 
13831 static void
do_t_rrx(void)13832 do_t_rrx (void)
13833 {
13834   unsigned Rd, Rm;
13835 
13836   Rd = inst.operands[0].reg;
13837   Rm = inst.operands[1].reg;
13838 
13839   reject_bad_reg (Rd);
13840   reject_bad_reg (Rm);
13841 
13842   inst.instruction |= Rd << 8;
13843   inst.instruction |= Rm;
13844 }
13845 
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  /* Two-operand form reuses the destination as the first source.  */
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bool narrow;

      /* Bit 20 is the S bit.  The 16-bit NEG always sets flags, so it
	 is only usable when that matches the IT-block context.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = false;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = false;

      /* Only "rsb ..., #0" has a narrow equivalent.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = false;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit form with a modified-immediate operand, resolved
	     through a T32 immediate fixup.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13900 
13901 static void
do_t_setend(void)13902 do_t_setend (void)
13903 {
13904   if (warn_on_deprecated
13905       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13906       as_tsktsk (_("setend use is deprecated for ARMv8"));
13907 
13908   set_pred_insn_type (OUTSIDE_PRED_INSN);
13909   if (inst.operands[0].imm)
13910     inst.instruction |= 0x8;
13911 }
13912 
static void
do_t_shift (void)
{
  /* Two-operand form shifts the destination in place.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bool narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* The 16-bit encodings set flags outside an IT block and leave
	 them alone inside one; a narrow encoding is only usable when
	 that matches the requested flag behaviour.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      /* Narrow encodings cover only low registers ...  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = false;
      /* ... have no immediate ROR form ...  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = false;
      /* ... and the register-shift form needs Rd == Rn and a low Rs.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = false;
      if (inst.size_req == 4)
	narrow = false;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* The wide immediate-shift form is actually MOV(S)
		 Rd, Rm, <shift> with the shift folded into operand 1.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* Narrow register-shift encodings.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* Narrow immediate-shift encodings; the amount is
		 resolved via a Thumb shift fixup.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Non-unified (pre-UAL) syntax: only 16-bit encodings with low
	 registers are available.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14060 
14061 static void
do_t_simd(void)14062 do_t_simd (void)
14063 {
14064   unsigned Rd, Rn, Rm;
14065 
14066   Rd = inst.operands[0].reg;
14067   Rn = inst.operands[1].reg;
14068   Rm = inst.operands[2].reg;
14069 
14070   reject_bad_reg (Rd);
14071   reject_bad_reg (Rn);
14072   reject_bad_reg (Rm);
14073 
14074   inst.instruction |= Rd << 8;
14075   inst.instruction |= Rn << 16;
14076   inst.instruction |= Rm;
14077 }
14078 
14079 static void
do_t_simd2(void)14080 do_t_simd2 (void)
14081 {
14082   unsigned Rd, Rn, Rm;
14083 
14084   Rd = inst.operands[0].reg;
14085   Rm = inst.operands[1].reg;
14086   Rn = inst.operands[2].reg;
14087 
14088   reject_bad_reg (Rd);
14089   reject_bad_reg (Rn);
14090   reject_bad_reg (Rm);
14091 
14092   inst.instruction |= Rd << 8;
14093   inst.instruction |= Rn << 16;
14094   inst.instruction |= Rm;
14095 }
14096 
static void
do_t_smc (void)
{
  /* SMC #imm4 (T32).  The immediate comes from the parsed expression
     and must be a constant in the range 0-15.  */
  unsigned int value = inst.relocs[0].exp.X_add_number;
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
	      _("SMC is not permitted on this architecture"));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));

  /* The immediate is encoded directly; no fixup is required.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
  inst.instruction |= (value & 0x000f) << 16;

  /* PR gas/15623: SMC instructions must be last in an IT block.  */
  set_pred_insn_type_last ();
}
14113 
14114 static void
do_t_hvc(void)14115 do_t_hvc (void)
14116 {
14117   unsigned int value = inst.relocs[0].exp.X_add_number;
14118 
14119   inst.relocs[0].type = BFD_RELOC_UNUSED;
14120   inst.instruction |= (value & 0x0fff);
14121   inst.instruction |= (value & 0xf000) << 4;
14122 }
14123 
static void
do_t_ssat_usat (int bias)
{
  /* Common encoder for SSAT (bias 1) and USAT (bias 0): the
     saturation bound in operand 1 is encoded minus BIAS.  */
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  /* Optional shift applied to Rn before saturating.  */
  if (inst.operands[3].present)
    {
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      /* The shift amount is encoded inline, not via a fixup.  */
      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* imm3 (bits 12-14) : imm2 (bits 6-7) split of the amount.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14161 
static void
do_t_ssat (void)
{
  /* SSAT: the saturation bound is encoded biased by 1.  */
  do_t_ssat_usat (1);
}
14167 
14168 static void
do_t_ssat16(void)14169 do_t_ssat16 (void)
14170 {
14171   unsigned Rd, Rn;
14172 
14173   Rd = inst.operands[0].reg;
14174   Rn = inst.operands[2].reg;
14175 
14176   reject_bad_reg (Rd);
14177   reject_bad_reg (Rn);
14178 
14179   inst.instruction |= Rd << 8;
14180   inst.instruction |= inst.operands[1].imm - 1;
14181   inst.instruction |= Rn << 16;
14182 }
14183 
static void
do_t_strex (void)
{
  /* STREX Rd, Rt, [Rn {, #imm}].  The address must be the plain
     immediate-offset form: no post-index, writeback, register offset,
     shift or negative offset.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  constraint (inst.operands[2].reg == REG_PC, BAD_PC);

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  /* The offset is resolved later through an unsigned 8-bit T32
     offset fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
}
14200 
14201 static void
do_t_strexd(void)14202 do_t_strexd (void)
14203 {
14204   if (!inst.operands[2].present)
14205     inst.operands[2].reg = inst.operands[1].reg + 1;
14206 
14207   constraint (inst.operands[0].reg == inst.operands[1].reg
14208 	      || inst.operands[0].reg == inst.operands[2].reg
14209 	      || inst.operands[0].reg == inst.operands[3].reg,
14210 	      BAD_OVERLAP);
14211 
14212   inst.instruction |= inst.operands[0].reg;
14213   inst.instruction |= inst.operands[1].reg << 12;
14214   inst.instruction |= inst.operands[2].reg << 8;
14215   inst.instruction |= inst.operands[3].reg << 16;
14216 }
14217 
14218 static void
do_t_sxtah(void)14219 do_t_sxtah (void)
14220 {
14221   unsigned Rd, Rn, Rm;
14222 
14223   Rd = inst.operands[0].reg;
14224   Rn = inst.operands[1].reg;
14225   Rm = inst.operands[2].reg;
14226 
14227   reject_bad_reg (Rd);
14228   reject_bad_reg (Rn);
14229   reject_bad_reg (Rm);
14230 
14231   inst.instruction |= Rd << 8;
14232   inst.instruction |= Rn << 16;
14233   inst.instruction |= Rm;
14234   inst.instruction |= inst.operands[3].imm << 4;
14235 }
14236 
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* 16-bit form: a 16-bit base opcode, low registers, no explicit .w
     and no rotation requested.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* The rotation is encoded in bits 4-5.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      /* Pre-UAL Thumb has no rotated or high-register forms.  */
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14272 
static void
do_t_swi (void)
{
  /* SWI/SVC: the comment field is filled in later via a fixup.  */
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14278 
static void
do_t_tb (void)
{
  /* TBB/TBH [Rn, Rm {, LSL #1}].  Bit 4 of the base opcode
     distinguishes the halfword (TBH) form.  */
  unsigned Rn, Rm;
  int half;

  half = (inst.instruction & 0x10) != 0;
  /* Branch-like: must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 lifts the restriction on SP as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only the halfword form takes the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14301 
static void
do_t_udf (void)
{
  /* UDF {#imm}: permanently undefined instruction.  The immediate
     defaults to zero when omitted.  */
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      /* 32-bit form: imm16 split into a high 4-bit field (bits 16-19)
	 and a low 12-bit field.  */
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF may appear anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14324 
14325 
static void
do_t_usat (void)
{
  /* USAT: the saturation bound is encoded unbiased.  */
  do_t_ssat_usat (0);
}
14331 
14332 static void
do_t_usat16(void)14333 do_t_usat16 (void)
14334 {
14335   unsigned Rd, Rn;
14336 
14337   Rd = inst.operands[0].reg;
14338   Rn = inst.operands[2].reg;
14339 
14340   reject_bad_reg (Rd);
14341   reject_bad_reg (Rn);
14342 
14343   inst.instruction |= Rd << 8;
14344   inst.instruction |= inst.operands[1].imm;
14345   inst.instruction |= Rn << 16;
14346 }
14347 
14348 /* Checking the range of the branch offset (VAL) with NBITS bits
14349    and IS_SIGNED signedness.  Also checks the LSB to be 0.  */
14350 static int
v8_1_branch_value_check(int val,int nbits,int is_signed)14351 v8_1_branch_value_check (int val, int nbits, int is_signed)
14352 {
14353   gas_assert (nbits > 0 && nbits <= 32);
14354   if (is_signed)
14355     {
14356       int cmp = (1 << (nbits - 1));
14357       if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14358 	return FAIL;
14359     }
14360   else
14361     {
14362       if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14363 	return FAIL;
14364     }
14365     return SUCCESS;
14366 }
14367 
/* For branches in Armv8.1-M Mainline.  */
static void
do_t_branch_future (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0 is the branch-future point.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, false) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      /* Resolved now: encode bits 4-1 of the offset at bit 23.  */
      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      /* Defer to a 5-bit pc-relative fixup.  */
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
      case T_MNEM_bf:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 17, true) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* Split the 17-bit offset into the immA:immB:immC fields.  */
	    int immA = (val & 0x0001f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfl:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 19, true) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* 19-bit offset: like BF but with a wider immA field.  */
	    int immA = (val & 0x0007f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfcsel:
	/* Operand 1.  */
	if (inst.operands[1].hasreloc == 0)
	  {
	    /* 13-bit offset: a 1-bit immA field.  */
	    int val = inst.operands[1].imm;
	    int immA = (val & 0x00001000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	    inst.relocs[1].pc_rel = 1;
	  }

	/* Operand 2.  */
	if (inst.operands[2].hasreloc == 0)
	  {
	      /* Operands 0 and 2 must be resolved (or not) together.  */
	      constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	      int val2 = inst.operands[2].imm;
	      int val0 = inst.operands[0].imm & 0x1f;
	      int diff = val2 - val0;
	      /* The else-branch label must be 2 or 4 bytes past the
		 branch-future point; a distance of 4 sets the T bit.  */
	      if (diff == 4)
		inst.instruction |= 1 << 17; /* T bit.  */
	      else if (diff != 2)
		as_bad (_("out of range label-relative fixup value"));
	  }
	else
	  {
	      constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	      inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	      inst.relocs[2].pc_rel = 1;
	  }

	/* Operand 3.  */
	constraint (inst.cond != COND_ALWAYS, BAD_COND);
	inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
	break;

      case T_MNEM_bfx:
      case T_MNEM_bflx:
	/* Register forms: Rn goes into bits 16-19.  */
	inst.instruction |= inst.operands[1].reg << 16;
	break;

      default: abort ();
    }
}
14476 
/* Helper function for do_t_loloop to handle relocations.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      int value = inst.relocs[0].exp.X_add_number;
      /* LE branches backwards, so the encoded offset is negated.  */
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, false) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the offset: bits 11-2 into immh, bit 1 into imml.  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      /* The target is not yet known: emit a 12-bit pc-relative
	 loop fixup instead.  */
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
14502 
14503 /* For shifts with four operands in MVE.  */
14504 static void
do_mve_scalar_shift1(void)14505 do_mve_scalar_shift1 (void)
14506 {
14507   unsigned int value = inst.operands[2].imm;
14508 
14509   inst.instruction |= inst.operands[0].reg << 16;
14510   inst.instruction |= inst.operands[1].reg << 8;
14511 
14512   /* Setting the bit for saturation.  */
14513   inst.instruction |= ((value == 64) ? 0: 1) << 7;
14514 
14515   /* Assuming Rm is already checked not to be 11x1.  */
14516   constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14517   constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14518   inst.instruction |= inst.operands[3].reg << 12;
14519 }
14520 
/* For shifts in MVE.  */
static void
do_mve_scalar_shift (void)
{
  /* Single-register form: shift the register in place by moving it to
     the source slot and marking the destination field as 0xf.  */
  if (!inst.operands[2].present)
    {
      inst.operands[2] = inst.operands[1];
      inst.operands[1].reg = 0xf;
    }

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  if (inst.operands[2].isreg)
    {
      /* Assuming Rm is already checked not to be 11x1.  */
      constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
      constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
      inst.instruction |= inst.operands[2].reg << 12;
    }
  else
    {
      /* Assuming imm is already checked as [1,32].  */
      unsigned int value = inst.operands[2].imm;
      /* imm3:imm2 split of the shift amount.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
      /* Change last 4 bits from 0xd to 0xf.  */
      inst.instruction |= 0x2;
    }
}
14551 
/* MVE instruction encoder helpers.  Base opcode bit patterns for the
   MVE mnemonics; the encoder functions OR the operand fields into
   these values.  */
#define M_MNEM_vabav	0xee800f01
#define M_MNEM_vmladav	  0xeef00e00
#define M_MNEM_vmladava	  0xeef00e20
#define M_MNEM_vmladavx	  0xeef01e00
#define M_MNEM_vmladavax  0xeef01e20
#define M_MNEM_vmlsdav	  0xeef00e01
#define M_MNEM_vmlsdava	  0xeef00e21
#define M_MNEM_vmlsdavx	  0xeef01e01
#define M_MNEM_vmlsdavax  0xeef01e21
#define M_MNEM_vmullt	0xee011e00
#define M_MNEM_vmullb	0xee010e00
#define M_MNEM_vctp	0xf000e801
/* Structured (de)interleaving stores and loads.  */
#define M_MNEM_vst20	0xfc801e00
#define M_MNEM_vst21	0xfc801e20
#define M_MNEM_vst40	0xfc801e01
#define M_MNEM_vst41	0xfc801e21
#define M_MNEM_vst42	0xfc801e41
#define M_MNEM_vst43	0xfc801e61
#define M_MNEM_vld20	0xfc901e00
#define M_MNEM_vld21	0xfc901e20
#define M_MNEM_vld40	0xfc901e01
#define M_MNEM_vld41	0xfc901e21
#define M_MNEM_vld42	0xfc901e41
#define M_MNEM_vld43	0xfc901e61
/* Element-size-specific vector stores and loads.  */
#define M_MNEM_vstrb	0xec000e00
#define M_MNEM_vstrh	0xec000e10
#define M_MNEM_vstrw	0xec000e40
#define M_MNEM_vstrd	0xec000e50
#define M_MNEM_vldrb	0xec100e00
#define M_MNEM_vldrh	0xec100e10
#define M_MNEM_vldrw	0xec100e40
#define M_MNEM_vldrd	0xec100e50
#define M_MNEM_vmovlt	0xeea01f40
#define M_MNEM_vmovlb	0xeea00f40
#define M_MNEM_vmovnt	0xfe311e81
#define M_MNEM_vmovnb	0xfe310e81
#define M_MNEM_vadc	0xee300f00
#define M_MNEM_vadci	0xee301f00
#define M_MNEM_vbrsr	0xfe011e60
#define M_MNEM_vaddlv	0xee890f00
#define M_MNEM_vaddlva	0xee890f20
#define M_MNEM_vaddv	0xeef10f00
#define M_MNEM_vaddva	0xeef10f20
#define M_MNEM_vddup	0xee011f6e
#define M_MNEM_vdwdup	0xee011f60
#define M_MNEM_vidup	0xee010f6e
#define M_MNEM_viwdup	0xee010f60
#define M_MNEM_vmaxv	0xeee20f00
#define M_MNEM_vmaxav	0xeee00f00
#define M_MNEM_vminv	0xeee20f80
#define M_MNEM_vminav	0xeee00f80
#define M_MNEM_vmlaldav	  0xee800e00
#define M_MNEM_vmlaldava  0xee800e20
#define M_MNEM_vmlaldavx  0xee801e00
#define M_MNEM_vmlaldavax 0xee801e20
#define M_MNEM_vmlsldav	  0xee800e01
#define M_MNEM_vmlsldava  0xee800e21
#define M_MNEM_vmlsldavx  0xee801e01
#define M_MNEM_vmlsldavax 0xee801e21
#define M_MNEM_vrmlaldavhx  0xee801f00
#define M_MNEM_vrmlaldavhax 0xee801f20
#define M_MNEM_vrmlsldavh   0xfe800e01
#define M_MNEM_vrmlsldavha  0xfe800e21
#define M_MNEM_vrmlsldavhx  0xfe801e01
#define M_MNEM_vrmlsldavhax 0xfe801e21
#define M_MNEM_vqmovnt	  0xee331e01
#define M_MNEM_vqmovnb	  0xee330e01
#define M_MNEM_vqmovunt	  0xee311e81
#define M_MNEM_vqmovunb	  0xee310e81
#define M_MNEM_vshrnt	    0xee801fc1
#define M_MNEM_vshrnb	    0xee800fc1
#define M_MNEM_vrshrnt	    0xfe801fc1
#define M_MNEM_vqshrnt	    0xee801f40
#define M_MNEM_vqshrnb	    0xee800f40
#define M_MNEM_vqshrunt	    0xee801fc0
#define M_MNEM_vqshrunb	    0xee800fc0
#define M_MNEM_vrshrnb	    0xfe800fc1
#define M_MNEM_vqrshrnt	    0xee801f41
#define M_MNEM_vqrshrnb	    0xee800f41
#define M_MNEM_vqrshrunt    0xfe801fc0
#define M_MNEM_vqrshrunb    0xfe800fc0

/* Bfloat16 instruction encoder helpers.  */
#define B_MNEM_vfmat 0xfc300850
#define B_MNEM_vfmab 0xfc300810
14638 
/* Neon instruction encoder helpers.  */

/* Encodings for the different types for various Neon opcodes.  */

/* An "invalid" code for the following tables.  */
#define N_INV -1u

/* One row of the Neon encoding table: the base opcode bits for the
   integer, float-or-polynomial and scalar-or-immediate variants of a
   mnemonic (N_INV where a variant does not exist).  */
struct neon_tab_entry
{
  unsigned integer;
  unsigned float_or_poly;
  unsigned scalar_or_imm;
};
14652 
/* Map overloaded Neon opcodes to their respective encodings.  Each
   X(mnemonic, integer, float_or_poly, scalar_or_imm) entry supplies
   the three variant encodings described by struct neon_tab_entry;
   the table is expanded twice below to build both the enum of
   mnemonic indices and the encoding array.  */
#define NEON_ENC_TAB					\
  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
  X(vabdl,	0x0800700, N_INV,     N_INV),		\
  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
  X(vaddl,	0x0800000, N_INV,     N_INV),		\
  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
  X(vsubl,	0x0800200, N_INV,     N_INV),		\
  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
  /* Register variants of the following two instructions are encoded as
     vcge / vcgt with the operands reversed.  */  	\
  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
  X(vfma,	N_INV, 0x0000c10, N_INV),		\
  X(vfms,	N_INV, 0x0200c10, N_INV),		\
  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
  X(vshl,	0x0000400, N_INV,     0x0800510),	\
  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
  X(vand,	0x0000110, N_INV,     0x0800030),	\
  X(vbic,	0x0100110, N_INV,     0x0800030),	\
  X(veor,	0x1000110, N_INV,     N_INV),		\
  X(vorn,	0x0300110, N_INV,     0x0800010),	\
  X(vorr,	0x0200110, N_INV,     0x0800010),	\
  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
  X(vst1,	0x0000000, 0x0800000, N_INV),		\
  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
  X(vst2,	0x0000100, 0x0800100, N_INV),		\
  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
  X(vst3,	0x0000200, 0x0800200, N_INV),		\
  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
  X(vst4,	0x0000300, 0x0800300, N_INV),		\
  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
  X(vselge,	0xe200a00, N_INV,     N_INV),		\
  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
  X(aes,	0x3b00300, N_INV,     N_INV),		\
  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14733 
/* Pseudo-opcode enumerators N_MNEM_<mnemonic>, one per NEON_ENC_TAB row.
   These values are stored in inst.instruction while an instruction is being
   parsed, and are later translated into a real encoding via NEON_ENCODE.  */
enum neon_opc
{
#define X(OPC,I,F,S) N_MNEM_##OPC
NEON_ENC_TAB
#undef X
};
14740 
/* Encoding table indexed by the N_MNEM_* pseudo-opcodes above.  Each entry
   holds the integer, float-or-poly and scalar-or-immediate encoding variants
   for the mnemonic (N_INV where a variant does not exist).  */
static const struct neon_tab_entry neon_enc_tab[] =
{
#define X(OPC,I,F,S) { (I), (F), (S) }
NEON_ENC_TAB
#undef X
};
14747 
/* Do not use these macros; instead, use NEON_ENCODE defined below.
   Each one maps an N_MNEM_* pseudo-opcode (held in the low 28 bits of X)
   to one column of neon_enc_tab.  */
#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
/* The SINGLE/DOUBLE variants preserve the top nibble of X (bits 28-31);
   the FPV8 variant preserves bits 24-27 only.  */
#define NEON_ENC_SINGLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
#define NEON_ENC_DOUBLE_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
#define NEON_ENC_FPV8_(X) \
  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14764 
/* Replace the pseudo-opcode in inst.instruction with the requested encoding
   variant, and mark the instruction as Neon.  */
#define NEON_ENCODE(type, inst)					\
  do								\
    {								\
      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
      inst.is_neon = 1;						\
    }								\
  while (0)

/* Diagnose a Neon type suffix (e.g. ".i32") that was parsed for an
   instruction which never went through NEON_ENCODE.  */
#define check_neon_suffixes						\
  do									\
    {									\
      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
	{								\
	  as_bad (_("invalid neon suffix for non neon instruction"));	\
	  return;							\
	}								\
    }									\
  while (0)
14783 
/* Define shapes for instruction operands. The following mnemonic characters
   are used in this table:

     F - VFP S<n> register
     H - VFP S<n> register holding a half-precision (16-bit) value
     D - Neon D<n> register
     Q - Neon Q<n> register
     I - Immediate
     S - Scalar
     R - ARM register
     L - D<n> register list
     P - matched unconditionally by neon_select_shape (no operand check;
	 presumably a predication/prefix operand -- see uses below)

   This table is used to generate various data:
     - enumerations of the form NS_DDR to be used as arguments to
       neon_select_shape.
     - a table classifying shapes into single, double, quad, mixed.
     - a table used to drive neon_select_shape.

   Each X entry is (operand count, (element kinds...), classification).  */

#define NEON_SHAPE_DEF			\
  X(4, (R, R, Q, Q), QUAD),		\
  X(4, (Q, R, R, I), QUAD),		\
  X(4, (R, R, S, S), QUAD),		\
  X(4, (S, S, R, R), QUAD),		\
  X(3, (Q, R, I), QUAD),		\
  X(3, (I, Q, Q), QUAD),		\
  X(3, (I, Q, R), QUAD),		\
  X(3, (R, Q, Q), QUAD),		\
  X(3, (D, D, D), DOUBLE),		\
  X(3, (Q, Q, Q), QUAD),		\
  X(3, (D, D, I), DOUBLE),		\
  X(3, (Q, Q, I), QUAD),		\
  X(3, (D, D, S), DOUBLE),		\
  X(3, (Q, Q, S), QUAD),		\
  X(3, (Q, Q, R), QUAD),		\
  X(3, (R, R, Q), QUAD),		\
  X(2, (R, Q),	  QUAD),		\
  X(2, (D, D), DOUBLE),			\
  X(2, (Q, Q), QUAD),			\
  X(2, (D, S), DOUBLE),			\
  X(2, (Q, S), QUAD),			\
  X(2, (D, R), DOUBLE),			\
  X(2, (Q, R), QUAD),			\
  X(2, (D, I), DOUBLE),			\
  X(2, (Q, I), QUAD),			\
  X(3, (P, F, I), SINGLE),		\
  X(3, (P, D, I), DOUBLE),		\
  X(3, (P, Q, I), QUAD),		\
  X(4, (P, F, F, I), SINGLE),		\
  X(4, (P, D, D, I), DOUBLE),		\
  X(4, (P, Q, Q, I), QUAD),		\
  X(5, (P, F, F, F, I), SINGLE),	\
  X(5, (P, D, D, D, I), DOUBLE),	\
  X(5, (P, Q, Q, Q, I), QUAD),		\
  X(3, (D, L, D), DOUBLE),		\
  X(2, (D, Q), MIXED),			\
  X(2, (Q, D), MIXED),			\
  X(3, (D, Q, I), MIXED),		\
  X(3, (Q, D, I), MIXED),		\
  X(3, (Q, D, D), MIXED),		\
  X(3, (D, Q, Q), MIXED),		\
  X(3, (Q, Q, D), MIXED),		\
  X(3, (Q, D, S), MIXED),		\
  X(3, (D, Q, S), MIXED),		\
  X(4, (D, D, D, I), DOUBLE),		\
  X(4, (Q, Q, Q, I), QUAD),		\
  X(4, (D, D, S, I), DOUBLE),		\
  X(4, (Q, Q, S, I), QUAD),		\
  X(2, (F, F), SINGLE),			\
  X(3, (F, F, F), SINGLE),		\
  X(2, (F, I), SINGLE),			\
  X(2, (F, D), MIXED),			\
  X(2, (D, F), MIXED),			\
  X(3, (F, F, I), MIXED),		\
  X(4, (R, R, F, F), SINGLE),		\
  X(4, (F, F, R, R), SINGLE),		\
  X(3, (D, R, R), DOUBLE),		\
  X(3, (R, R, D), DOUBLE),		\
  X(2, (S, R), SINGLE),			\
  X(2, (R, S), SINGLE),			\
  X(2, (F, R), SINGLE),			\
  X(2, (R, F), SINGLE),			\
/* Used for MVE tail predicated loop instructions.  */\
  X(2, (R, R), QUAD),			\
/* Half float shape supported so far.  */\
  X (2, (H, D), MIXED),			\
  X (2, (D, H), MIXED),			\
  X (2, (H, F), MIXED),			\
  X (2, (F, H), MIXED),			\
  X (2, (H, H), HALF),			\
  X (2, (H, R), HALF),			\
  X (2, (R, H), HALF),			\
  X (2, (H, I), HALF),			\
  X (3, (H, H, H), HALF),		\
  X (3, (H, F, I), MIXED),		\
  X (3, (F, H, I), MIXED),		\
  X (3, (D, H, H), MIXED),		\
  X (3, (D, H, S), MIXED)
14880 
/* Paste an NS_<elements> enumerator name from a shape's element list.  */
#define S2(A,B)		NS_##A##B
#define S3(A,B,C)	NS_##A##B##C
#define S4(A,B,C,D)	NS_##A##B##C##D
#define S5(A,B,C,D,E)	NS_##A##B##C##D##E

#define X(N, L, C) S##N L

/* NS_<elements> enumerators (e.g. NS_DDD) generated from NEON_SHAPE_DEF,
   terminated by the sentinel NS_NULL.  */
enum neon_shape
{
  NEON_SHAPE_DEF,
  NS_NULL
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14899 
/* Broad classification of a shape, taken from the third field of each
   NEON_SHAPE_DEF entry.  */
enum neon_shape_class
{
  SC_HALF,
  SC_SINGLE,
  SC_DOUBLE,
  SC_QUAD,
  SC_MIXED
};

#define X(N, L, C) SC_##C

/* Classification of each shape, indexed by enum neon_shape.  */
static enum neon_shape_class neon_shape_class[] =
{
  NEON_SHAPE_DEF
};

#undef X
14917 
/* One enumerator per operand-element kind used in NEON_SHAPE_DEF.  */
enum neon_shape_el
{
  SE_H,
  SE_F,
  SE_D,
  SE_Q,
  SE_I,
  SE_S,
  SE_R,
  SE_L,
  SE_P
};

/* Register widths of above.  Indexed by enum neon_shape_el; zero for
   element kinds that are not sized registers.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H */
  32,	/* SE_F */
  64,	/* SE_D */
  128,	/* SE_Q */
  0,	/* SE_I */
  32,	/* SE_S */
  32,	/* SE_R */
  0,	/* SE_L */
  0	/* SE_P */
};
14944 
/* Element-wise description of one shape: ELS elements of kind EL[i].  */
struct neon_shape_info
{
  unsigned els;
  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
};

#define S2(A,B)		{ SE_##A, SE_##B }
#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
#define S5(A,B,C,D,E)	{ SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }

#define X(N, L, C) { N, S##N L }

/* Per-shape element information, indexed by enum neon_shape.  Drives the
   matching loop in neon_select_shape.  */
static struct neon_shape_info neon_shape_tab[] =
{
  NEON_SHAPE_DEF
};

#undef X
#undef S2
#undef S3
#undef S4
#undef S5
14968 
/* Bit masks used in type checking given instructions.
  'N_EQK' means the type must be the same as (or based on in some way) the key
   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
   set, various other bits can be set as well in order to modify the meaning of
   the type constraint.

   Note that the modifier bits N_DBL..N_SIZ deliberately reuse the low value
   bits (N_DBL == N_S8, etc.); they are only meaningful alongside N_EQK.  */

enum neon_type_mask
{
  N_S8   = 0x0000001,
  N_S16  = 0x0000002,
  N_S32  = 0x0000004,
  N_S64  = 0x0000008,
  N_U8   = 0x0000010,
  N_U16  = 0x0000020,
  N_U32  = 0x0000040,
  N_U64  = 0x0000080,
  N_I8   = 0x0000100,
  N_I16  = 0x0000200,
  N_I32  = 0x0000400,
  N_I64  = 0x0000800,
  N_8    = 0x0001000,
  N_16   = 0x0002000,
  N_32   = 0x0004000,
  N_64   = 0x0008000,
  N_P8   = 0x0010000,
  N_P16  = 0x0020000,
  N_F16  = 0x0040000,
  N_F32  = 0x0080000,
  N_F64  = 0x0100000,
  N_P64	 = 0x0200000,
  N_BF16 = 0x0400000,
  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
  N_UTYP = 0,
  N_MAX_NONSPECIAL = N_P64
};

/* All N_EQK modifier bits.  */
#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)

/* Convenience unions of the single-bit type masks above.  */
#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
#define N_S_32     (N_S8 | N_S16 | N_S32)
#define N_F_16_32  (N_F16 | N_F32)
#define N_SUF_32   (N_SU_32 | N_F_16_32)
#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
#define N_F_ALL    (N_F16 | N_F32 | N_F64)
#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
#define N_F_MVE	   (N_F16 | N_F32)
#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)

/* Pass this as the first type argument to neon_check_type to ignore types
   altogether.  */
#define N_IGNORE_TYPE (N_KEY | N_EQK)
15033 
15034 /* Select a "shape" for the current instruction (describing register types or
15035    sizes) from a list of alternatives. Return NS_NULL if the current instruction
15036    doesn't fit. For non-polymorphic shapes, checking is usually done as a
15037    function of operand parsing, so this function doesn't need to be called.
15038    Shapes should be listed in order of decreasing length.  */
15039 
static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Try each candidate shape in argument order; stop at the first one for
     which every element matches the corresponding parsed operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* Fewer operands present than the shape requires.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      /* Neon D register: vector but neither quad nor single.  */
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      /* Plain ARM core register.  */
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      /* Immediate: neither a register nor a scalar.  */
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_P:
	    case SE_L:
	      /* No operand checking performed for these element kinds.  */
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  /* Only diagnose when the caller actually supplied candidate shapes.  */
  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15177 
15178 /* True if SHAPE is predominantly a quadword operation (most of the time, this
15179    means the Q bit should be set).  */
15180 
15181 static int
neon_quad(enum neon_shape shape)15182 neon_quad (enum neon_shape shape)
15183 {
15184   return neon_shape_class[shape] == SC_QUAD;
15185 }
15186 
15187 static void
neon_modify_type_size(unsigned typebits,enum neon_el_type * g_type,unsigned * g_size)15188 neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15189 		       unsigned *g_size)
15190 {
15191   /* Allow modification to be made to types which are constrained to be
15192      based on the key element, based on bits set alongside N_EQK.  */
15193   if ((typebits & N_EQK) != 0)
15194     {
15195       if ((typebits & N_HLF) != 0)
15196 	*g_size /= 2;
15197       else if ((typebits & N_DBL) != 0)
15198 	*g_size *= 2;
15199       if ((typebits & N_SGN) != 0)
15200 	*g_type = NT_signed;
15201       else if ((typebits & N_UNS) != 0)
15202 	*g_type = NT_unsigned;
15203       else if ((typebits & N_INT) != 0)
15204 	*g_type = NT_integer;
15205       else if ((typebits & N_FLT) != 0)
15206 	*g_type = NT_float;
15207       else if ((typebits & N_SIZ) != 0)
15208 	*g_type = NT_untyped;
15209     }
15210 }
15211 
15212 /* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15213    operand type, i.e. the single type specified in a Neon instruction when it
15214    is the only one given.  */
15215 
15216 static struct neon_type_el
neon_type_promote(struct neon_type_el * key,unsigned thisarg)15217 neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15218 {
15219   struct neon_type_el dest = *key;
15220 
15221   gas_assert ((thisarg & N_EQK) != 0);
15222 
15223   neon_modify_type_size (thisarg, &dest.type, &dest.size);
15224 
15225   return dest;
15226 }
15227 
15228 /* Convert Neon type and size into compact bitmask representation.  */
15229 
15230 static enum neon_type_mask
type_chk_of_el_type(enum neon_el_type type,unsigned size)15231 type_chk_of_el_type (enum neon_el_type type, unsigned size)
15232 {
15233   switch (type)
15234     {
15235     case NT_untyped:
15236       switch (size)
15237 	{
15238 	case 8:  return N_8;
15239 	case 16: return N_16;
15240 	case 32: return N_32;
15241 	case 64: return N_64;
15242 	default: ;
15243 	}
15244       break;
15245 
15246     case NT_integer:
15247       switch (size)
15248 	{
15249 	case 8:  return N_I8;
15250 	case 16: return N_I16;
15251 	case 32: return N_I32;
15252 	case 64: return N_I64;
15253 	default: ;
15254 	}
15255       break;
15256 
15257     case NT_float:
15258       switch (size)
15259 	{
15260 	case 16: return N_F16;
15261 	case 32: return N_F32;
15262 	case 64: return N_F64;
15263 	default: ;
15264 	}
15265       break;
15266 
15267     case NT_poly:
15268       switch (size)
15269 	{
15270 	case 8:  return N_P8;
15271 	case 16: return N_P16;
15272 	case 64: return N_P64;
15273 	default: ;
15274 	}
15275       break;
15276 
15277     case NT_signed:
15278       switch (size)
15279 	{
15280 	case 8:  return N_S8;
15281 	case 16: return N_S16;
15282 	case 32: return N_S32;
15283 	case 64: return N_S64;
15284 	default: ;
15285 	}
15286       break;
15287 
15288     case NT_unsigned:
15289       switch (size)
15290 	{
15291 	case 8:  return N_U8;
15292 	case 16: return N_U16;
15293 	case 32: return N_U32;
15294 	case 64: return N_U64;
15295 	default: ;
15296 	}
15297       break;
15298 
15299     case NT_bfloat:
15300       if (size == 16) return N_BF16;
15301       break;
15302 
15303     default: ;
15304     }
15305 
15306   return N_UTYP;
15307 }
15308 
15309 /* Convert compact Neon bitmask type representation to a type and size. Only
15310    handles the case where a single bit is set in the mask.  */
15311 
15312 static int
el_type_of_type_chk(enum neon_el_type * type,unsigned * size,enum neon_type_mask mask)15313 el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15314 		     enum neon_type_mask mask)
15315 {
15316   if ((mask & N_EQK) != 0)
15317     return FAIL;
15318 
15319   if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15320     *size = 8;
15321   else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15322 	   != 0)
15323     *size = 16;
15324   else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15325     *size = 32;
15326   else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15327     *size = 64;
15328   else
15329     return FAIL;
15330 
15331   if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15332     *type = NT_signed;
15333   else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15334     *type = NT_unsigned;
15335   else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15336     *type = NT_integer;
15337   else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15338     *type = NT_untyped;
15339   else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15340     *type = NT_poly;
15341   else if ((mask & (N_F_ALL)) != 0)
15342     *type = NT_float;
15343   else if ((mask & (N_BF16)) != 0)
15344     *type = NT_bfloat;
15345   else
15346     return FAIL;
15347 
15348   return SUCCESS;
15349 }
15350 
15351 /* Modify a bitmask of allowed types. This is only needed for type
15352    relaxation.  */
15353 
15354 static unsigned
modify_types_allowed(unsigned allowed,unsigned mods)15355 modify_types_allowed (unsigned allowed, unsigned mods)
15356 {
15357   unsigned size;
15358   enum neon_el_type type;
15359   unsigned destmask;
15360   int i;
15361 
15362   destmask = 0;
15363 
15364   for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15365     {
15366       if (el_type_of_type_chk (&type, &size,
15367 			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
15368 	{
15369 	  neon_modify_type_size (mods, &type, &size);
15370 	  destmask |= type_chk_of_el_type (type, size);
15371 	}
15372     }
15373 
15374   return destmask;
15375 }
15376 
15377 /* Check type and return type classification.
15378    The manual states (paraphrase): If one datatype is given, it indicates the
15379    type given in:
15380     - the second operand, if there is one
15381     - the operand, if there is no second operand
15382     - the result, if there are no operands.
15383    This isn't quite good enough though, so we use a concept of a "key" datatype
15384    which is set on a per-instruction basis, which is the one which matters when
15385    only one data type is written.
15386    Note: this function has side-effects (e.g. filling in missing operands). All
15387    Neon instructions should call it before performing bit encoding.  */
15388 
static struct neon_type_el
neon_check_type (unsigned els, enum neon_shape ns, ...)
{
  va_list ap;
  unsigned i, pass, key_el = 0;
  unsigned types[NEON_MAX_TYPE_ELS];
  enum neon_el_type k_type = NT_invtype;
  unsigned k_size = -1u;
  /* Returned on any error; callers test .type against NT_invtype.  */
  struct neon_type_el badtype = {NT_invtype, -1};
  unsigned key_allowed = 0;

  /* Optional registers in Neon instructions are always (not) in operand 1.
     Fill in the missing operand here, if it was omitted.  */
  if (els > 1 && !inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  /* Suck up all the varargs.  */
  va_start (ap, ns);
  for (i = 0; i < els; i++)
    {
      unsigned thisarg = va_arg (ap, unsigned);
      if (thisarg == N_IGNORE_TYPE)
	{
	  va_end (ap);
	  return badtype;
	}
      types[i] = thisarg;
      /* Remember which operand carries the key (main) type.  */
      if ((thisarg & N_KEY) != 0)
	key_el = i;
    }
  va_end (ap);

  /* A type given after the mnemonic and types given after operands are
     mutually exclusive.  */
  if (inst.vectype.elems > 0)
    for (i = 0; i < els; i++)
      if (inst.operands[i].vectype.type != NT_invtype)
	{
	  first_error (_("types specified in both the mnemonic and operands"));
	  return badtype;
	}

  /* Duplicate inst.vectype elements here as necessary.
     FIXME: No idea if this is exactly the same as the ARM assembler,
     particularly when an insn takes one register and one non-register
     operand. */
  if (inst.vectype.elems == 1 && els > 1)
    {
      unsigned j;
      inst.vectype.elems = els;
      inst.vectype.el[key_el] = inst.vectype.el[0];
      for (j = 0; j < els; j++)
	if (j != key_el)
	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						  types[j]);
    }
  else if (inst.vectype.elems == 0 && els > 0)
    {
      unsigned j;
      /* No types were given after the mnemonic, so look for types specified
	 after each operand. We allow some flexibility here; as long as the
	 "key" operand has a type, we can infer the others.  */
      for (j = 0; j < els; j++)
	if (inst.operands[j].vectype.type != NT_invtype)
	  inst.vectype.el[j] = inst.operands[j].vectype;

      if (inst.operands[key_el].vectype.type != NT_invtype)
	{
	  for (j = 0; j < els; j++)
	    if (inst.operands[j].vectype.type == NT_invtype)
	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
						      types[j]);
	}
      else
	{
	  first_error (_("operand types can't be inferred"));
	  return badtype;
	}
    }
  else if (inst.vectype.elems != els)
    {
      first_error (_("type specifier has the wrong number of parts"));
      return badtype;
    }

  /* Pass 0 records the key operand's type and size; pass 1 validates every
     operand against the key and against its allowed-type mask.  */
  for (pass = 0; pass < 2; pass++)
    {
      for (i = 0; i < els; i++)
	{
	  unsigned thisarg = types[i];
	  /* On pass 1, N_EQK masks are relaxed by the key's allowed set.  */
	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
	  enum neon_el_type g_type = inst.vectype.el[i].type;
	  unsigned g_size = inst.vectype.el[i].size;

	  /* Decay more-specific signed & unsigned types to sign-insensitive
	     integer types if sign-specific variants are unavailable.  */
	  if ((g_type == NT_signed || g_type == NT_unsigned)
	      && (types_allowed & N_SU_ALL) == 0)
	    g_type = NT_integer;

	  /* If only untyped args are allowed, decay any more specific types to
	     them. Some instructions only care about signs for some element
	     sizes, so handle that properly.  */
	  if (((types_allowed & N_UNT) == 0)
	      && ((g_size == 8 && (types_allowed & N_8) != 0)
		  || (g_size == 16 && (types_allowed & N_16) != 0)
		  || (g_size == 32 && (types_allowed & N_32) != 0)
		  || (g_size == 64 && (types_allowed & N_64) != 0)))
	    g_type = NT_untyped;

	  if (pass == 0)
	    {
	      if ((thisarg & N_KEY) != 0)
		{
		  k_type = g_type;
		  k_size = g_size;
		  key_allowed = thisarg & ~N_KEY;

		  /* Check architecture constraint on FP16 extension.  */
		  if (k_size == 16
		      && k_type == NT_float
		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
		    {
		      inst.error = _(BAD_FP16);
		      return badtype;
		    }
		}
	    }
	  else
	    {
	      if ((thisarg & N_VFP) != 0)
		{
		  enum neon_shape_el regshape;
		  unsigned regwidth, match;

		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
		  if (ns == NS_NULL)
		    {
		      first_error (_("invalid instruction shape"));
		      return badtype;
		    }
		  regshape = neon_shape_tab[ns].el[i];
		  regwidth = neon_shape_el_size[regshape];

		  /* In VFP mode, operands must match register widths. If we
		     have a key operand, use its width, else use the width of
		     the current operand.  */
		  if (k_size != -1u)
		    match = k_size;
		  else
		    match = g_size;

		  /* FP16 will use a single precision register.  */
		  if (regwidth == 32 && match == 16)
		    {
		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
			match = regwidth;
		      else
			{
			  inst.error = _(BAD_FP16);
			  return badtype;
			}
		    }

		  if (regwidth != match)
		    {
		      first_error (_("operand size must match register width"));
		      return badtype;
		    }
		}

	      if ((thisarg & N_EQK) == 0)
		{
		  unsigned given_type = type_chk_of_el_type (g_type, g_size);

		  if ((given_type & types_allowed) == 0)
		    {
		      first_error (BAD_SIMD_TYPE);
		      return badtype;
		    }
		}
	      else
		{
		  /* N_EQK operand: must equal the key type after applying
		     this operand's modifier bits.  */
		  enum neon_el_type mod_k_type = k_type;
		  unsigned mod_k_size = k_size;
		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
		  if (g_type != mod_k_type || g_size != mod_k_size)
		    {
		      first_error (_("inconsistent types in Neon instruction"));
		      return badtype;
		    }
		}
	    }
	}
    }

  return inst.vectype.el[key_el];
}
15586 
15587 /* Neon-style VFP instruction forwarding.  */
15588 
15589 /* Thumb VFP instructions have 0xE in the condition field.  */
15590 
15591 static void
do_vfp_cond_or_thumb(void)15592 do_vfp_cond_or_thumb (void)
15593 {
15594   inst.is_neon = 1;
15595 
15596   if (thumb_mode)
15597     inst.instruction |= 0xe0000000;
15598   else
15599     inst.instruction |= inst.cond << 28;
15600 }
15601 
15602 /* Look up and encode a simple mnemonic, for use as a helper function for the
15603    Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
15604    etc.  It is assumed that operand parsing has already been done, and that the
15605    operands are in the form expected by the given opcode (this isn't necessarily
15606    the same as the form in which they were parsed, hence some massaging must
15607    take place before this function is called).
15608    Checks current arch version against that in the looked-up opcode.  */
15609 
15610 static void
do_vfp_nsyn_opcode(const char * opname)15611 do_vfp_nsyn_opcode (const char *opname)
15612 {
15613   const struct asm_opcode *opcode;
15614 
15615   opcode = (const struct asm_opcode *) str_hash_find (arm_ops_hsh, opname);
15616 
15617   if (!opcode)
15618     abort ();
15619 
15620   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
15621 		thumb_mode ? *opcode->tvariant : *opcode->avariant),
15622 	      _(BAD_FPU));
15623 
15624   inst.is_neon = 1;
15625 
15626   if (thumb_mode)
15627     {
15628       inst.instruction = opcode->tvalue;
15629       opcode->tencode ();
15630     }
15631   else
15632     {
15633       inst.instruction = (inst.cond << 28) | opcode->avalue;
15634       opcode->aencode ();
15635     }
15636 }
15637 
15638 static void
do_vfp_nsyn_add_sub(enum neon_shape rs)15639 do_vfp_nsyn_add_sub (enum neon_shape rs)
15640 {
15641   int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15642 
15643   if (rs == NS_FFF || rs == NS_HHH)
15644     {
15645       if (is_add)
15646 	do_vfp_nsyn_opcode ("fadds");
15647       else
15648 	do_vfp_nsyn_opcode ("fsubs");
15649 
15650       /* ARMv8.2 fp16 instruction.  */
15651       if (rs == NS_HHH)
15652 	do_scalar_fp16_v82_encode ();
15653     }
15654   else
15655     {
15656       if (is_add)
15657 	do_vfp_nsyn_opcode ("faddd");
15658       else
15659 	do_vfp_nsyn_opcode ("fsubd");
15660     }
15661 }
15662 
15663 /* Check operand types to see if this is a VFP instruction, and if so call
15664    PFN ().  */
15665 
15666 static int
try_vfp_nsyn(int args,void (* pfn)(enum neon_shape))15667 try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15668 {
15669   enum neon_shape rs;
15670   struct neon_type_el et;
15671 
15672   switch (args)
15673     {
15674     case 2:
15675       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15676       et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15677       break;
15678 
15679     case 3:
15680       rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15681       et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15682 			    N_F_ALL | N_KEY | N_VFP);
15683       break;
15684 
15685     default:
15686       abort ();
15687     }
15688 
15689   if (et.type != NT_invtype)
15690     {
15691       pfn (rs);
15692       return SUCCESS;
15693     }
15694 
15695   inst.error = NULL;
15696   return FAIL;
15697 }
15698 
15699 static void
do_vfp_nsyn_mla_mls(enum neon_shape rs)15700 do_vfp_nsyn_mla_mls (enum neon_shape rs)
15701 {
15702   int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15703 
15704   if (rs == NS_FFF || rs == NS_HHH)
15705     {
15706       if (is_mla)
15707 	do_vfp_nsyn_opcode ("fmacs");
15708       else
15709 	do_vfp_nsyn_opcode ("fnmacs");
15710 
15711       /* ARMv8.2 fp16 instruction.  */
15712       if (rs == NS_HHH)
15713 	do_scalar_fp16_v82_encode ();
15714     }
15715   else
15716     {
15717       if (is_mla)
15718 	do_vfp_nsyn_opcode ("fmacd");
15719       else
15720 	do_vfp_nsyn_opcode ("fnmacd");
15721     }
15722 }
15723 
15724 static void
do_vfp_nsyn_fma_fms(enum neon_shape rs)15725 do_vfp_nsyn_fma_fms (enum neon_shape rs)
15726 {
15727   int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15728 
15729   if (rs == NS_FFF || rs == NS_HHH)
15730     {
15731       if (is_fma)
15732 	do_vfp_nsyn_opcode ("ffmas");
15733       else
15734 	do_vfp_nsyn_opcode ("ffnmas");
15735 
15736       /* ARMv8.2 fp16 instruction.  */
15737       if (rs == NS_HHH)
15738 	do_scalar_fp16_v82_encode ();
15739     }
15740   else
15741     {
15742       if (is_fma)
15743 	do_vfp_nsyn_opcode ("ffmad");
15744       else
15745 	do_vfp_nsyn_opcode ("ffnmad");
15746     }
15747 }
15748 
15749 static void
do_vfp_nsyn_mul(enum neon_shape rs)15750 do_vfp_nsyn_mul (enum neon_shape rs)
15751 {
15752   if (rs == NS_FFF || rs == NS_HHH)
15753     {
15754       do_vfp_nsyn_opcode ("fmuls");
15755 
15756       /* ARMv8.2 fp16 instruction.  */
15757       if (rs == NS_HHH)
15758 	do_scalar_fp16_v82_encode ();
15759     }
15760   else
15761     do_vfp_nsyn_opcode ("fmuld");
15762 }
15763 
15764 static void
do_vfp_nsyn_abs_neg(enum neon_shape rs)15765 do_vfp_nsyn_abs_neg (enum neon_shape rs)
15766 {
15767   int is_neg = (inst.instruction & 0x80) != 0;
15768   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15769 
15770   if (rs == NS_FF || rs == NS_HH)
15771     {
15772       if (is_neg)
15773 	do_vfp_nsyn_opcode ("fnegs");
15774       else
15775 	do_vfp_nsyn_opcode ("fabss");
15776 
15777       /* ARMv8.2 fp16 instruction.  */
15778       if (rs == NS_HH)
15779 	do_scalar_fp16_v82_encode ();
15780     }
15781   else
15782     {
15783       if (is_neg)
15784 	do_vfp_nsyn_opcode ("fnegd");
15785       else
15786 	do_vfp_nsyn_opcode ("fabsd");
15787     }
15788 }
15789 
15790 /* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15791    insns belong to Neon, and are handled elsewhere.  */
15792 
15793 static void
do_vfp_nsyn_ldm_stm(int is_dbmode)15794 do_vfp_nsyn_ldm_stm (int is_dbmode)
15795 {
15796   int is_ldm = (inst.instruction & (1 << 20)) != 0;
15797   if (is_ldm)
15798     {
15799       if (is_dbmode)
15800 	do_vfp_nsyn_opcode ("fldmdbs");
15801       else
15802 	do_vfp_nsyn_opcode ("fldmias");
15803     }
15804   else
15805     {
15806       if (is_dbmode)
15807 	do_vfp_nsyn_opcode ("fstmdbs");
15808       else
15809 	do_vfp_nsyn_opcode ("fstmias");
15810     }
15811 }
15812 
15813 static void
do_vfp_nsyn_sqrt(void)15814 do_vfp_nsyn_sqrt (void)
15815 {
15816   enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15817   neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15818 
15819   if (rs == NS_FF || rs == NS_HH)
15820     {
15821       do_vfp_nsyn_opcode ("fsqrts");
15822 
15823       /* ARMv8.2 fp16 instruction.  */
15824       if (rs == NS_HH)
15825 	do_scalar_fp16_v82_encode ();
15826     }
15827   else
15828     do_vfp_nsyn_opcode ("fsqrtd");
15829 }
15830 
15831 static void
do_vfp_nsyn_div(void)15832 do_vfp_nsyn_div (void)
15833 {
15834   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15835   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15836 		   N_F_ALL | N_KEY | N_VFP);
15837 
15838   if (rs == NS_FFF || rs == NS_HHH)
15839     {
15840       do_vfp_nsyn_opcode ("fdivs");
15841 
15842       /* ARMv8.2 fp16 instruction.  */
15843       if (rs == NS_HHH)
15844 	do_scalar_fp16_v82_encode ();
15845     }
15846   else
15847     do_vfp_nsyn_opcode ("fdivd");
15848 }
15849 
15850 static void
do_vfp_nsyn_nmul(void)15851 do_vfp_nsyn_nmul (void)
15852 {
15853   enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15854   neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15855 		   N_F_ALL | N_KEY | N_VFP);
15856 
15857   if (rs == NS_FFF || rs == NS_HHH)
15858     {
15859       NEON_ENCODE (SINGLE, inst);
15860       do_vfp_sp_dyadic ();
15861 
15862       /* ARMv8.2 fp16 instruction.  */
15863       if (rs == NS_HHH)
15864 	do_scalar_fp16_v82_encode ();
15865     }
15866   else
15867     {
15868       NEON_ENCODE (DOUBLE, inst);
15869       do_vfp_dp_rd_rn_rm ();
15870     }
15871   do_vfp_cond_or_thumb ();
15872 
15873 }
15874 
/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
   (0, 1, 2, 3).  */

static unsigned
neon_logbits (unsigned x)
{
  /* ffs gives the 1-based index of the least significant set bit
     (8 -> 4, 16 -> 5, ...); subtracting 4 yields the encoding value.  */
  unsigned lsb_index = ffs (x);
  return lsb_index - 4;
}
15883 
/* Helpers for splitting a (extended) register number into the fields
   used by Neon/MVE encodings: LOW4 - low four bits; HI1 - bit 4;
   LOW1 - bit 0; HI4 - bits 1 to 4.  */
#define LOW4(R) ((R) & 0xf)
#define HI1(R) (((R) >> 4) & 1)
#define LOW1(R) ((R) & 0x1)
#define HI4(R) (((R) >> 1) & 0xf)
15888 
15889 static unsigned
mve_get_vcmp_vpt_cond(struct neon_type_el et)15890 mve_get_vcmp_vpt_cond (struct neon_type_el et)
15891 {
15892   switch (et.type)
15893     {
15894     default:
15895       first_error (BAD_EL_TYPE);
15896       return 0;
15897     case NT_float:
15898       switch (inst.operands[0].imm)
15899 	{
15900 	default:
15901 	  first_error (_("invalid condition"));
15902 	  return 0;
15903 	case 0x0:
15904 	  /* eq.  */
15905 	  return 0;
15906 	case 0x1:
15907 	  /* ne.  */
15908 	  return 1;
15909 	case 0xa:
15910 	  /* ge/  */
15911 	  return 4;
15912 	case 0xb:
15913 	  /* lt.  */
15914 	  return 5;
15915 	case 0xc:
15916 	  /* gt.  */
15917 	  return 6;
15918 	case 0xd:
15919 	  /* le.  */
15920 	  return 7;
15921 	}
15922     case NT_integer:
15923       /* only accept eq and ne.  */
15924       if (inst.operands[0].imm > 1)
15925 	{
15926 	  first_error (_("invalid condition"));
15927 	  return 0;
15928 	}
15929       return inst.operands[0].imm;
15930     case NT_unsigned:
15931       if (inst.operands[0].imm == 0x2)
15932 	return 2;
15933       else if (inst.operands[0].imm == 0x8)
15934 	return 3;
15935       else
15936 	{
15937 	  first_error (_("invalid condition"));
15938 	  return 0;
15939 	}
15940     case NT_signed:
15941       switch (inst.operands[0].imm)
15942 	{
15943 	  default:
15944 	    first_error (_("invalid condition"));
15945 	    return 0;
15946 	  case 0xa:
15947 	    /* ge.  */
15948 	    return 4;
15949 	  case 0xb:
15950 	    /* lt.  */
15951 	    return 5;
15952 	  case 0xc:
15953 	    /* gt.  */
15954 	    return 6;
15955 	  case 0xd:
15956 	    /* le.  */
15957 	    return 7;
15958 	}
15959     }
15960   /* Should be unreachable.  */
15961   abort ();
15962 }
15963 
15964 /* For VCTP (create vector tail predicate) in MVE.  */
15965 static void
do_mve_vctp(void)15966 do_mve_vctp (void)
15967 {
15968   int dt = 0;
15969   unsigned size = 0x0;
15970 
15971   if (inst.cond > COND_ALWAYS)
15972     inst.pred_insn_type = INSIDE_VPT_INSN;
15973   else
15974     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15975 
15976   /* This is a typical MVE instruction which has no type but have size 8, 16,
15977      32 and 64.  For instructions with no type, inst.vectype.el[j].type is set
15978      to NT_untyped and size is updated in inst.vectype.el[j].size.  */
15979   if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
15980     dt = inst.vectype.el[0].size;
15981 
15982   /* Setting this does not indicate an actual NEON instruction, but only
15983      indicates that the mnemonic accepts neon-style type suffixes.  */
15984   inst.is_neon = 1;
15985 
15986   switch (dt)
15987     {
15988       case 8:
15989 	break;
15990       case 16:
15991 	size = 0x1; break;
15992       case 32:
15993 	size = 0x2; break;
15994       case 64:
15995 	size = 0x3; break;
15996       default:
15997 	first_error (_("Type is not allowed for this instruction"));
15998     }
15999   inst.instruction |= size << 20;
16000   inst.instruction |= inst.operands[0].reg << 16;
16001 }
16002 
16003 static void
do_mve_vpt(void)16004 do_mve_vpt (void)
16005 {
16006   /* We are dealing with a vector predicated block.  */
16007   if (inst.operands[0].present)
16008     {
16009       enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
16010       struct neon_type_el et
16011 	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
16012 			   N_EQK);
16013 
16014       unsigned fcond = mve_get_vcmp_vpt_cond (et);
16015 
16016       constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
16017 
16018       if (et.type == NT_invtype)
16019 	return;
16020 
16021       if (et.type == NT_float)
16022 	{
16023 	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
16024 		      BAD_FPU);
16025 	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
16026 	  inst.instruction |= (et.size == 16) << 28;
16027 	  inst.instruction |= 0x3 << 20;
16028 	}
16029       else
16030 	{
16031 	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
16032 		      BAD_EL_TYPE);
16033 	  inst.instruction |= 1 << 28;
16034 	  inst.instruction |= neon_logbits (et.size) << 20;
16035 	}
16036 
16037       if (inst.operands[2].isquad)
16038 	{
16039 	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16040 	  inst.instruction |= LOW4 (inst.operands[2].reg);
16041 	  inst.instruction |= (fcond & 0x2) >> 1;
16042 	}
16043       else
16044 	{
16045 	  if (inst.operands[2].reg == REG_SP)
16046 	    as_tsktsk (MVE_BAD_SP);
16047 	  inst.instruction |= 1 << 6;
16048 	  inst.instruction |= (fcond & 0x2) << 4;
16049 	  inst.instruction |= inst.operands[2].reg;
16050 	}
16051       inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16052       inst.instruction |= (fcond & 0x4) << 10;
16053       inst.instruction |= (fcond & 0x1) << 7;
16054 
16055     }
16056     set_pred_insn_type (VPT_INSN);
16057     now_pred.cc = 0;
16058     now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
16059 		    | ((inst.instruction & 0xe000) >> 13);
16060     now_pred.warn_deprecated = false;
16061     now_pred.type = VECTOR_PRED;
16062     inst.is_neon = 1;
16063 }
16064 
16065 static void
do_mve_vcmp(void)16066 do_mve_vcmp (void)
16067 {
16068   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
16069   if (!inst.operands[1].isreg || !inst.operands[1].isquad)
16070     first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
16071   if (!inst.operands[2].present)
16072     first_error (_("MVE vector or ARM register expected"));
16073   constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
16074 
16075   /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
16076   if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
16077       && inst.operands[1].isquad)
16078     {
16079       inst.instruction = N_MNEM_vcmp;
16080       inst.cond = 0x10;
16081     }
16082 
16083   if (inst.cond > COND_ALWAYS)
16084     inst.pred_insn_type = INSIDE_VPT_INSN;
16085   else
16086     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16087 
16088   enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
16089   struct neon_type_el et
16090     = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
16091 		       N_EQK);
16092 
16093   constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
16094 	      && !inst.operands[2].iszr, BAD_PC);
16095 
16096   unsigned fcond = mve_get_vcmp_vpt_cond (et);
16097 
16098   inst.instruction = 0xee010f00;
16099   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16100   inst.instruction |= (fcond & 0x4) << 10;
16101   inst.instruction |= (fcond & 0x1) << 7;
16102   if (et.type == NT_float)
16103     {
16104       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
16105 		  BAD_FPU);
16106       inst.instruction |= (et.size == 16) << 28;
16107       inst.instruction |= 0x3 << 20;
16108     }
16109   else
16110     {
16111       inst.instruction |= 1 << 28;
16112       inst.instruction |= neon_logbits (et.size) << 20;
16113     }
16114   if (inst.operands[2].isquad)
16115     {
16116       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16117       inst.instruction |= (fcond & 0x2) >> 1;
16118       inst.instruction |= LOW4 (inst.operands[2].reg);
16119     }
16120   else
16121     {
16122       if (inst.operands[2].reg == REG_SP)
16123 	as_tsktsk (MVE_BAD_SP);
16124       inst.instruction |= 1 << 6;
16125       inst.instruction |= (fcond & 0x2) << 4;
16126       inst.instruction |= inst.operands[2].reg;
16127     }
16128 
16129   inst.is_neon = 1;
16130   return;
16131 }
16132 
16133 static void
do_mve_vmaxa_vmina(void)16134 do_mve_vmaxa_vmina (void)
16135 {
16136   if (inst.cond > COND_ALWAYS)
16137     inst.pred_insn_type = INSIDE_VPT_INSN;
16138   else
16139     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16140 
16141   enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16142   struct neon_type_el et
16143     = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16144 
16145   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16146   inst.instruction |= neon_logbits (et.size) << 18;
16147   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16148   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16149   inst.instruction |= LOW4 (inst.operands[1].reg);
16150   inst.is_neon = 1;
16151 }
16152 
16153 static void
do_mve_vfmas(void)16154 do_mve_vfmas (void)
16155 {
16156   enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16157   struct neon_type_el et
16158     = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16159 
16160   if (inst.cond > COND_ALWAYS)
16161     inst.pred_insn_type = INSIDE_VPT_INSN;
16162   else
16163     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16164 
16165   if (inst.operands[2].reg == REG_SP)
16166     as_tsktsk (MVE_BAD_SP);
16167   else if (inst.operands[2].reg == REG_PC)
16168     as_tsktsk (MVE_BAD_PC);
16169 
16170   inst.instruction |= (et.size == 16) << 28;
16171   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16172   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16173   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16174   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16175   inst.instruction |= inst.operands[2].reg;
16176   inst.is_neon = 1;
16177 }
16178 
16179 static void
do_mve_viddup(void)16180 do_mve_viddup (void)
16181 {
16182   if (inst.cond > COND_ALWAYS)
16183     inst.pred_insn_type = INSIDE_VPT_INSN;
16184   else
16185     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16186 
16187   unsigned imm = inst.relocs[0].exp.X_add_number;
16188   constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
16189 	      _("immediate must be either 1, 2, 4 or 8"));
16190 
16191   enum neon_shape rs;
16192   struct neon_type_el et;
16193   unsigned Rm;
16194   if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
16195     {
16196       rs = neon_select_shape (NS_QRI, NS_NULL);
16197       et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
16198       Rm = 7;
16199     }
16200   else
16201     {
16202       constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
16203       if (inst.operands[2].reg == REG_SP)
16204 	as_tsktsk (MVE_BAD_SP);
16205       else if (inst.operands[2].reg == REG_PC)
16206 	first_error (BAD_PC);
16207 
16208       rs = neon_select_shape (NS_QRRI, NS_NULL);
16209       et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
16210       Rm = inst.operands[2].reg >> 1;
16211     }
16212   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16213   inst.instruction |= neon_logbits (et.size) << 20;
16214   inst.instruction |= inst.operands[1].reg << 16;
16215   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16216   inst.instruction |= (imm > 2) << 7;
16217   inst.instruction |= Rm << 1;
16218   inst.instruction |= (imm == 2 || imm == 8);
16219   inst.is_neon = 1;
16220 }
16221 
16222 static void
do_mve_vmlas(void)16223 do_mve_vmlas (void)
16224 {
16225   enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16226   struct neon_type_el et
16227     = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16228 
16229   if (inst.operands[2].reg == REG_PC)
16230     as_tsktsk (MVE_BAD_PC);
16231   else if (inst.operands[2].reg == REG_SP)
16232     as_tsktsk (MVE_BAD_SP);
16233 
16234   if (inst.cond > COND_ALWAYS)
16235     inst.pred_insn_type = INSIDE_VPT_INSN;
16236   else
16237     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16238 
16239   inst.instruction |= (et.type == NT_unsigned) << 28;
16240   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16241   inst.instruction |= neon_logbits (et.size) << 20;
16242   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16243   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16244   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16245   inst.instruction |= inst.operands[2].reg;
16246   inst.is_neon = 1;
16247 }
16248 
16249 static void
do_mve_vshll(void)16250 do_mve_vshll (void)
16251 {
16252   struct neon_type_el et
16253     = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);
16254 
16255   if (inst.cond > COND_ALWAYS)
16256     inst.pred_insn_type = INSIDE_VPT_INSN;
16257   else
16258     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16259 
16260   int imm = inst.operands[2].imm;
16261   constraint (imm < 1 || (unsigned)imm > et.size,
16262 	      _("immediate value out of range"));
16263 
16264   if ((unsigned)imm == et.size)
16265     {
16266       inst.instruction |= neon_logbits (et.size) << 18;
16267       inst.instruction |= 0x110001;
16268     }
16269   else
16270     {
16271       inst.instruction |= (et.size + imm) << 16;
16272       inst.instruction |= 0x800140;
16273     }
16274 
16275   inst.instruction |= (et.type == NT_unsigned) << 28;
16276   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16277   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16278   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16279   inst.instruction |= LOW4 (inst.operands[1].reg);
16280   inst.is_neon = 1;
16281 }
16282 
16283 static void
do_mve_vshlc(void)16284 do_mve_vshlc (void)
16285 {
16286   if (inst.cond > COND_ALWAYS)
16287     inst.pred_insn_type = INSIDE_VPT_INSN;
16288   else
16289     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16290 
16291   if (inst.operands[1].reg == REG_PC)
16292     as_tsktsk (MVE_BAD_PC);
16293   else if (inst.operands[1].reg == REG_SP)
16294     as_tsktsk (MVE_BAD_SP);
16295 
16296   int imm = inst.operands[2].imm;
16297   constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16298 
16299   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16300   inst.instruction |= (imm & 0x1f) << 16;
16301   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16302   inst.instruction |= inst.operands[1].reg;
16303   inst.is_neon = 1;
16304 }
16305 
16306 static void
do_mve_vshrn(void)16307 do_mve_vshrn (void)
16308 {
16309   unsigned types;
16310   switch (inst.instruction)
16311     {
16312     case M_MNEM_vshrnt:
16313     case M_MNEM_vshrnb:
16314     case M_MNEM_vrshrnt:
16315     case M_MNEM_vrshrnb:
16316       types = N_I16 | N_I32;
16317       break;
16318     case M_MNEM_vqshrnt:
16319     case M_MNEM_vqshrnb:
16320     case M_MNEM_vqrshrnt:
16321     case M_MNEM_vqrshrnb:
16322       types = N_U16 | N_U32 | N_S16 | N_S32;
16323       break;
16324     case M_MNEM_vqshrunt:
16325     case M_MNEM_vqshrunb:
16326     case M_MNEM_vqrshrunt:
16327     case M_MNEM_vqrshrunb:
16328       types = N_S16 | N_S32;
16329       break;
16330     default:
16331       abort ();
16332     }
16333 
16334   struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);
16335 
16336   if (inst.cond > COND_ALWAYS)
16337     inst.pred_insn_type = INSIDE_VPT_INSN;
16338   else
16339     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16340 
16341   unsigned Qd = inst.operands[0].reg;
16342   unsigned Qm = inst.operands[1].reg;
16343   unsigned imm = inst.operands[2].imm;
16344   constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
16345 	      et.size == 16
16346 	      ? _("immediate operand expected in the range [1,8]")
16347 	      : _("immediate operand expected in the range [1,16]"));
16348 
16349   inst.instruction |= (et.type == NT_unsigned) << 28;
16350   inst.instruction |= HI1 (Qd) << 22;
16351   inst.instruction |= (et.size - imm) << 16;
16352   inst.instruction |= LOW4 (Qd) << 12;
16353   inst.instruction |= HI1 (Qm) << 5;
16354   inst.instruction |= LOW4 (Qm);
16355   inst.is_neon = 1;
16356 }
16357 
16358 static void
do_mve_vqmovn(void)16359 do_mve_vqmovn (void)
16360 {
16361   struct neon_type_el et;
16362   if (inst.instruction == M_MNEM_vqmovnt
16363      || inst.instruction == M_MNEM_vqmovnb)
16364     et = neon_check_type (2, NS_QQ, N_EQK,
16365 			  N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16366   else
16367     et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16368 
16369   if (inst.cond > COND_ALWAYS)
16370     inst.pred_insn_type = INSIDE_VPT_INSN;
16371   else
16372     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16373 
16374   inst.instruction |= (et.type == NT_unsigned) << 28;
16375   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16376   inst.instruction |= (et.size == 32) << 18;
16377   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16378   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16379   inst.instruction |= LOW4 (inst.operands[1].reg);
16380   inst.is_neon = 1;
16381 }
16382 
16383 static void
do_mve_vpsel(void)16384 do_mve_vpsel (void)
16385 {
16386   neon_select_shape (NS_QQQ, NS_NULL);
16387 
16388   if (inst.cond > COND_ALWAYS)
16389     inst.pred_insn_type = INSIDE_VPT_INSN;
16390   else
16391     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16392 
16393   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16394   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16395   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16396   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16397   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16398   inst.instruction |= LOW4 (inst.operands[2].reg);
16399   inst.is_neon = 1;
16400 }
16401 
16402 static void
do_mve_vpnot(void)16403 do_mve_vpnot (void)
16404 {
16405   if (inst.cond > COND_ALWAYS)
16406     inst.pred_insn_type = INSIDE_VPT_INSN;
16407   else
16408     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16409 }
16410 
16411 static void
do_mve_vmaxnma_vminnma(void)16412 do_mve_vmaxnma_vminnma (void)
16413 {
16414   enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16415   struct neon_type_el et
16416     = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16417 
16418   if (inst.cond > COND_ALWAYS)
16419     inst.pred_insn_type = INSIDE_VPT_INSN;
16420   else
16421     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16422 
16423   inst.instruction |= (et.size == 16) << 28;
16424   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16425   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16426   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16427   inst.instruction |= LOW4 (inst.operands[1].reg);
16428   inst.is_neon = 1;
16429 }
16430 
16431 static void
do_mve_vcmul(void)16432 do_mve_vcmul (void)
16433 {
16434   enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
16435   struct neon_type_el et
16436     = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);
16437 
16438   if (inst.cond > COND_ALWAYS)
16439     inst.pred_insn_type = INSIDE_VPT_INSN;
16440   else
16441     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16442 
16443   unsigned rot = inst.relocs[0].exp.X_add_number;
16444   constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
16445 	      _("immediate out of range"));
16446 
16447   if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
16448 			|| inst.operands[0].reg == inst.operands[2].reg))
16449     as_tsktsk (BAD_MVE_SRCDEST);
16450 
16451   inst.instruction |= (et.size == 32) << 28;
16452   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16453   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16454   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16455   inst.instruction |= (rot > 90) << 12;
16456   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16457   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16458   inst.instruction |= LOW4 (inst.operands[2].reg);
16459   inst.instruction |= (rot == 90 || rot == 270);
16460   inst.is_neon = 1;
16461 }
16462 
16463 /* To handle the Low Overhead Loop instructions
16464    in Armv8.1-M Mainline and MVE.  */
16465 static void
do_t_loloop(void)16466 do_t_loloop (void)
16467 {
16468   unsigned long insn = inst.instruction;
16469 
16470   inst.instruction = THUMB_OP32 (inst.instruction);
16471 
16472   if (insn == T_MNEM_lctp)
16473     return;
16474 
16475   set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);
16476 
16477   if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
16478     {
16479       struct neon_type_el et
16480        = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
16481       inst.instruction |= neon_logbits (et.size) << 20;
16482       inst.is_neon = 1;
16483     }
16484 
16485   switch (insn)
16486     {
16487     case T_MNEM_letp:
16488       constraint (!inst.operands[0].present,
16489 		  _("expected LR"));
16490       /* fall through.  */
16491     case T_MNEM_le:
16492       /* le <label>.  */
16493       if (!inst.operands[0].present)
16494        inst.instruction |= 1 << 21;
16495 
16496       v8_1_loop_reloc (true);
16497       break;
16498 
16499     case T_MNEM_wls:
16500     case T_MNEM_wlstp:
16501       v8_1_loop_reloc (false);
16502       /* fall through.  */
16503     case T_MNEM_dlstp:
16504     case T_MNEM_dls:
16505       constraint (inst.operands[1].isreg != 1, BAD_ARGS);
16506 
16507       if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
16508        constraint (inst.operands[1].reg == REG_PC, BAD_PC);
16509       else if (inst.operands[1].reg == REG_PC)
16510        as_tsktsk (MVE_BAD_PC);
16511       if (inst.operands[1].reg == REG_SP)
16512        as_tsktsk (MVE_BAD_SP);
16513 
16514       inst.instruction |= (inst.operands[1].reg << 16);
16515       break;
16516 
16517     default:
16518       abort ();
16519     }
16520 }
16521 
16522 
16523 static void
do_vfp_nsyn_cmp(void)16524 do_vfp_nsyn_cmp (void)
16525 {
16526   enum neon_shape rs;
16527   if (!inst.operands[0].isreg)
16528     {
16529       do_mve_vcmp ();
16530       return;
16531     }
16532   else
16533     {
16534       constraint (inst.operands[2].present, BAD_SYNTAX);
16535       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
16536 		  BAD_FPU);
16537     }
16538 
16539   if (inst.operands[1].isreg)
16540     {
16541       rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
16542       neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
16543 
16544       if (rs == NS_FF || rs == NS_HH)
16545 	{
16546 	  NEON_ENCODE (SINGLE, inst);
16547 	  do_vfp_sp_monadic ();
16548 	}
16549       else
16550 	{
16551 	  NEON_ENCODE (DOUBLE, inst);
16552 	  do_vfp_dp_rd_rm ();
16553 	}
16554     }
16555   else
16556     {
16557       rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
16558       neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);
16559 
16560       switch (inst.instruction & 0x0fffffff)
16561 	{
16562 	case N_MNEM_vcmp:
16563 	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
16564 	  break;
16565 	case N_MNEM_vcmpe:
16566 	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
16567 	  break;
16568 	default:
16569 	  abort ();
16570 	}
16571 
16572       if (rs == NS_FI || rs == NS_HI)
16573 	{
16574 	  NEON_ENCODE (SINGLE, inst);
16575 	  do_vfp_sp_compare_z ();
16576 	}
16577       else
16578 	{
16579 	  NEON_ENCODE (DOUBLE, inst);
16580 	  do_vfp_dp_rd ();
16581 	}
16582     }
16583   do_vfp_cond_or_thumb ();
16584 
16585   /* ARMv8.2 fp16 instruction.  */
16586   if (rs == NS_HI || rs == NS_HH)
16587     do_scalar_fp16_v82_encode ();
16588 }
16589 
16590 static void
nsyn_insert_sp(void)16591 nsyn_insert_sp (void)
16592 {
16593   inst.operands[1] = inst.operands[0];
16594   memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16595   inst.operands[0].reg = REG_SP;
16596   inst.operands[0].isreg = 1;
16597   inst.operands[0].writeback = 1;
16598   inst.operands[0].present = 1;
16599 }
16600 
16601 /* Fix up Neon data-processing instructions, ORing in the correct bits for
16602    ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
16603 
16604 static void
neon_dp_fixup(struct arm_it * insn)16605 neon_dp_fixup (struct arm_it* insn)
16606 {
16607   unsigned int i = insn->instruction;
16608   insn->is_neon = 1;
16609 
16610   if (thumb_mode)
16611     {
16612       /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
16613       if (i & (1 << 24))
16614 	i |= 1 << 28;
16615 
16616       i &= ~(1 << 24);
16617 
16618       i |= 0xef000000;
16619     }
16620   else
16621     i |= 0xf2000000;
16622 
16623   insn->instruction = i;
16624 }
16625 
16626 static void
mve_encode_qqr(int size,int U,int fp)16627 mve_encode_qqr (int size, int U, int fp)
16628 {
16629   if (inst.operands[2].reg == REG_SP)
16630     as_tsktsk (MVE_BAD_SP);
16631   else if (inst.operands[2].reg == REG_PC)
16632     as_tsktsk (MVE_BAD_PC);
16633 
16634   if (fp)
16635     {
16636       /* vadd.  */
16637       if (((unsigned)inst.instruction) == 0xd00)
16638 	inst.instruction = 0xee300f40;
16639       /* vsub.  */
16640       else if (((unsigned)inst.instruction) == 0x200d00)
16641 	inst.instruction = 0xee301f40;
16642       /* vmul.  */
16643       else if (((unsigned)inst.instruction) == 0x1000d10)
16644 	inst.instruction = 0xee310e60;
16645 
16646       /* Setting size which is 1 for F16 and 0 for F32.  */
16647       inst.instruction |= (size == 16) << 28;
16648     }
16649   else
16650     {
16651       /* vadd.  */
16652       if (((unsigned)inst.instruction) == 0x800)
16653 	inst.instruction = 0xee010f40;
16654       /* vsub.  */
16655       else if (((unsigned)inst.instruction) == 0x1000800)
16656 	inst.instruction = 0xee011f40;
16657       /* vhadd.  */
16658       else if (((unsigned)inst.instruction) == 0)
16659 	inst.instruction = 0xee000f40;
16660       /* vhsub.  */
16661       else if (((unsigned)inst.instruction) == 0x200)
16662 	inst.instruction = 0xee001f40;
16663       /* vmla.  */
16664       else if (((unsigned)inst.instruction) == 0x900)
16665 	inst.instruction = 0xee010e40;
16666       /* vmul.  */
16667       else if (((unsigned)inst.instruction) == 0x910)
16668 	inst.instruction = 0xee011e60;
16669       /* vqadd.  */
16670       else if (((unsigned)inst.instruction) == 0x10)
16671 	inst.instruction = 0xee000f60;
16672       /* vqsub.  */
16673       else if (((unsigned)inst.instruction) == 0x210)
16674 	inst.instruction = 0xee001f60;
16675       /* vqrdmlah.  */
16676       else if (((unsigned)inst.instruction) == 0x3000b10)
16677 	inst.instruction = 0xee000e40;
16678       /* vqdmulh.  */
16679       else if (((unsigned)inst.instruction) == 0x0000b00)
16680 	inst.instruction = 0xee010e60;
16681       /* vqrdmulh.  */
16682       else if (((unsigned)inst.instruction) == 0x1000b00)
16683 	inst.instruction = 0xfe010e60;
16684 
16685       /* Set U-bit.  */
16686       inst.instruction |= U << 28;
16687 
16688       /* Setting bits for size.  */
16689       inst.instruction |= neon_logbits (size) << 20;
16690     }
16691   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16692   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16693   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16694   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16695   inst.instruction |= inst.operands[2].reg;
16696   inst.is_neon = 1;
16697 }
16698 
16699 static void
mve_encode_rqq(unsigned bit28,unsigned size)16700 mve_encode_rqq (unsigned bit28, unsigned size)
16701 {
16702   inst.instruction |= bit28 << 28;
16703   inst.instruction |= neon_logbits (size) << 20;
16704   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16705   inst.instruction |= inst.operands[0].reg << 12;
16706   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16707   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16708   inst.instruction |= LOW4 (inst.operands[2].reg);
16709   inst.is_neon = 1;
16710 }
16711 
16712 static void
mve_encode_qqq(int ubit,int size)16713 mve_encode_qqq (int ubit, int size)
16714 {
16715 
16716   inst.instruction |= (ubit != 0) << 28;
16717   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16718   inst.instruction |= neon_logbits (size) << 20;
16719   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16720   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16721   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16722   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16723   inst.instruction |= LOW4 (inst.operands[2].reg);
16724 
16725   inst.is_neon = 1;
16726 }
16727 
16728 static void
mve_encode_rq(unsigned bit28,unsigned size)16729 mve_encode_rq (unsigned bit28, unsigned size)
16730 {
16731   inst.instruction |= bit28 << 28;
16732   inst.instruction |= neon_logbits (size) << 18;
16733   inst.instruction |= inst.operands[0].reg << 12;
16734   inst.instruction |= LOW4 (inst.operands[1].reg);
16735   inst.is_neon = 1;
16736 }
16737 
16738 static void
mve_encode_rrqq(unsigned U,unsigned size)16739 mve_encode_rrqq (unsigned U, unsigned size)
16740 {
16741   constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16742 
16743   inst.instruction |= U << 28;
16744   inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16745   inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16746   inst.instruction |= (size == 32) << 16;
16747   inst.instruction |= inst.operands[0].reg << 12;
16748   inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16749   inst.instruction |= inst.operands[3].reg;
16750   inst.is_neon = 1;
16751 }
16752 
16753 /* Helper function for neon_three_same handling the operands.  */
16754 static void
neon_three_args(int isquad)16755 neon_three_args (int isquad)
16756 {
16757   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16758   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16759   inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16760   inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16761   inst.instruction |= LOW4 (inst.operands[2].reg);
16762   inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16763   inst.instruction |= (isquad != 0) << 6;
16764   inst.is_neon = 1;
16765 }
16766 
16767 /* Encode insns with bit pattern:
16768 
16769   |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
16770   |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
16771 
16772   SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16773   different meaning for some instruction.  */
16774 
16775 static void
neon_three_same(int isquad,int ubit,int size)16776 neon_three_same (int isquad, int ubit, int size)
16777 {
16778   neon_three_args (isquad);
16779   inst.instruction |= (ubit != 0) << 24;
16780   if (size != -1)
16781     inst.instruction |= neon_logbits (size) << 20;
16782 
16783   neon_dp_fixup (&inst);
16784 }
16785 
16786 /* Encode instructions of the form:
16787 
16788   |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
16789   |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
16790 
16791   Don't write size if SIZE == -1.  */
16792 
16793 static void
neon_two_same(int qbit,int ubit,int size)16794 neon_two_same (int qbit, int ubit, int size)
16795 {
16796   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16797   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16798   inst.instruction |= LOW4 (inst.operands[1].reg);
16799   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16800   inst.instruction |= (qbit != 0) << 6;
16801   inst.instruction |= (ubit != 0) << 24;
16802 
16803   if (size != -1)
16804     inst.instruction |= neon_logbits (size) << 18;
16805 
16806   neon_dp_fixup (&inst);
16807 }
16808 
/* Bit flags selecting which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
NEON_CHECK_CC = 1,	/* Validate/adjust the condition-code field.  */
NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
16815 
16816 /* Call this function if an instruction which may have belonged to the VFP or
16817  Neon instruction sets, but turned out to be a Neon instruction (due to the
16818  operand types involved, etc.). We have to check and/or fix-up a couple of
16819  things:
16820 
16821    - Make sure the user hasn't attempted to make a Neon instruction
16822      conditional.
16823    - Alter the value in the condition code field if necessary.
16824    - Make sure that the arch supports Neon instructions.
16825 
16826  Which of these operations take place depends on bits from enum
16827  vfp_or_neon_is_neon_bits.
16828 
16829  WARNING: This function has side effects! If NEON_CHECK_CC is used and the
16830  current instruction's condition is COND_ALWAYS, the condition field is
16831  changed to inst.uncond_value.  This is necessary because instructions shared
16832  between VFP and Neon may be conditional for the VFP variants only, and the
16833  unconditional Neon version must have, e.g., 0xF in the condition field.  */
16834 
16835 static int
vfp_or_neon_is_neon(unsigned check)16836 vfp_or_neon_is_neon (unsigned check)
16837 {
16838 /* Conditions are always legal in Thumb mode (IT blocks).  */
16839 if (!thumb_mode && (check & NEON_CHECK_CC))
16840   {
16841     if (inst.cond != COND_ALWAYS)
16842       {
16843 	first_error (_(BAD_COND));
16844 	return FAIL;
16845       }
16846     if (inst.uncond_value != -1u)
16847       inst.instruction |= inst.uncond_value << 28;
16848   }
16849 
16850 
16851   if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
16852       || ((check & NEON_CHECK_ARCH8)
16853 	  && !mark_feature_used (&fpu_neon_ext_armv8)))
16854     {
16855       first_error (_(BAD_FPU));
16856       return FAIL;
16857     }
16858 
16859 return SUCCESS;
16860 }
16861 
16862 
16863 /* Return TRUE if the SIMD instruction is available for the current
16864    cpu_variant.  FP is set to TRUE if this is a SIMD floating-point
16865    instruction.  CHECK contains th.  CHECK contains the set of bits to pass to
16866    vfp_or_neon_is_neon for the NEON specific checks.  */
16867 
16868 static bool
check_simd_pred_availability(int fp,unsigned check)16869 check_simd_pred_availability (int fp, unsigned check)
16870 {
16871 if (inst.cond > COND_ALWAYS)
16872   {
16873     if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16874       {
16875 	inst.error = BAD_FPU;
16876 	return false;
16877       }
16878     inst.pred_insn_type = INSIDE_VPT_INSN;
16879   }
16880 else if (inst.cond < COND_ALWAYS)
16881   {
16882     if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16883       inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16884     else if (vfp_or_neon_is_neon (check) == FAIL)
16885       return false;
16886   }
16887 else
16888   {
16889     if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
16890 	&& vfp_or_neon_is_neon (check) == FAIL)
16891       return false;
16892 
16893     if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16894       inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16895   }
16896 return true;
16897 }
16898 
16899 /* Neon instruction encoders, in approximate order of appearance.  */
16900 
16901 static void
do_neon_dyadic_i_su(void)16902 do_neon_dyadic_i_su (void)
16903 {
16904   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
16905    return;
16906 
16907   enum neon_shape rs;
16908   struct neon_type_el et;
16909   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16910     rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
16911   else
16912     rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16913 
16914   et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
16915 
16916 
16917   if (rs != NS_QQR)
16918     neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16919   else
16920     mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16921 }
16922 
16923 static void
do_neon_dyadic_i64_su(void)16924 do_neon_dyadic_i64_su (void)
16925 {
16926   if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
16927     return;
16928   enum neon_shape rs;
16929   struct neon_type_el et;
16930   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16931     {
16932       rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
16933       et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16934     }
16935   else
16936     {
16937       rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16938       et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
16939     }
16940   if (rs == NS_QQR)
16941     mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16942   else
16943     neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16944 }
16945 
16946 static void
neon_imm_shift(int write_ubit,int uval,int isquad,struct neon_type_el et,unsigned immbits)16947 neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
16948 		unsigned immbits)
16949 {
16950   unsigned size = et.size >> 3;
16951   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16952   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16953   inst.instruction |= LOW4 (inst.operands[1].reg);
16954   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16955   inst.instruction |= (isquad != 0) << 6;
16956   inst.instruction |= immbits << 16;
16957   inst.instruction |= (size >> 3) << 7;
16958   inst.instruction |= (size & 0x7) << 19;
16959   if (write_ubit)
16960     inst.instruction |= (uval != 0) << 24;
16961 
16962   neon_dp_fixup (&inst);
16963 }
16964 
/* Assemble VSHL: immediate-shift form, Neon three-register form, or the
   MVE vector-by-scalar form.  */
static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate shift count.  MVE only accepts the QQI shape and the
	 MVE integer type set; plain Neon also accepts D registers.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (false, 0, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register shift count.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE VSHL with the shift count in a core register.  SP/PC are
	     discouraged but only warned about.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Base opcode for MVE VSHL (vector by scalar).  */
	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed). Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17044 
/* Assemble VQSHL.  Structure mirrors do_neon_shl: immediate form, Neon
   three-register form, or the MVE vector-by-scalar form.  */
static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate shift count.  Unlike VSHL, signedness matters here, so
	 the signed/unsigned type sets are used.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      /* The U bit is written here (unlike do_neon_shl).  */
      neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      /* Register shift count.  */
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE VQSHL with the shift count in a core register.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Base opcode for MVE VQSHL (vector by scalar).  */
	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17118 
/* Assemble VRSHL/VQRSHL.  Both mnemonics share this encoder; they are told
   apart by the opcode bitmask already in inst.instruction.  */
static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      /* MVE form with the shift count in a core register.  */
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      /* 0x0000510 is the Neon bitmask for vqrshl, used here to identify
	 which mnemonic this is before replacing it with the MVE opcode.  */
      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Neon form: swap Dn/Dm as in do_neon_shl (shift-count operand order
	 differs from other three-register ops).  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17172 
17173 static int
neon_cmode_for_logic_imm(unsigned immediate,unsigned * immbits,int size)17174 neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
17175 {
17176   /* Handle .I8 pseudo-instructions.  */
17177   if (size == 8)
17178     {
17179       /* Unfortunately, this will make everything apart from zero out-of-range.
17180 	 FIXME is this the intended semantics? There doesn't seem much point in
17181 	 accepting .I8 if so.  */
17182       immediate |= immediate << 8;
17183       size = 16;
17184     }
17185 
17186   if (size >= 32)
17187     {
17188       if (immediate == (immediate & 0x000000ff))
17189 	{
17190 	  *immbits = immediate;
17191 	  return 0x1;
17192 	}
17193       else if (immediate == (immediate & 0x0000ff00))
17194 	{
17195 	  *immbits = immediate >> 8;
17196 	  return 0x3;
17197 	}
17198       else if (immediate == (immediate & 0x00ff0000))
17199 	{
17200 	  *immbits = immediate >> 16;
17201 	  return 0x5;
17202 	}
17203       else if (immediate == (immediate & 0xff000000))
17204 	{
17205 	  *immbits = immediate >> 24;
17206 	  return 0x7;
17207 	}
17208       if ((immediate & 0xffff) != (immediate >> 16))
17209 	goto bad_immediate;
17210       immediate &= 0xffff;
17211     }
17212 
17213   if (immediate == (immediate & 0x000000ff))
17214     {
17215       *immbits = immediate;
17216       return 0x9;
17217     }
17218   else if (immediate == (immediate & 0x0000ff00))
17219     {
17220       *immbits = immediate >> 8;
17221       return 0xb;
17222     }
17223 
17224   bad_immediate:
17225   first_error (_("immediate value out of range"));
17226   return FAIL;
17227 }
17228 
/* Assemble the Neon/MVE logic operations (VAND/VBIC/VORR/VORN/...), both the
   three-register form and the immediate form (including the VAND/VORN
   pseudo-instructions, which invert the immediate and encode as VBIC/VORR).  */
static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (false,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "op Vd, Vd, #imm" (three-operand) or
	 "op Vd, #imm" (two-operand).  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (false,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      /* MVE restricts the immediate form to I16/I32 element types.  */
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      /* Recover the mnemonic's opcode from the low bits of the bitmask.  */
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      /* neon_cmode_for_logic_imm has already reported the error.  */
      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17337 
17338 static void
do_neon_bitfield(void)17339 do_neon_bitfield (void)
17340 {
17341   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
17342   neon_check_type (3, rs, N_IGNORE_TYPE);
17343   neon_three_same (neon_quad (rs), 0, -1);
17344 }
17345 
17346 static void
neon_dyadic_misc(enum neon_el_type ubit_meaning,unsigned types,unsigned destbits)17347 neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17348 		  unsigned destbits)
17349 {
17350   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17351   struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17352 					    types | N_KEY);
17353   if (et.type == NT_float)
17354     {
17355       NEON_ENCODE (FLOAT, inst);
17356       if (rs == NS_QQR)
17357 	mve_encode_qqr (et.size, 0, 1);
17358       else
17359 	neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17360     }
17361   else
17362     {
17363       NEON_ENCODE (INTEGER, inst);
17364       if (rs == NS_QQR)
17365 	mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17366       else
17367 	neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17368     }
17369 }
17370 
17371 
17372 static void
do_neon_dyadic_if_su_d(void)17373 do_neon_dyadic_if_su_d (void)
17374 {
17375   /* This version only allow D registers, but that constraint is enforced during
17376      operand parsing so we don't need to do anything extra here.  */
17377   neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
17378 }
17379 
17380 static void
do_neon_dyadic_if_i_d(void)17381 do_neon_dyadic_if_i_d (void)
17382 {
17383   /* The "untyped" case can't happen. Do this to stop the "U" bit being
17384      affected if we specify unsigned args.  */
17385   neon_dyadic_misc (NT_untyped, N_IF_32, 0);
17386 }
17387 
/* Encode the MVE VLDR/VSTR [Q, #imm]{!} addressing form.  SIZE is the
   access size in bits, ELSIZE the element size from the type suffix, LOAD
   non-zero for VLDR.  */
static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  /* Only 32- and 64-bit accesses exist in this form, and the element type
     must match the access size exactly.  */
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the offset into a sign (the add/subtract bit) and magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* The encoded immediate is a 7-bit value scaled by the access size.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled immediate — the mask
     must be applied after the bits above but before the fields below.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17424 
/* Encode the MVE VLDR/VSTR [Rn, Qm, {UXTW #os}] (scatter/gather) addressing
   form.  SIZE is the access size in bits, ELSIZE the element size from the
   type suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
    /* The optional shift is packed into the upper bits of operand 1's imm
       field; the low 5 bits hold the offset Q register.  */
    unsigned os = inst.operands[1].imm >> 5;
    unsigned type = inst.vectype.el[0].type;
    constraint (os != 0 && size == 8,
		_("can not shift offsets when accessing less than half-word"));
    constraint (os && os != neon_logbits (size),
		_("shift immediate must be 1, 2 or 3 for half-word, word"
		  " or double-word accesses respectively"));
    if (inst.operands[1].reg == REG_PC)
      as_tsktsk (MVE_BAD_PC);

    /* Validate the element type against the access size.  */
    switch (size)
      {
      case 8:
	constraint (elsize >= 64, BAD_EL_TYPE);
	break;
      case 16:
	constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
	break;
      case 32:
      case 64:
	constraint (elsize != size, BAD_EL_TYPE);
	break;
      default:
	break;
      }
    constraint (inst.operands[1].writeback || !inst.operands[1].preind,
		BAD_ADDR_MODE);
    if (load)
      {
	constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		    _("destination register and offset register may not be"
		    " the same"));
	/* A widening load needs a signed or unsigned element type; a
	   same-size load must not be signed.  */
	constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
	constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		    BAD_EL_TYPE);
	/* Bit 28 set means "no sign extension needed".  */
	inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
      }
    else
      {
	constraint (type != NT_untyped, BAD_EL_TYPE);
      }

    inst.instruction |= 1 << 23;
    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
    inst.instruction |= inst.operands[1].reg << 16;
    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
    inst.instruction |= neon_logbits (elsize) << 7;
    inst.instruction |= HI1 (inst.operands[1].imm) << 5;
    inst.instruction |= LOW4 (inst.operands[1].imm);
    /* Bit 0 records whether a UXTW shift was given.  */
    inst.instruction |= !!os;
}
17479 
/* Encode the MVE VLDR/VSTR [Rn, #imm]{!} / [Rn], #imm addressing form.
   SIZE is the access size in bits, ELSIZE the element size from the type
   suffix, LOAD non-zero for VLDR.  */
static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  /* Validate the element type against the access size.  */
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  if (load)
    {
      /* A widening load needs a signed or unsigned element type.  */
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      /* A narrowing store must be untyped.  */
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the offset into a sign (the add/subtract bit) and magnitude.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* The encoded immediate is a 7-bit value scaled by the access size.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted to low core registers and
	 Q0..Q7.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Same-size form: PC as base is discouraged; SP with writeback is
	 discouraged.  */
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the immediate field before inserting the scaled offset.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17561 
/* Encode an MVE VSTR/VLDR instruction.  The mnemonic selects the memory
   access size and the load/store direction; the addressing mode of
   operand 1 selects which of the specialised encoders does the rest.  */

static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  /* Predicated inside a VPT block, otherwise a plain MVE instruction.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Access size (b/h/w/d suffix) and load vs. store from the mnemonic.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  /* Element size from the type suffix, as opposed to the memory access
     size above.  */
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17628 
/* Encode MVE VST<n>/VLD<n> (de)interleaving loads and stores.  Only the
   plain [Rn]{!} addressing mode is accepted, and these instructions are
   never predicable.  */

static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Reject any immediate offset, symbolic offset or register offset:
     only [Rn] and [Rn]! are valid.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Qd in D:Vd, writeback bit, base register Rn, element size.  */
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17666 
/* Encode MVE VADDLV: add long across vector, accumulating into an even/odd
   GPR pair (shape RRQ), for 32-bit signed or unsigned elements.  */

static void
do_mve_vaddlv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* U bit, RdaHi, RdaLo and Qm fields.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= inst.operands[1].reg << 19;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg;
  inst.is_neon = 1;
}
17690 
/* Encode three-register dyadic instructions accepting signed, unsigned and
   float 32-bit-or-narrower types (e.g. VMAX/VMIN).  Shapes DDD, QQQ and
   MVE's QQR are accepted.  */

static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  /* Float VMAX/VMIN need the Advanced SIMD extension.  */
  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17709 
/* Encode VADD/VSUB with integer-or-float element types, handling the VFP,
   Neon and MVE forms of the mnemonics.  */

static void
do_neon_addsub_if_i (void)
{
  /* Try the scalar VFP form (e.g. vadd.f32 s0, s1, s2) first.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  /* The QQR (vector + GPR scalar) form has no 64-bit variant.  */
  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* D-register shapes, or a size MVE does not support (64-bit):
	 this must be plain Neon, so verify Neon availability.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17744 
17745 /* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17746    result to be:
17747      V<op> A,B     (A is operand 0, B is operand 2)
17748    to mean:
17749      V<op> A,B,A
17750    not:
17751      V<op> A,B,B
17752    so handle that case specially.  */
17753 
17754 static void
neon_exchange_operands(void)17755 neon_exchange_operands (void)
17756 {
17757   if (inst.operands[1].present)
17758     {
17759       void *scratch = xmalloc (sizeof (inst.operands[0]));
17760 
17761       /* Swap operands[1] and operands[2].  */
17762       memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17763       inst.operands[1] = inst.operands[2];
17764       memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17765       free (scratch);
17766     }
17767   else
17768     {
17769       inst.operands[1] = inst.operands[2];
17770       inst.operands[2] = inst.operands[0];
17771     }
17772 }
17773 
/* Encode a Neon compare instruction.  REGTYPES are the element types
   allowed for the register-register form, IMMTYPES those allowed for the
   compare-against-#0 form.  If INVERT, swap the source operands first so
   e.g. VLT can be encoded as VGT with reversed operands.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against immediate zero (shapes DDI/QQI).  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17801 
/* VCGE/VCGT and friends: non-inverted compare.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, false);
}
17807 
/* VCLE/VCLT: encoded as the inverse compare with swapped operands.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, true);
}
17813 
/* VCEQ: equality compare, integer or float element types.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, false);
}
17819 
17820 /* For multiply instructions, we have the possibility of 16-bit or 32-bit
17821    scalars, which are encoded in 5 bits, M : Rm.
17822    For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17823    M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17824    index in M.
17825 
17826    Dot Product instructions are similar to multiply instructions except elsize
17827    should always be 32.
17828 
17829    This function translates SCALAR, which is GAS's internal encoding of indexed
17830    scalar register, to raw encoding.  There is also register and index range
17831    check based on ELSIZE.  */
17832 
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  /* 16-bit scalars: register in Rm[2:0], index in M:Rm[3].  */
  if (elsize == 16 && regno <= 7 && elno <= 3)
    return regno | (elno << 3);

  /* 32-bit scalars: register in Rm[3:0], index in M.  */
  if (elsize == 32 && regno <= 15 && elno <= 1)
    return regno | (elno << 4);

  /* Any other element size, or an out-of-range register/index.  */
  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
17858 
17859 /* Encode multiply / multiply-accumulate scalar instructions.  */
17860 
/* Encode a multiply / multiply-accumulate with an indexed scalar as the
   third operand.  ET gives the element type, UBIT the value of the U
   (quad/unsigned, per instruction) bit at position 24.  */

static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate GAS's scalar encoding to the raw M:Rm field.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17883 
/* Encode VMLA/VMLS, which may take a vector, an indexed scalar (Neon only)
   or a GPR scalar (MVE only) as the third operand.  Falls back to the VFP
   form first.  */

static void
do_neon_mac_maybe_scalar (void)
{
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only, not available in MVE.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* GPR-scalar (QQR) form: MVE only.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      /* Three-vector form: Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17919 
17920 static void
do_bfloat_vfma(void)17921 do_bfloat_vfma (void)
17922 {
17923   constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
17924   constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
17925   enum neon_shape rs;
17926   int t_bit = 0;
17927 
17928   if (inst.instruction != B_MNEM_vfmab)
17929   {
17930       t_bit = 1;
17931       inst.instruction = B_MNEM_vfmat;
17932   }
17933 
17934   if (inst.operands[2].isscalar)
17935     {
17936       rs = neon_select_shape (NS_QQS, NS_NULL);
17937       neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
17938 
17939       inst.instruction |= (1 << 25);
17940       int idx = inst.operands[2].reg & 0xf;
17941       constraint (!(idx < 4), _("index must be in the range 0 to 3"));
17942       inst.operands[2].reg >>= 4;
17943       constraint (!(inst.operands[2].reg < 8),
17944 		  _("indexed register must be less than 8"));
17945       neon_three_args (t_bit);
17946       inst.instruction |= ((idx & 1) << 3);
17947       inst.instruction |= ((idx & 2) << 4);
17948     }
17949   else
17950     {
17951       rs = neon_select_shape (NS_QQQ, NS_NULL);
17952       neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
17953       neon_three_args (t_bit);
17954     }
17955 
17956 }
17957 
/* Encode VFMA/VFMS, trying the VFP form first, then MVE (including the
   vector + GPR-scalar QQR form), and finally plain Neon.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* Vector by GPR-scalar: SP/PC use is only warned about.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Base opcode for the MVE QQR form, then size and register
	     fields.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      /* Without MVE-FP the third operand must be a vector.  */
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
18000 
18001 static void
do_mve_vfma(void)18002 do_mve_vfma (void)
18003 {
18004   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
18005       inst.cond == COND_ALWAYS)
18006     {
18007       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
18008       inst.instruction = N_MNEM_vfma;
18009       inst.pred_insn_type = INSIDE_VPT_INSN;
18010       inst.cond = 0xf;
18011       return do_neon_fmac();
18012     }
18013   else
18014     {
18015       do_bfloat_vfma();
18016     }
18017 }
18018 
18019 static void
do_neon_tst(void)18020 do_neon_tst (void)
18021 {
18022   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18023   struct neon_type_el et = neon_check_type (3, rs,
18024     N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18025   neon_three_same (neon_quad (rs), 0, et.size);
18026 }
18027 
18028 /* VMUL with 3 registers allows the P8 type. The scalar version supports the
18029    same types as the MAC equivalents. The polynomial type for this instruction
18030    is encoded the same as the integer type.  */
18031 
static void
do_neon_mul (void)
{
  /* Try the scalar VFP multiply first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form shares its encoding with VMLA/VMLS; Neon
	 only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE forms: QQR (GPR scalar) or QQQ; float needs MVE-FP.  */
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  /* Neon three-vector form, which additionally allows the P8
	     polynomial type (see comment above).  */
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
18067 
/* Encode VQDMULH/VQRDMULH: saturating doubling multiply returning high
   half, with indexed-scalar, Neon three-register and MVE (including QQR)
   forms.  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form: Neon only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE additionally allows 8-bit elements and the QQR form.  */
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18108 
/* Encode MVE VADDV: add across vector into a GPR (shape RQ), for signed or
   unsigned elements up to 32 bits.  */

static void
do_mve_vaddv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK,  N_SU_32 | N_KEY);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18128 
/* Encode MVE VHCADD: halving complex add with rotation of 90 or 270
   degrees, signed elements only.  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The rotation immediate was parsed into the first reloc expression.  */
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* Rotation is encoded as a single bit: 0 = 90, 1 = 270.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18152 
/* Encode MVE VQDMULLB/VQDMULLT: saturating doubling multiply long, in the
   three-vector (QQQ) or vector + GPR-scalar (QQR) form.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* With 32-bit elements the destination must not overlap a source.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18181 
/* Encode MVE VADC/VADCI: add with carry across 32-bit elements, shape
   QQQ only.  */

static void
do_mve_vadc (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);

  if (et.type == NT_invtype)
    first_error (BAD_EL_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, 64);
}
18199 
/* Encode MVE VBRSR: bit-reverse and shift right by a GPR operand, shape
   QQR, untyped 8/16/32-bit elements.  */

static void
do_mve_vbrsr (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, 0, 0);
}
18214 
18215 static void
do_mve_vsbc(void)18216 do_mve_vsbc (void)
18217 {
18218   neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);
18219 
18220   if (inst.cond > COND_ALWAYS)
18221     inst.pred_insn_type = INSIDE_VPT_INSN;
18222   else
18223     inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18224 
18225   mve_encode_qqq (1, 64);
18226 }
18227 
/* Encode MVE VMULH/VRMULH: multiply returning high half, shape QQQ,
   signed or unsigned elements.  */

static void
do_mve_vmulh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (et.type == NT_unsigned, et.size);
}
18242 
/* Encode MVE VQDMLAH/VQRDMLAH family: saturating doubling multiply
   accumulate with a GPR scalar (shape QQR), signed elements.  */

static void
do_mve_vqdmlah (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
}
18257 
/* Encode MVE VQDMLADH/VQDMLSDH family: saturating doubling multiply add
   dual, shape QQQ, signed elements.  */

static void
do_mve_vqdmladh (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_qqq (0, et.size);
}
18272 
18273 
/* Encode VMULLB/VMULLT.  An unconditional VMULLT that is not an MVE QQQ
   form is re-dispatched as a Neon VMUL (the mnemonic then parses as
   "vmul" with an "lt" condition).  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {
      /* Only the QQQ shape on an MVE target is really VMULLT; any other
	 combination is Neon VMUL with condition "lt".  */
      if (rs == NS_QQQ)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Re-dispatch as Neon VMUL with an LT condition code.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18322 
/* Encode MVE VABAV: absolute difference and accumulate across vector into
   a GPR (shape RQQ).  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18345 
/* Encode the MVE VMLADAV/VMLSDAV family: multiply add/subtract dual
   accumulate across vector into a GPR (shape RQQ).  The exchange (x) and
   subtract (vmlsdav*) variants accept signed types only.  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  /* Unsigned types only exist for plain vmladav/vmladava.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit-element flag sits in different bit positions for the
     subtract and add variants.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18381 
/* Encode the MVE VMLALDAV/VMLSLDAV family: multiply add/subtract long
   dual accumulate across vector into a GPR pair (shape RRQQ).  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  /* The subtract and exchange variants accept signed types only.  */
  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18404 
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family: rounding multiply
   accumulate long dual, returning high 64 bits, into a GPR pair (shape
   RRQQ).  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
     || inst.instruction == M_MNEM_vrmlsldavha
     || inst.instruction == M_MNEM_vrmlsldavhx
     || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      /* Subtract variants: signed 32-bit only; SP only gets a warning.  */
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18447 
18448 
/* Encode MVE VMAXNMV/VMINNMV (and the absolute variants): float maximum/
   minimum across vector into a GPR (shape RQ).  */

static void
do_mve_vmaxnmv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC as the scalar destination only warrant a warning.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.size == 16, 64);
}
18468 
/* Encode MVE VMAXV/VMINV and VMAXAV/VMINAV: integer maximum/minimum
   across vector into a GPR (shape RQ).  The absolute variants accept
   signed types only.  */

static void
do_mve_vmaxv (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
  struct neon_type_el et;

  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
  else
    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* SP/PC as the scalar destination only warrant a warning.  */
  if (inst.operands[0].reg == REG_SP)
    as_tsktsk (MVE_BAD_SP);
  else if (inst.operands[0].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);

  mve_encode_rq (et.type == NT_unsigned, et.size);
}
18492 
18493 
/* Encode VQRDMLAH/VQRDMLSH: on non-MVE targets these are the ARMv8.1
   AdvSIMD rounding doubling multiply accumulate instructions (scalar or
   three-register form); on MVE targets they take the QQR (GPR scalar)
   form.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
	if (inst.operands[2].isscalar)
	  {
	    /* Indexed-scalar form.  */
	    enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (SCALAR, inst);
	    neon_mul_mac (et, neon_quad (rs));
	  }
	else
	  {
	    /* Three-register form.  */
	    enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (INTEGER, inst);
	    /* The U bit (rounding) comes from bit mask.  */
	    neon_three_same (neon_quad (rs), 0, et.size);
	  }
    }
  else
    {
      /* MVE QQR form: vector, vector, GPR scalar.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18538 
18539 static void
do_neon_fcmp_absolute(void)18540 do_neon_fcmp_absolute (void)
18541 {
18542   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18543   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18544 					    N_F_16_32 | N_KEY);
18545   /* Size field comes from bit mask.  */
18546   neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
18547 }
18548 
/* As do_neon_fcmp_absolute, but with the two source operands swapped
   first (the "inverted" comparison forms).  */

static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18555 
18556 static void
do_neon_step(void)18557 do_neon_step (void)
18558 {
18559   enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18560   struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18561 					    N_F_16_32 | N_KEY);
18562   neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
18563 }
18564 
18565 static void
do_neon_abs_neg(void)18566 do_neon_abs_neg (void)
18567 {
18568   enum neon_shape rs;
18569   struct neon_type_el et;
18570 
18571   if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
18572     return;
18573 
18574   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18575   et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
18576 
18577   if (!check_simd_pred_availability (et.type == NT_float,
18578 				     NEON_CHECK_ARCH | NEON_CHECK_CC))
18579     return;
18580 
18581   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18582   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18583   inst.instruction |= LOW4 (inst.operands[1].reg);
18584   inst.instruction |= HI1 (inst.operands[1].reg) << 5;
18585   inst.instruction |= neon_quad (rs) << 6;
18586   inst.instruction |= (et.type == NT_float) << 10;
18587   inst.instruction |= neon_logbits (et.size) << 18;
18588 
18589   neon_dp_fixup (&inst);
18590 }
18591 
18592 static void
do_neon_sli(void)18593 do_neon_sli (void)
18594 {
18595   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
18596     return;
18597 
18598   enum neon_shape rs;
18599   struct neon_type_el et;
18600   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18601     {
18602       rs = neon_select_shape (NS_QQI, NS_NULL);
18603       et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18604     }
18605   else
18606     {
18607       rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18608       et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18609     }
18610 
18611 
18612   int imm = inst.operands[2].imm;
18613   constraint (imm < 0 || (unsigned)imm >= et.size,
18614 	      _("immediate out of range for insert"));
18615   neon_imm_shift (false, 0, neon_quad (rs), et, imm);
18616 }
18617 
18618 static void
do_neon_sri(void)18619 do_neon_sri (void)
18620 {
18621   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
18622     return;
18623 
18624   enum neon_shape rs;
18625   struct neon_type_el et;
18626   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18627     {
18628       rs = neon_select_shape (NS_QQI, NS_NULL);
18629       et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18630     }
18631   else
18632     {
18633       rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18634       et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18635     }
18636 
18637   int imm = inst.operands[2].imm;
18638   constraint (imm < 1 || (unsigned)imm > et.size,
18639 	      _("immediate out of range for insert"));
18640   neon_imm_shift (false, 0, neon_quad (rs), et, et.size - imm);
18641 }
18642 
18643 static void
do_neon_qshlu_imm(void)18644 do_neon_qshlu_imm (void)
18645 {
18646   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
18647     return;
18648 
18649   enum neon_shape rs;
18650   struct neon_type_el et;
18651   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18652     {
18653       rs = neon_select_shape (NS_QQI, NS_NULL);
18654       et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18655     }
18656   else
18657     {
18658       rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18659       et = neon_check_type (2, rs, N_EQK | N_UNS,
18660 			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
18661     }
18662 
18663   int imm = inst.operands[2].imm;
18664   constraint (imm < 0 || (unsigned)imm >= et.size,
18665 	      _("immediate out of range for shift"));
18666   /* Only encodes the 'U present' variant of the instruction.
18667      In this case, signed types have OP (bit 8) set to 0.
18668      Unsigned types have OP set to 1.  */
18669   inst.instruction |= (et.type == NT_unsigned) << 8;
18670   /* The rest of the bits are the same as other immediate shifts.  */
18671   neon_imm_shift (false, 0, neon_quad (rs), et, imm);
18672 }
18673 
18674 static void
do_neon_qmovn(void)18675 do_neon_qmovn (void)
18676 {
18677   struct neon_type_el et = neon_check_type (2, NS_DQ,
18678     N_EQK | N_HLF, N_SU_16_64 | N_KEY);
18679   /* Saturating move where operands can be signed or unsigned, and the
18680      destination has the same signedness.  */
18681   NEON_ENCODE (INTEGER, inst);
18682   if (et.type == NT_unsigned)
18683     inst.instruction |= 0xc0;
18684   else
18685     inst.instruction |= 0x80;
18686   neon_two_same (0, 1, et.size / 2);
18687 }
18688 
static void
do_neon_qmovun (void)
{
  /* VQMOVUN: saturating move with unsigned results.  Operands must be
     signed; the destination elements are half the source width.  */
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18698 
/* VQ{R}SHRN: saturating shift right and narrow from a Q register to a D
   register.  A zero shift count is assembled as VQMOVN instead.  */

static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Work with the narrowed (destination) element size: this gets the bounds
     check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  /* The shift count must lie in [1, narrowed element size]; the encoding
     stores size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  neon_imm_shift (true, et.type == NT_unsigned, 0, et, et.size - imm);
}
18725 
/* VQ{R}SHRUN: saturating shift right and narrow with unsigned result from
   signed operands.  A zero shift count is assembled as VQMOVUN instead.  */

static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Work with the narrowed (destination) element size: this gets the bounds
     check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (true, 1, 0, et, et.size - imm);
}
18755 
/* VMOVN: narrow each element of a Q register into half-width elements of a
   D register.  */

static void
do_neon_movn (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18764 
/* Non-saturating shift right and narrow (Q register to D register).  A zero
   shift count is assembled as VMOVN instead.  */

static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* Work with the narrowed (destination) element size: this gets the bounds
     check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  /* The shift count must lie in [1, narrowed element size]; the encoding
     stores size - imm.  */
  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  neon_imm_shift (false, 0, 0, et, et.size - imm);
}
18789 
/* VSHLL: shift left long, widening D-register elements into a Q register.
   The maximum shift (imm == element size) has its own encoding; other
   shift counts use the ordinary immediate-shift encoding.  */

static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (true, et.type == NT_unsigned, 0, et, imm);
    }
}
18819 
18820 /* Check the various types for the VCVT instruction, and return which version
18821    the current instruction is.  */
18822 
/* Each CVT_VAR row describes one conversion: C names the flavour,
   X is the destination type mask and Y the source type mask (as passed to
   neon_check_type), R is an extra register-class mask applied to both, and
   BSN/CN/ZN are the VFP opcode names for the bitshift, plain, and
   round-to-zero variants respectively (NULL where no such variant
   exists).  */
#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg,   NULL, NULL, NULL)	      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

/* Expand each row to an enumerator named neon_cvt_flavour_<C>.  */
#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  /* First flavour handled by the VFP (rather than Neon) encoders.  */
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18872 
/* Work out which conversion flavour matches the operand types of the
   current instruction, by type-checking each CVT_VAR row in turn.
   Returns neon_cvt_flavour_invalid when no row matches.  */

static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
/* Each expansion type-checks one row; on the first match, clear any error
   left behind by earlier failed checks and return that flavour.  */
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18898 
/* Conversion/rounding mode selected by the VCVT* mnemonic suffix.  The
   first four map directly onto the FPv8 'rm' field values 0-3 (see
   do_vfp_nsyn_cvt_fpv8).  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* VCVTA (rm = 0).  */
  neon_cvt_mode_n,	/* VCVTN (rm = 1).  */
  neon_cvt_mode_p,	/* VCVTP (rm = 2).  */
  neon_cvt_mode_m,	/* VCVTM (rm = 3).  */
  neon_cvt_mode_z,	/* Round towards zero (plain VCVT).  */
  neon_cvt_mode_x,	/* VCVTR (see do_neon_cvtr).  */
  neon_cvt_mode_r
};
18909 
18910 /* Neon-syntax VFP conversions.  */
18911 
/* Assemble a Neon-syntax VFP conversion by dispatching to the legacy VFP
   opcode name for FLAVOUR: the BSN column of CVT_FLAVOUR_VAR for shapes
   with an immediate bitshift operand, the CN column otherwise.  Flavours
   with a NULL entry emit nothing here.  */

static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The bitshift forms are written Vd, Vd, #imm: fold operand 2
	     down into operand 1 before encoding.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18963 
/* Assemble the round-towards-zero VFP conversion forms, using the ZN
   opcode column of CVT_FLAVOUR_VAR.  Flavours with no round-to-zero
   variant (NULL entry) emit nothing.  */

static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18980 
/* Encode an FP-ARMv8 directed-rounding conversion (VCVTA/N/P/M family).
   FLAVOUR selects the operand types; MODE selects the rounding mode.
   The encoding fields set here: sz (bit 8) selects a double-precision
   source, op (bit 7) selects a signed result, rm (bits 16-17) is the
   rounding mode.  */

static void
do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
		      enum neon_cvt_mode mode)
{
  int sz, op;
  int rm;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (flavour == neon_cvt_flavour_s32_f64
      || flavour == neon_cvt_flavour_u32_f64)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  /* fp16 source operands need the ARMv8.2 FP16 extension.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
		_(BAD_FP16));

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* Derive sz (double-precision source) and op (signed result).  */
  switch (flavour)
    {
    case neon_cvt_flavour_s32_f64:
      sz = 1;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f32:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_s32_f16:
      sz = 0;
      op = 1;
      break;
    case neon_cvt_flavour_u32_f64:
      sz = 1;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f32:
      sz = 0;
      op = 0;
      break;
    case neon_cvt_flavour_u32_f16:
      sz = 0;
      op = 0;
      break;
    default:
      first_error (_("invalid instruction shape"));
      return;
    }

  /* Map the mnemonic's rounding mode onto the rm field.  */
  switch (mode)
    {
    case neon_cvt_mode_a: rm = 0; break;
    case neon_cvt_mode_n: rm = 1; break;
    case neon_cvt_mode_p: rm = 2; break;
    case neon_cvt_mode_m: rm = 3; break;
    default: first_error (_("invalid rounding mode")); return;
    }

  NEON_ENCODE (FPV8, inst);
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= sz << 8;

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      ||flavour == neon_cvt_flavour_u32_f16)
    do_scalar_fp16_v82_encode ();
  inst.instruction |= op << 7;
  inst.instruction |= rm << 16;
  /* These encodings are unconditional (0xf condition field).  */
  inst.instruction |= 0xf0000000;
  inst.is_neon = true;
}
19056 
/* Shared worker for all the VCVT* mnemonics.  Determines the operand shape
   and conversion flavour, then dispatches: legacy VFP encodings, FP-ARMv8
   directed-rounding encodings, or the Neon/MVE encodings selected by the
   shape switch below.  MODE is the rounding mode implied by the mnemonic
   suffix.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  if ((rs == NS_FD || rs == NS_QQI) && mode == neon_cvt_mode_n
      && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* We are dealing with vcvt with the 'ne' condition.  */
      inst.cond = 0x1;
      inst.instruction = N_MNEM_vcvt;
      do_neon_cvt_1 (neon_cvt_mode_z);
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	/* Fixed-point conversion with an immediate fraction-bits operand.  */
	unsigned immbits;
	/* Per-flavour opcode bits, indexed by neon_cvt_flavour.  */
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	    return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    /* MVE restricts the immediate to [1, element size].  */
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
		case neon_cvt_flavour_f16_s16:
		case neon_cvt_flavour_f16_u16:
		case neon_cvt_flavour_s16_f16:
		case neon_cvt_flavour_u16_f16:
		  constraint (inst.operands[2].imm > 16,
			      _("immediate value out of range"));
		  break;
		case neon_cvt_flavour_f32_u32:
		case neon_cvt_flavour_f32_s32:
		case neon_cvt_flavour_s32_f32:
		case neon_cvt_flavour_u32_f32:
		  constraint (inst.operands[2].imm > 32,
			      _("immediate value out of range"));
		  break;
		default:
		  inst.error = BAD_FPU;
		  return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	inst.instruction |= 1 << 21;
	/* The immediate field encodes (2 * size) - fraction bits.  */
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    /* NOTE(review): bit 21 was already set just above; this second
	       OR is redundant but harmless.  */
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  /* Directed-rounding forms need ARMv8 SIMD.  */
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Directed-rounding (VCVTA/N/P/M) vector encoding.  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (true,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Integer <-> FP conversion.  Per-flavour opcode bits, indexed
	       by neon_cvt_flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		return;
	    }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19344 
/* VCVTR: convert using mode 'x' (see do_neon_cvt_1).  */

static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}
19350 
/* VCVT: convert with round-towards-zero semantics (mode 'z').  */

static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}
19356 
/* VCVTA: directed-rounding convert, mode 'a'.  */

static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}
19362 
/* VCVTN: directed-rounding convert, mode 'n'.  */

static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}
19368 
/* VCVTP: directed-rounding convert, mode 'p'.  */

static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}
19374 
/* VCVTM: directed-rounding convert, mode 'm'.  */

static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19380 
/* Shared encoder for the VCVTT/VCVTB half-precision forms.  T selects the
   top-half (VCVTT) rather than bottom-half (VCVTB) variant, TO the
   conversion direction, and IS_DOUBLE the double-precision variant (which
   requires FP-ARMv8).  */

static void
do_neon_cvttb_2 (bool t, bool to, bool is_double)
{
  if (is_double)
    mark_feature_used (&fpu_vfp_ext_armv8);

  /* The double-precision register sits on the non-half-precision side of
     the conversion.  */
  encode_arm_vfp_reg (inst.operands[0].reg,
		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg,
		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
  inst.instruction |= to ? 0x10000 : 0;
  inst.instruction |= t ? 0x80 : 0;
  inst.instruction |= is_double ? 0x100 : 0;
  do_vfp_cond_or_thumb ();
}
19396 
/* Assemble the VCVTB/VCVTT family.  T is true for VCVTT, false for
   VCVTB.  Handles the VFP half <-> single/double forms, the BF16
   narrowing form, and the MVE Q-register forms.  */
static void
do_neon_cvttb_1 (bool t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      /* MVE vector forms.  */
      int single_to_half = 0;
      if (!check_simd_pred_availability (true, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      /* An integer <-> float flavour spelt with a t/b suffix is really a
	 predicated MVE VCVT; re-dispatch it through do_neon_cvt_1.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour ==  neon_cvt_flavour_u16_f16
	      || flavour ==  neon_cvt_flavour_s16_f16
	      || flavour ==  neon_cvt_flavour_f16_s16
	      || flavour ==  neon_cvt_flavour_f16_u16
	      || flavour ==  neon_cvt_flavour_u32_f32
	      || flavour ==  neon_cvt_flavour_s32_f32
	      || flavour ==  neon_cvt_flavour_f32_s32
	      || flavour ==  neon_cvt_flavour_f32_u32))
	{
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* MVE VCVTB/VCVTT between f16 and f32.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* F32 -> F16 (VFP).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* F16 -> F32 (VFP).  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/false);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* F64 -> F16.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/true);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      /* F16 -> F64.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/false, /*is_double=*/true);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      /* F32 -> BF16 narrowing form.  */
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/true, /*is_double=*/false);
    }
  else
    return;
}
19487 
/* Assemble VCVTB (bottom-half variant of the half-precision
   conversions).  */
static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (false);
}
19493 
19494 
/* Assemble VCVTT (top-half variant of the half-precision
   conversions).  */
static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (true);
}
19500 
/* Encode the immediate form of VMOV/VMVN (vector immediate load).
   Searches for a cmode/op encoding of the parsed immediate, flipping
   between VMOV and VMVN (with the immediate bits inverted) when the
   value is only representable the other way round.  */
static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* A 64-bit immediate arrives split: low half in .imm, high half
     (when present) in .reg with regisimm set.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the (possibly flipped) op bit, then the destination
     register, Q bit and cmode field.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19552 
/* Assemble VMVN: the register form is encoded here directly, the
   immediate form is delegated to neon_move_immediate.  */
static void
do_neon_mvn (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[1].isreg)
    {
      enum neon_shape rs;
      /* MVE accepts only the Q-register form.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	rs = neon_select_shape (NS_QQ, NS_NULL);
      else
	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

      if (rs == NS_NULL)
	return;

      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
    }
  else
    {
      NEON_ENCODE (IMMED, inst);
      neon_move_immediate ();
    }

  neon_dp_fixup (&inst);

  /* For MVE the immediate form must also target a Q register.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
    }
}
19590 
19591 /* Encode instructions of form:
19592 
19593   |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
19594   |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
19595 
static void
neon_mixed_length (struct neon_type_el et, unsigned size)
{
  /* Rd field.  */
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Rn field.  */
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  /* Rm field.  */
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  /* U bit and element-size field (see the layout diagram above).  */
  inst.instruction |= (et.type == NT_unsigned) << 24;
  inst.instruction |= neon_logbits (size) << 20;

  neon_dp_fixup (&inst);
}
19610 
/* Assemble VADDL/VSUBL/VABDL.  The genuine Neon lengthening form is
   Qd = Dn op Dm; under MVE these spellings are instead interpreted as
   VADD/VSUB/VABD with an le/lt condition inside an IT block.  */
static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* Rewrite the placeholder condition as lt (0xb) or le (0xd).  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Re-dispatch through the corresponding non-lengthening mnemonic.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19658 
/* Assemble VABAL (absolute difference and accumulate, long:
   Qd += |Dn - Dm|).  */
static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19666 
/* Common worker for long multiply(-accumulate) instructions whose last
   operand may be either a scalar or a D register.  REGTYPES is the type
   mask checked for the scalar variant and SCALARTYPES the mask for the
   register variant — NOTE(review): the parameter names appear swapped
   relative to their use; all current callers pass interchangeable
   masks, so behavior is unaffected.  */
static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar (indexed element) variant.  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Plain register variant.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
19685 
/* Assemble the long multiply(-accumulate) forms that accept signed or
   unsigned 16/32-bit types and an optional scalar final operand.  */
static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19691 
/* Like neon_scalar_for_mul, this function generates an Rm encoding from GAS's
   internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */
19694 
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  /* Split GAS's packed scalar into register number and element index,
     then build the Rm field bits for the Q or D form.  Out-of-range
     scalars report an error and encode as 0.  */
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: register in bits [2:0], index split across bits 3 and 5.  */
      if (regno <= 7 && elno <= 3)
	return ((regno & 0x7)
		| ((elno & 0x1) << 3)
		| (((elno >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: register split across bits [2:0] and 5, index in bit 3.  */
      if (regno <= 15 && elno <= 1)
	return (((regno & 0x1) << 5)
		| ((regno >> 1) & 0x7)
		| ((elno & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
19724 
/* Assemble VFMAL/VFMSL (half-precision multiply-accumulate with
   single-precision accumulation).  SUBTYPE is 0 for VFMAL and 1 for
   VFMSL.  */
static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size'
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

  /* Unlike usually NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19803 
static void
do_neon_vfmal (void)
{
  /* VFMAL is the additive form (subtype 0) of the long FP16
     multiply-accumulate.  */
  do_neon_fmac_maybe_scalar_long (0);
}
19809 
static void
do_neon_vfmsl (void)
{
  /* VFMSL is the subtractive form (subtype 1) of the long FP16
     multiply-accumulate.  */
  do_neon_fmac_maybe_scalar_long (1);
}
19815 
/* Assemble the wide dyadic operations (Qd = Qn op Dm), e.g. VADDW.  */
static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19823 
/* Assemble the narrowing dyadic operations: result elements are half
   the width of the source elements.  */
static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
19834 
/* Assemble the saturating long multiplies (signed 16/32-bit types
   only), with an optional scalar final operand.  */
static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19840 
/* Assemble VMULL.  The scalar variant is delegated to
   do_neon_mac_maybe_scalar_long; the register variant additionally
   handles polynomial types, including the ARMv8 crypto P64 form.  */
static void
do_neon_vmull (void)
{
  if (inst.operands[2].isscalar)
    do_neon_mac_maybe_scalar_long ();
  else
    {
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);

      if (et.type == NT_poly)
	NEON_ENCODE (POLY, inst);
      else
	NEON_ENCODE (INTEGER, inst);

      /* For polynomial encoding the U bit must be zero, and the size must
	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
	 obviously, as 0b10).  */
      if (et.size == 64)
	{
	  /* Check we're on the correct architecture.  */
	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
	    inst.error =
	      _("Instruction form not available on this architecture.");

	  /* Make the size field come out as 0b10 (see comment above).  */
	  et.size = 32;
	}

      neon_mixed_length (et, et.size);
    }
}
19872 
/* Assemble VEXT (vector extract).  The immediate element index from the
   source is scaled into the byte offset the encoding requires.  */
static void
do_neon_ext (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
  /* Convert the element index into a byte offset.  */
  unsigned imm = (inst.operands[3].imm * et.size) / 8;

  /* The byte offset must fit inside one D (8 bytes) or Q (16 bytes)
     register.  */
  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
	      _("shift out of range"));
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= imm << 8;

  neon_dp_fixup (&inst);
}
19894 
/* Assemble VREV16/VREV32/VREV64 (reverse elements within regions of
   the given width).  */
static void
do_neon_rev (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  /* MVE accepts only the Q-register form.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    rs = neon_select_shape (NS_QQ, NS_NULL);
  else
    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);

  struct neon_type_el et = neon_check_type (2, rs,
    N_EQK, N_8 | N_16 | N_32 | N_KEY);

  unsigned op = (inst.instruction >> 7) & 3;
  /* N (width of reversed regions) is encoded as part of the bitmask. We
     extract it here to check the elements to be reversed are smaller.
     Otherwise we'd get a reserved instruction.  */
  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
      && inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
		 " operands makes instruction UNPREDICTABLE"));

  gas_assert (elsize != 0);
  constraint (et.size >= elsize,
	      _("elements must be smaller than reversal region"));
  neon_two_same (neon_quad (rs), 1, et.size);
}
19926 
/* Assemble VDUP: duplicate either a vector scalar (Neon only) or an
   ARM core register into every lane of the destination vector.  */
static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: Neon-only form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Scalar index, pre-shifted into position above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* ARM core register source.  */
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19996 
/* Assemble the MVE VMOV variant that transfers two GPRs to or from two
   lanes of a Q register.  TOQ is nonzero for the GPR -> vector
   direction, in which case the Q-lane operands come first in the
   operand list.  */
static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand positions depend on direction: destinations come first.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (!toQ && inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("Destination registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  /* NOTE(review): the Q-lane operand's .reg appears to pack register
     number and lane index (reg / 32 selects the register, reg % 4 the
     lane) — confirm against the operand parser.  */
  inst.instruction = 0xec000f00;
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
20034 
/* Assemble MVE VMOVN (vector move and narrow; i16/i32 element sizes
   only).  */
static void
do_mve_movn (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
					    | N_KEY);

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Size field encodes the *narrowed* element width.  */
  inst.instruction |= (neon_logbits (et.size) - 1) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;

}
20057 
20058 /* VMOV has particularly many variations. It can be one of:
20059      0. VMOV<c><q> <Qd>, <Qm>
20060      1. VMOV<c><q> <Dd>, <Dm>
20061    (Register operations, which are VORR with Rm = Rn.)
20062      2. VMOV<c><q>.<dt> <Qd>, #<imm>
20063      3. VMOV<c><q>.<dt> <Dd>, #<imm>
20064    (Immediate loads.)
20065      4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
20066    (ARM register to scalar.)
20067      5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20068    (Two ARM registers to vector.)
20069      6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20070    (Scalar to ARM register.)
20071      7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20072    (Vector to two ARM registers.)
20073      8. VMOV.F32 <Sd>, <Sm>
20074      9. VMOV.F64 <Dd>, <Dm>
20075    (VFP register moves.)
20076     10. VMOV.F32 <Sd>, #imm
20077     11. VMOV.F64 <Dd>, #imm
20078    (VFP float immediate load.)
20079     12. VMOV <Rd>, <Sm>
20080    (VFP single to ARM reg.)
20081     13. VMOV <Sd>, <Rm>
20082    (ARM reg to VFP single.)
20083     14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20084    (Two ARM regs to two VFP singles.)
20085     15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20086    (Two VFP singles to two ARM regs.)
    16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
    17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
    18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
    19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20091 
20092    These cases can be disambiguated using neon_select_shape, except cases 1/9
20093    and 3/11 which depend on the operand type too.
20094 
20095    All the encoded bits are hardcoded by this function.
20096 
20097    Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20098    Cases 5, 7 may be used with VFPv2 and above.
20099 
20100    FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20101    can specify a type where it doesn't make sense to, and is ignored).  */
20102 
20103 static void
do_neon_mov(void)20104 do_neon_mov (void)
20105 {
20106   enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
20107 					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
20108 					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
20109 					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
20110 					  NS_NULL);
20111   struct neon_type_el et;
20112   const char *ldconst = 0;
20113 
20114   switch (rs)
20115     {
20116     case NS_DD:  /* case 1/9.  */
20117       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
20118       /* It is not an error here if no type is given.  */
20119       inst.error = NULL;
20120 
20121       /* In MVE we interpret the following instructions as same, so ignoring
20122 	 the following type (float) and size (64) checks.
20123 	 a: VMOV<c><q> <Dd>, <Dm>
20124 	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
20125       if ((et.type == NT_float && et.size == 64)
20126 	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
20127 	{
20128 	  do_vfp_nsyn_opcode ("fcpyd");
20129 	  break;
20130 	}
20131       /* fall through.  */
20132 
20133     case NS_QQ:  /* case 0/1.  */
20134       {
20135 	if (!check_simd_pred_availability (false,
20136 					   NEON_CHECK_CC | NEON_CHECK_ARCH))
20137 	  return;
20138 	/* The architecture manual I have doesn't explicitly state which
20139 	   value the U bit should have for register->register moves, but
20140 	   the equivalent VORR instruction has U = 0, so do that.  */
20141 	inst.instruction = 0x0200110;
20142 	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20143 	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20144 	inst.instruction |= LOW4 (inst.operands[1].reg);
20145 	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
20146 	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
20147 	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
20148 	inst.instruction |= neon_quad (rs) << 6;
20149 
20150 	neon_dp_fixup (&inst);
20151       }
20152       break;
20153 
20154     case NS_DI:  /* case 3/11.  */
20155       et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
20156       inst.error = NULL;
20157       if (et.type == NT_float && et.size == 64)
20158 	{
20159 	  /* case 11 (fconstd).  */
20160 	  ldconst = "fconstd";
20161 	  goto encode_fconstd;
20162 	}
20163       /* fall through.  */
20164 
20165     case NS_QI:  /* case 2/3.  */
20166       if (!check_simd_pred_availability (false,
20167 					 NEON_CHECK_CC | NEON_CHECK_ARCH))
20168 	return;
20169       inst.instruction = 0x0800010;
20170       neon_move_immediate ();
20171       neon_dp_fixup (&inst);
20172       break;
20173 
20174     case NS_SR:  /* case 4.  */
20175       {
20176 	unsigned bcdebits = 0;
20177 	int logsize;
20178 	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
20179 	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
20180 
20181 	/* .<size> is optional here, defaulting to .32. */
20182 	if (inst.vectype.elems == 0
20183 	    && inst.operands[0].vectype.type == NT_invtype
20184 	    && inst.operands[1].vectype.type == NT_invtype)
20185 	  {
20186 	    inst.vectype.el[0].type = NT_untyped;
20187 	    inst.vectype.el[0].size = 32;
20188 	    inst.vectype.elems = 1;
20189 	  }
20190 
20191 	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
20192 	logsize = neon_logbits (et.size);
20193 
20194 	if (et.size != 32)
20195 	  {
20196 	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
20197 		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
20198 	      return;
20199 	  }
20200 	else
20201 	  {
20202 	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
20203 			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20204 			_(BAD_FPU));
20205 	  }
20206 
20207 	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20208 	  {
20209 	    if (inst.operands[1].reg == REG_SP)
20210 	      as_tsktsk (MVE_BAD_SP);
20211 	    else if (inst.operands[1].reg == REG_PC)
20212 	      as_tsktsk (MVE_BAD_PC);
20213 	  }
20214 	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;
20215 
20216 	constraint (et.type == NT_invtype, _("bad type for scalar"));
20217 	constraint (x >= size / et.size, _("scalar index out of range"));
20218 
20219 
20220 	switch (et.size)
20221 	  {
20222 	  case 8:  bcdebits = 0x8; break;
20223 	  case 16: bcdebits = 0x1; break;
20224 	  case 32: bcdebits = 0x0; break;
20225 	  default: ;
20226 	  }
20227 
20228 	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
20229 
20230 	inst.instruction = 0xe000b10;
20231 	do_vfp_cond_or_thumb ();
20232 	inst.instruction |= LOW4 (dn) << 16;
20233 	inst.instruction |= HI1 (dn) << 7;
20234 	inst.instruction |= inst.operands[1].reg << 12;
20235 	inst.instruction |= (bcdebits & 3) << 5;
20236 	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
20237 	inst.instruction |= (x >> (3-logsize)) << 16;
20238       }
20239       break;
20240 
20241     case NS_DRR:  /* case 5 (fmdrr).  */
20242       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20243 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20244 		  _(BAD_FPU));
20245 
20246       inst.instruction = 0xc400b10;
20247       do_vfp_cond_or_thumb ();
20248       inst.instruction |= LOW4 (inst.operands[0].reg);
20249       inst.instruction |= HI1 (inst.operands[0].reg) << 5;
20250       inst.instruction |= inst.operands[1].reg << 12;
20251       inst.instruction |= inst.operands[2].reg << 16;
20252       break;
20253 
20254     case NS_RS:  /* case 6.  */
20255       {
20256 	unsigned logsize;
20257 	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
20258 	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
20259 	unsigned abcdebits = 0;
20260 
20261 	/* .<dt> is optional here, defaulting to .32. */
20262 	if (inst.vectype.elems == 0
20263 	    && inst.operands[0].vectype.type == NT_invtype
20264 	    && inst.operands[1].vectype.type == NT_invtype)
20265 	  {
20266 	    inst.vectype.el[0].type = NT_untyped;
20267 	    inst.vectype.el[0].size = 32;
20268 	    inst.vectype.elems = 1;
20269 	  }
20270 
20271 	et = neon_check_type (2, NS_NULL,
20272 			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
20273 	logsize = neon_logbits (et.size);
20274 
20275 	if (et.size != 32)
20276 	  {
20277 	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
20278 		&& vfp_or_neon_is_neon (NEON_CHECK_CC
20279 					| NEON_CHECK_ARCH) == FAIL)
20280 	      return;
20281 	  }
20282 	else
20283 	  {
20284 	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
20285 			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20286 			_(BAD_FPU));
20287 	  }
20288 
20289 	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20290 	  {
20291 	    if (inst.operands[0].reg == REG_SP)
20292 	      as_tsktsk (MVE_BAD_SP);
20293 	    else if (inst.operands[0].reg == REG_PC)
20294 	      as_tsktsk (MVE_BAD_PC);
20295 	  }
20296 
20297 	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;
20298 
20299 	constraint (et.type == NT_invtype, _("bad type for scalar"));
20300 	constraint (x >= size / et.size, _("scalar index out of range"));
20301 
20302 	switch (et.size)
20303 	  {
20304 	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
20305 	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
20306 	  case 32: abcdebits = 0x00; break;
20307 	  default: ;
20308 	  }
20309 
20310 	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
20311 	inst.instruction = 0xe100b10;
20312 	do_vfp_cond_or_thumb ();
20313 	inst.instruction |= LOW4 (dn) << 16;
20314 	inst.instruction |= HI1 (dn) << 7;
20315 	inst.instruction |= inst.operands[0].reg << 12;
20316 	inst.instruction |= (abcdebits & 3) << 5;
20317 	inst.instruction |= (abcdebits >> 2) << 21;
20318 	inst.instruction |= (x >> (3-logsize)) << 16;
20319       }
20320       break;
20321 
20322     case NS_RRD:  /* case 7 (fmrrd).  */
20323       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20324 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20325 		  _(BAD_FPU));
20326 
20327       inst.instruction = 0xc500b10;
20328       do_vfp_cond_or_thumb ();
20329       inst.instruction |= inst.operands[0].reg << 12;
20330       inst.instruction |= inst.operands[1].reg << 16;
20331       inst.instruction |= LOW4 (inst.operands[2].reg);
20332       inst.instruction |= HI1 (inst.operands[2].reg) << 5;
20333       break;
20334 
20335     case NS_FF:  /* case 8 (fcpys).  */
20336       do_vfp_nsyn_opcode ("fcpys");
20337       break;
20338 
20339     case NS_HI:
20340     case NS_FI:  /* case 10 (fconsts).  */
20341       ldconst = "fconsts";
20342     encode_fconstd:
20343       if (!inst.operands[1].immisfloat)
20344 	{
20345 	  unsigned new_imm;
20346 	  /* Immediate has to fit in 8 bits so float is enough.  */
20347 	  float imm = (float) inst.operands[1].imm;
20348 	  memcpy (&new_imm, &imm, sizeof (float));
20349 	  /* But the assembly may have been written to provide an integer
20350 	     bit pattern that equates to a float, so check that the
20351 	     conversion has worked.  */
20352 	  if (is_quarter_float (new_imm))
20353 	    {
20354 	      if (is_quarter_float (inst.operands[1].imm))
20355 		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
20356 
20357 	      inst.operands[1].imm = new_imm;
20358 	      inst.operands[1].immisfloat = 1;
20359 	    }
20360 	}
20361 
20362       if (is_quarter_float (inst.operands[1].imm))
20363 	{
20364 	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
20365 	  do_vfp_nsyn_opcode (ldconst);
20366 
20367 	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
20368 	  if (rs == NS_HI)
20369 	    do_scalar_fp16_v82_encode ();
20370 	}
20371       else
20372 	first_error (_("immediate out of range"));
20373       break;
20374 
20375     case NS_RH:
20376     case NS_RF:  /* case 12 (fmrs).  */
20377       do_vfp_nsyn_opcode ("fmrs");
20378       /* ARMv8.2 fp16 vmov.f16 instruction.  */
20379       if (rs == NS_RH)
20380 	do_scalar_fp16_v82_encode ();
20381       break;
20382 
20383     case NS_HR:
20384     case NS_FR:  /* case 13 (fmsr).  */
20385       do_vfp_nsyn_opcode ("fmsr");
20386       /* ARMv8.2 fp16 vmov.f16 instruction.  */
20387       if (rs == NS_HR)
20388 	do_scalar_fp16_v82_encode ();
20389       break;
20390 
20391     case NS_RRSS:
20392       do_mve_mov (0);
20393       break;
20394     case NS_SSRR:
20395       do_mve_mov (1);
20396       break;
20397 
20398     /* The encoders for the fmrrs and fmsrr instructions expect three operands
20399        (one of which is a list), but we have parsed four.  Do some fiddling to
20400        make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
20401        expect.  */
20402     case NS_RRFF:  /* case 14 (fmrrs).  */
20403       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20404 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20405 		  _(BAD_FPU));
20406       constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
20407 		  _("VFP registers must be adjacent"));
20408       inst.operands[2].imm = 2;
20409       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
20410       do_vfp_nsyn_opcode ("fmrrs");
20411       break;
20412 
20413     case NS_FFRR:  /* case 15 (fmsrr).  */
20414       constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20415 		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20416 		  _(BAD_FPU));
20417       constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
20418 		  _("VFP registers must be adjacent"));
20419       inst.operands[1] = inst.operands[2];
20420       inst.operands[2] = inst.operands[3];
20421       inst.operands[0].imm = 2;
20422       memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
20423       do_vfp_nsyn_opcode ("fmsrr");
20424       break;
20425 
20426     case NS_NULL:
20427       /* neon_select_shape has determined that the instruction
20428 	 shape is wrong and has already set the error message.  */
20429       break;
20430 
20431     default:
20432       abort ();
20433     }
20434 }
20435 
/* Encode the MVE VMOVL instruction (widening move of each vector element),
   or fall back to the generic VMOV handler when the parsed operands do not
   match the VMOVL form (exactly two Q registers).  */

static void
do_mve_movl (void)
{
  if (!(inst.operands[0].present && inst.operands[0].isquad
      && inst.operands[1].present && inst.operands[1].isquad
      && !inst.operands[2].present))
    {
      /* Not a Q,Q pair: re-dispatch to the general VMOV encoder.  */
      inst.instruction = 0;
      inst.cond = 0xb;
      if (thumb_mode)
	set_pred_insn_type (INSIDE_IT_INSN);
      do_neon_mov ();
      return;
    }

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  if (inst.cond != COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;

  /* Only 8- and 16-bit elements can be widened.  */
  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
					    | N_S16 | N_U16 | N_KEY);

  /* U bit (bit 28) encodes unsigned widening.  */
  inst.instruction |= (et.type == NT_unsigned) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Source element size field.  */
  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;
}
20468 
/* Encode V{R}SHR — vector shift right by immediate, optionally rounding —
   for Neon or MVE.  A zero shift is re-encoded as VMOV; otherwise the shift
   amount is encoded as et.size - imm.  */

static void
do_neon_rshift_round_imm (void)
{
  if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  struct neon_type_el et;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* MVE only has the Q-register form.  */
      rs = neon_select_shape (NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
    }
  int imm = inst.operands[2].imm;

  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      do_neon_mov ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for shift"));
  neon_imm_shift (true, et.type == NT_unsigned, neon_quad (rs), et,
		  et.size - imm);
}
20503 
/* Encode the ARMv8.2 scalar fp16 VMOV.F16 (register) instruction.
   Conditional execution is either warned about (Thumb, UNPREDICTABLE)
   or rejected (ARM).  */

static void
do_neon_movhf (void)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
  constraint (rs != NS_HH, _("invalid suffix"));

  if (inst.cond != COND_ALWAYS)
    {
      if (thumb_mode)
	{
	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
		     " the behaviour is UNPREDICTABLE"));
	}
      else
	{
	  inst.error = BAD_COND;
	  return;
	}
    }

  do_vfp_sp_monadic ();

  /* Force the unconditional (0xF) encoding.  */
  inst.is_neon = 1;
  inst.instruction |= 0xf0000000;
}
20529 
20530 static void
do_neon_movl(void)20531 do_neon_movl (void)
20532 {
20533   struct neon_type_el et = neon_check_type (2, NS_QD,
20534     N_EQK | N_DBL, N_SU_32 | N_KEY);
20535   unsigned sizebits = et.size >> 3;
20536   inst.instruction |= sizebits << 19;
20537   neon_two_same (0, et.type == NT_unsigned, -1);
20538 }
20539 
20540 static void
do_neon_trn(void)20541 do_neon_trn (void)
20542 {
20543   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20544   struct neon_type_el et = neon_check_type (2, rs,
20545     N_EQK, N_8 | N_16 | N_32 | N_KEY);
20546   NEON_ENCODE (INTEGER, inst);
20547   neon_two_same (neon_quad (rs), 1, et.size);
20548 }
20549 
20550 static void
do_neon_zip_uzp(void)20551 do_neon_zip_uzp (void)
20552 {
20553   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20554   struct neon_type_el et = neon_check_type (2, rs,
20555     N_EQK, N_8 | N_16 | N_32 | N_KEY);
20556   if (rs == NS_DD && et.size == 32)
20557     {
20558       /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
20559       inst.instruction = N_MNEM_vtrn;
20560       do_neon_trn ();
20561       return;
20562     }
20563   neon_two_same (neon_quad (rs), 1, et.size);
20564 }
20565 
20566 static void
do_neon_sat_abs_neg(void)20567 do_neon_sat_abs_neg (void)
20568 {
20569   if (!check_simd_pred_availability (false, NEON_CHECK_CC | NEON_CHECK_ARCH))
20570     return;
20571 
20572   enum neon_shape rs;
20573   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20574     rs = neon_select_shape (NS_QQ, NS_NULL);
20575   else
20576     rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20577   struct neon_type_el et = neon_check_type (2, rs,
20578     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20579   neon_two_same (neon_quad (rs), 1, et.size);
20580 }
20581 
20582 static void
do_neon_pair_long(void)20583 do_neon_pair_long (void)
20584 {
20585   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20586   struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
20587   /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
20588   inst.instruction |= (et.type == NT_unsigned) << 7;
20589   neon_two_same (neon_quad (rs), 1, et.size);
20590 }
20591 
20592 static void
do_neon_recip_est(void)20593 do_neon_recip_est (void)
20594 {
20595   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20596   struct neon_type_el et = neon_check_type (2, rs,
20597     N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
20598   inst.instruction |= (et.type == NT_float) << 8;
20599   neon_two_same (neon_quad (rs), 1, et.size);
20600 }
20601 
20602 static void
do_neon_cls(void)20603 do_neon_cls (void)
20604 {
20605   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
20606     return;
20607 
20608   enum neon_shape rs;
20609   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20610    rs = neon_select_shape (NS_QQ, NS_NULL);
20611   else
20612    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20613 
20614   struct neon_type_el et = neon_check_type (2, rs,
20615     N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20616   neon_two_same (neon_quad (rs), 1, et.size);
20617 }
20618 
20619 static void
do_neon_clz(void)20620 do_neon_clz (void)
20621 {
20622   if (!check_simd_pred_availability (false, NEON_CHECK_ARCH | NEON_CHECK_CC))
20623     return;
20624 
20625   enum neon_shape rs;
20626   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20627    rs = neon_select_shape (NS_QQ, NS_NULL);
20628   else
20629    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20630 
20631   struct neon_type_el et = neon_check_type (2, rs,
20632     N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
20633   neon_two_same (neon_quad (rs), 1, et.size);
20634 }
20635 
20636 static void
do_neon_cnt(void)20637 do_neon_cnt (void)
20638 {
20639   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20640   struct neon_type_el et = neon_check_type (2, rs,
20641     N_EQK | N_INT, N_8 | N_KEY);
20642   neon_two_same (neon_quad (rs), 1, et.size);
20643 }
20644 
20645 static void
do_neon_swp(void)20646 do_neon_swp (void)
20647 {
20648   enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20649   if (rs == NS_NULL)
20650     return;
20651   neon_two_same (neon_quad (rs), 1, -1);
20652 }
20653 
/* Encode Neon VTBL / VTBX (table lookup / lookup-with-miss-passthrough).
   Operand 1 is a register list of 1-4 D registers; its length minus one
   goes in bits [9:8].  */

static void
do_neon_tbl_tbx (void)
{
  unsigned listlenbits;
  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);

  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
    {
      first_error (_("bad list length for table lookup"));
      return;
    }

  listlenbits = inst.operands[1].imm - 1;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= listlenbits << 8;

  neon_dp_fixup (&inst);
}
20677 
/* Encode VLDM / VSTM (load/store multiple FP registers), including the
   decrement-before (DB) forms.  Single-precision lists are delegated to
   the VFP handler.  */

static void
do_neon_ldm_stm (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));
  /* P, U and L bits are part of bitmask.  */
  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
  /* Each D register occupies two words in the transfer.  */
  unsigned offsetbits = inst.operands[1].imm * 2;

  if (inst.operands[1].issingle)
    {
      do_vfp_nsyn_ldm_stm (is_dbmode);
      return;
    }

  constraint (is_dbmode && !inst.operands[0].writeback,
	      _("writeback (!) must be used for VLDMDB and VSTMDB"));

  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
	      _("register list must contain at least 1 and at most 16 "
		"registers"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[0].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 22;

  inst.instruction |= offsetbits;

  do_vfp_cond_or_thumb ();
}
20710 
20711 static void
do_vfp_nsyn_pop(void)20712 do_vfp_nsyn_pop (void)
20713 {
20714   nsyn_insert_sp ();
20715   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20716     return do_vfp_nsyn_opcode ("vldm");
20717   }
20718 
20719   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20720 	      _(BAD_FPU));
20721 
20722   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20723 	      _("register list must contain at least 1 and at most 16 "
20724 		"registers"));
20725 
20726   if (inst.operands[1].issingle)
20727     do_vfp_nsyn_opcode ("fldmias");
20728   else
20729     do_vfp_nsyn_opcode ("fldmiad");
20730 }
20731 
20732 static void
do_vfp_nsyn_push(void)20733 do_vfp_nsyn_push (void)
20734 {
20735   nsyn_insert_sp ();
20736   if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20737     return do_vfp_nsyn_opcode ("vstmdb");
20738   }
20739 
20740   constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20741 	      _(BAD_FPU));
20742 
20743   constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20744 	      _("register list must contain at least 1 and at most 16 "
20745 		"registers"));
20746 
20747   if (inst.operands[1].issingle)
20748     do_vfp_nsyn_opcode ("fstmdbs");
20749   else
20750     do_vfp_nsyn_opcode ("fstmdbd");
20751 }
20752 
20753 
/* Encode VLDR / VSTR of a single S or D register, dispatching to the
   VFP single-/double-precision handlers.  Also diagnoses deprecated or
   UNPREDICTABLE uses of PC as the base register for stores.  */

static void
do_neon_ldr_str (void)
{
  /* L bit (bit 20) distinguishes loads from stores.  */
  int is_ldr = (inst.instruction & (1 << 20)) != 0;

  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
     And is UNPREDICTABLE in thumb mode.  */
  if (!is_ldr
      && inst.operands[1].reg == REG_PC
      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
    {
      if (thumb_mode)
	inst.error = _("Use of PC here is UNPREDICTABLE");
      else if (warn_on_deprecated)
	as_tsktsk (_("Use of PC here is deprecated"));
    }

  if (inst.operands[0].issingle)
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("flds");
      else
	do_vfp_nsyn_opcode ("fsts");

      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
      if (inst.vectype.el[0].size == 16)
	do_scalar_fp16_v82_encode ();
    }
  else
    {
      if (is_ldr)
	do_vfp_nsyn_opcode ("fldd");
      else
	do_vfp_nsyn_opcode ("fstd");
    }
}
20790 
/* Encode the Thumb VLDR / VSTR (system register) forms.  Only an
   immediate offset addressing mode is accepted; the base may not be PC.  */

static void
do_t_vldr_vstr_sysreg (void)
{
  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
  bool is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);

  /* Use of PC is UNPREDICTABLE.  */
  if (inst.operands[1].reg == REG_PC)
    inst.error = _("Use of PC here is UNPREDICTABLE");

  if (inst.operands[1].immisreg)
    inst.error = _("instruction does not accept register index");

  if (!inst.operands[1].isreg)
    inst.error = _("instruction does not accept PC-relative addressing");

  /* The offset is a 7-bit immediate (scaled by 4 by the encoder below).  */
  if (abs (inst.operands[1].imm) >= (1 << 7))
    inst.error = _("immediate value out of range");

  inst.instruction = 0xec000f80;
  if (is_vldr)
    inst.instruction |= 1 << sysreg_vldr_bitno;
  encode_arm_cp_address (1, true, false, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
  /* System register number is split across bits [15:13] and bit 22.  */
  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
}
20817 
/* Top-level VLDR / VSTR dispatcher: route to the system-register form
   (operand 0 is not a register) or the ordinary FP-register form, after
   checking architecture availability.  */

static void
do_vldr_vstr (void)
{
  bool sysreg_op = !inst.operands[0].isreg;

  /* VLDR/VSTR (System Register).  */
  if (sysreg_op)
    {
      if (!mark_feature_used (&arm_ext_v8_1m_main))
	as_bad (_("Instruction not permitted on this architecture"));

      do_t_vldr_vstr_sysreg ();
    }
  /* VLDR/VSTR.  */
  else
    {
      if (!mark_feature_used (&fpu_vfp_ext_v1xd)
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_bad (_("Instruction not permitted on this architecture"));
      do_neon_ldr_str ();
    }
}
20840 
20841 /* "interleave" version also handles non-interleaving register VLD1/VST1
20842    instructions.  */
20843 
static void
do_neon_ld_st_interleave (void)
{
  /* Validate element type and alignment, then encode the "type" field
     (bits [11:8]) for interleaved VLD<n>/VST<n>.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* The alignment (in bits) lives in the top byte of the parsed operand;
     allowed values depend on the register-list length.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20909 
20910 /* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20911    *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20912    otherwise. The variable arguments are a list of pairs of legal (size, align)
20913    values, terminated with -1.  */
20914 
static int
neon_alignment_bit (int size, int align, int *do_alignment, ...)
{
  va_list ap;
  int result = FAIL, thissize, thisalign;

  /* No explicit alignment given: nothing to validate.  */
  if (!inst.operands[1].immisalign)
    {
      *do_alignment = 0;
      return SUCCESS;
    }

  va_start (ap, do_alignment);

  /* Scan the -1-terminated (size, align) pairs for a match.  */
  do
    {
      thissize = va_arg (ap, int);
      if (thissize == -1)
	break;
      thisalign = va_arg (ap, int);

      if (size == thissize && align == thisalign)
	result = SUCCESS;
    }
  while (result != SUCCESS);

  va_end (ap);

  if (result == SUCCESS)
    *do_alignment = 1;
  else
    first_error (_("unsupported alignment for instruction"));

  return result;
}
20950 
/* Encode single-lane VLD<n>/VST<n>: validate list length, lane index,
   stride and alignment for the given <n>, then encode the lane number,
   size and alignment bits.  */

static void
do_neon_ld_st_lane (void)
{
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  /* Alignment in bits, from the top byte of the addressing operand.  */
  int align = inst.operands[1].imm >> 8;
  /* <n> minus one, from bits [9:8] of the initial bitmask.  */
  int n = (inst.instruction >> 8) & 3;
  int max_el = 64 / et.size;

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
		      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
21035 
21036 /* Encode single n-element structure to all lanes VLD<n> instructions.  */
21037 
static void
do_neon_ld_dup (void)
{
  /* Validate list length, stride and alignment per <n>, then encode the
     size and alignment fields for the load-to-all-lanes forms.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one lives in bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a distinct size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  inst.instruction |= do_alignment << 4;
}
21110 
21111 /* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
21112    apart from bits [11:4].  */
21113 
21114 static void
do_neon_ldx_stx(void)21115 do_neon_ldx_stx (void)
21116 {
21117   if (inst.operands[1].isreg)
21118     constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21119 
21120   switch (NEON_LANE (inst.operands[0].imm))
21121     {
21122     case NEON_INTERLEAVE_LANES:
21123       NEON_ENCODE (INTERLV, inst);
21124       do_neon_ld_st_interleave ();
21125       break;
21126 
21127     case NEON_ALL_LANES:
21128       NEON_ENCODE (DUP, inst);
21129       if (inst.instruction == N_INV)
21130 	{
21131 	  first_error ("only loads support such operands");
21132 	  break;
21133 	}
21134       do_neon_ld_dup ();
21135       break;
21136 
21137     default:
21138       NEON_ENCODE (LANE, inst);
21139       do_neon_ld_st_lane ();
21140     }
21141 
21142   /* L bit comes from bit mask.  */
21143   inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21144   inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21145   inst.instruction |= inst.operands[1].reg << 16;
21146 
21147   if (inst.operands[1].postind)
21148     {
21149       int postreg = inst.operands[1].imm & 0xf;
21150       constraint (!inst.operands[1].immisreg,
21151 		  _("post-index must be a register"));
21152       constraint (postreg == 0xd || postreg == 0xf,
21153 		  _("bad register for post-index"));
21154       inst.instruction |= postreg;
21155     }
21156   else
21157     {
21158       constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21159       constraint (inst.relocs[0].exp.X_op != O_constant
21160 		  || inst.relocs[0].exp.X_add_number != 0,
21161 		  BAD_ADDR_MODE);
21162 
21163       if (inst.operands[1].writeback)
21164 	{
21165 	  inst.instruction |= 0xd;
21166 	}
21167       else
21168 	inst.instruction |= 0xf;
21169     }
21170 
21171   if (thumb_mode)
21172     inst.instruction |= 0xf9000000;
21173   else
21174     inst.instruction |= 0xf4000000;
21175 }
21176 
21177 /* FP v8.  */
/* Encode a three-operand FP v8 instruction in the given shape RS,
   dispatching to the single- or double-precision dyadic encoder and
   forcing the unconditional (0xF) prefix.  */

static void
do_vfp_nsyn_fpv8 (enum neon_shape rs)
{
  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  NEON_ENCODE (FPV8, inst);

  if (rs == NS_FFF || rs == NS_HHH)
    {
      do_vfp_sp_dyadic ();

      /* ARMv8.2 fp16 instruction.  */
      if (rs == NS_HHH)
	do_scalar_fp16_v82_encode ();
    }
  else
    do_vfp_dp_rd_rn_rm ();

  /* Bit 8 marks the double-precision variant.  */
  if (rs == NS_DDD)
    inst.instruction |= 0x100;

  inst.instruction |= 0xf0000000;
}
21205 
21206 static void
do_vsel(void)21207 do_vsel (void)
21208 {
21209   set_pred_insn_type (OUTSIDE_PRED_INSN);
21210 
21211   if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
21212     first_error (_("invalid instruction shape"));
21213 }
21214 
/* Encode VMAXNM / VMINNM: try the scalar FP v8 form first, otherwise
   fall through to the Neon/MVE vector form.  */

static void
do_vmaxnm (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  /* Scalar (VFP) form handled entirely by the FP v8 encoder.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (true, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21229 
/* Common encoder for the VRINT family.  MODE selects the rounding
   behaviour (a/n/p/m/x/z/r).  Scalar shapes use the VFP encoding;
   vector shapes use the Neon/MVE encoding, in which mode 'r' does not
   exist.  */
static void
do_vrint_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
  struct neon_type_el et;

  if (rs == NS_NULL)
    return;

  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
     D register operands.  */
  if (neon_shape_class[rs] == SC_DOUBLE)
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		_(BAD_FPU));

  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
			| N_VFP);
  if (et.type != NT_invtype)
    {
      /* VFP encodings.  */
      /* Modes a/n/p/m produce the unconditional (0xf-prefixed)
	 encodings below, so they may not be predicated.  */
      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
	set_pred_insn_type (OUTSIDE_PRED_INSN);

      NEON_ENCODE (FPV8, inst);
      if (rs == NS_FF || rs == NS_HH)
	do_vfp_sp_monadic ();
      else
	do_vfp_dp_rd_rm ();

      switch (mode)
	{
	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
	default: abort ();
	}

      /* Bit 8 marks double-precision operands.  */
      inst.instruction |= (rs == NS_DD) << 8;
      do_vfp_cond_or_thumb ();

      /* ARMv8.2 fp16 vrint instruction.  */
      if (rs == NS_HH)
      do_scalar_fp16_v82_encode ();
    }
  else
    {
      /* Neon encodings (or something broken...).  */
      inst.error = NULL;
      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);

      if (et.type == NT_invtype)
	return;

      if (!check_simd_pred_availability (true,
					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
	return;

      NEON_ENCODE (FLOAT, inst);

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      /* Mask off the original size bits and reencode them.  */
      inst.instruction = ((inst.instruction & 0xfff3ffff)
			  | neon_logbits (et.size) << 18);

      /* Rounding mode selector lives in bits <9:7>.  */
      switch (mode)
	{
	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
	default: abort ();
	}

      if (thumb_mode)
	inst.instruction |= 0xfc000000;
      else
	inst.instruction |= 0xf0000000;
    }
}
21321 
/* VRINTX: round with 'exact' (inexact-signalling) behaviour.  */
static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}

/* VRINTZ: round towards zero.  */
static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}

/* VRINTR: round using the current FPSCR rounding mode (VFP only).  */
static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}

/* VRINTA: round to nearest, ties away from zero.  */
static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}

/* VRINTN: round to nearest, ties to even.  */
static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}

/* VRINTP: round towards plus infinity.  */
static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}

/* VRINTM: round towards minus infinity.  */
static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21363 
/* Validate and pack the scalar operand OPND of an indexed VCMLA for
   element size ELSIZE (16 or 32).  The register number occupies the
   low four bits; for fp16 the element index goes in bit 4.  On an
   out-of-range scalar, records an error and returns 0.  */
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned reg = NEON_SCALAR_REG (opnd);
  unsigned idx = NEON_SCALAR_INDEX (opnd);
  bool ok;

  if (elsize == 16)
    /* fp16: index 0-1, register 0-15.  */
    ok = idx < 2 && reg < 16;
  else
    /* fp32: index must be 0.  */
    ok = elsize == 32 && idx == 0;

  if (!ok)
    {
      first_error (_("scalar out of range"));
      return 0;
    }

  return elsize == 16 ? (reg | (idx << 4)) : reg;
}
21378 
/* Encode VCMLA (complex multiply-accumulate with rotation) in either
   the Armv8.3-A Neon form or the MVE form.  The rotation operand
   (0/90/180/270) is encoded as 0-3.  */
static void
do_vcmla (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Encode the rotation as a 2-bit field.  */
  rot /= 90;

  if (!check_simd_pred_availability (true,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-scalar) form: Neon only, not available with MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Vector form: MVE only accepts the Q-register shape.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      /* Overlapping destination/source with 32-bit elements is only
	 warned about, not rejected.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21438 
/* Encode VCADD (complex add with rotation).  The rotation operand must
   be 90 or 270.  Float element types use the Armv8.3-A/MVE float
   encoding; integer element types exist only in MVE.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Neon: float elements only, D or Q shape.  */
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE: Q shape only, but integer element types are also valid.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      /* The rotation is a single bit: 0 for 90, 1 for 270.  */
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer VCADD: MVE-only encoding.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21497 
/* Dot Product instructions encoding support.  */

/* Encode VSDOT/VUDOT.  UNSIGNED_P selects the unsigned variant.  The
   third operand is either a plain D/Q register or an indexed scalar.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional,  the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21554 
/* Dot Product instructions for signed integer (VSDOT).  */

static void
do_neon_dotproduct_s (void)
{
  return do_neon_dotproduct (0);
}

/* Dot Product instructions for unsigned integer (VUDOT).  */

static void
do_neon_dotproduct_u (void)
{
  return do_neon_dotproduct (1);
}
21570 
/* Encode VUSDOT (mixed-sign dot product), either the indexed-scalar or
   the vector form.  May not be predicated.  */
static void
do_vusdot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand packs register and index; split them.  */
      int idx = inst.operands[2].reg & 0xf;
      constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (idx << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21598 
/* Encode VSUDOT (mixed-sign dot product).  Only the indexed-scalar
   form is handled here; no vector form is encoded.  May not be
   predicated.  */
static void
do_vsudot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand packs register and index; split them.  */
      int idx = inst.operands[2].reg & 0xf;
      constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (idx << 5);
    }
}
21619 
21620 static void
do_vsmmla(void)21621 do_vsmmla (void)
21622 {
21623   enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21624   neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
21625 
21626   set_pred_insn_type (OUTSIDE_PRED_INSN);
21627 
21628   neon_three_args (1);
21629 
21630 }
21631 
21632 static void
do_vummla(void)21633 do_vummla (void)
21634 {
21635   enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
21636   neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);
21637 
21638   set_pred_insn_type (OUTSIDE_PRED_INSN);
21639 
21640   neon_three_args (1);
21641 
21642 }
21643 
21644 static void
check_cde_operand(size_t idx,int is_dual)21645 check_cde_operand (size_t idx, int is_dual)
21646 {
21647   unsigned Rx = inst.operands[idx].reg;
21648   bool isvec = inst.operands[idx].isvec;
21649   if (is_dual == 0 && thumb_mode)
21650     constraint (
21651 		!((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
21652 		_("Register must be r0-r14 except r13, or APSR_nzcv."));
21653   else
21654     constraint ( !((Rx <= 10 && Rx % 2 == 0 )),
21655       _("Register must be an even register between r0-r10."));
21656 }
21657 
21658 static bool
cde_coproc_enabled(unsigned coproc)21659 cde_coproc_enabled (unsigned coproc)
21660 {
21661   switch (coproc)
21662   {
21663     case 0: return mark_feature_used (&arm_ext_cde0);
21664     case 1: return mark_feature_used (&arm_ext_cde1);
21665     case 2: return mark_feature_used (&arm_ext_cde2);
21666     case 3: return mark_feature_used (&arm_ext_cde3);
21667     case 4: return mark_feature_used (&arm_ext_cde4);
21668     case 5: return mark_feature_used (&arm_ext_cde5);
21669     case 6: return mark_feature_used (&arm_ext_cde6);
21670     case 7: return mark_feature_used (&arm_ext_cde7);
21671     default: return false;
21672   }
21673 }
21674 
#define cde_coproc_pos 8
/* Validate the coprocessor operand of a CDE instruction (operand 0)
   and encode it into bits <10:8>.  */
static void
cde_handle_coproc (void)
{
  unsigned coproc = inst.operands[0].reg;
  constraint (coproc > 7, _("CDE Coprocessor must be in range 0-7"));
  /* The coprocessor must also be enabled via +cdecpN.  */
  constraint (!(cde_coproc_enabled (coproc)), BAD_CDE_COPROC);
  inst.instruction |= coproc << cde_coproc_pos;
}
#undef cde_coproc_pos
21685 
/* Set the predication state for a CX* instruction.  IS_ACCUM is TRUE
   for the accumulating (CX*A) variants, which are the only ones that
   may carry a condition suffix.  */
static void
cxn_handle_predication (bool is_accum)
{
  if (is_accum && conditional_insn ())
    set_pred_insn_type (INSIDE_IT_INSN);
  else if (conditional_insn ())
  /* conditional_insn essentially checks for a suffix, not whether the
     instruction is inside an IT block or not.
     The non-accumulator versions should not have suffixes.  */
    inst.error = BAD_SYNTAX;
  else
    set_pred_insn_type (OUTSIDE_PRED_INSN);
}
21699 
/* Encode CX1/CX1A/CX1D/CX1DA.  IS_DUAL selects the two-destination
   (cx1d) form; IS_ACCUM selects the accumulating variant, which only
   affects predication handling.  */
static void
do_custom_instruction_1 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd;

  Rd = inst.operands[1].reg;
  check_cde_operand (1, is_dual);

  if (is_dual == 1)
    {
      /* The dual form writes Rd and Rd+1; the second register is
	 implicit in the encoding.  */
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx1d requires consecutive destination registers."));
      imm = inst.operands[3].imm;
    }
  else if (is_dual == 0)
    imm = inst.operands[2].imm;
  else
    abort ();

  inst.instruction |= Rd << 12;
  /* Scatter the 13-bit immediate over the encoding's split fields.  */
  inst.instruction |= (imm & 0x1F80) << 9;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21730 
/* Encode CX2/CX2A/CX2D/CX2DA.  IS_DUAL selects the two-destination
   (cx2d) form; IS_ACCUM selects the accumulating variant, which only
   affects predication handling.  */
static void
do_custom_instruction_2 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      /* The dual form writes Rd and Rd+1; the second register is
	 implicit in the encoding.  */
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx2d requires consecutive destination registers."));
      imm = inst.operands[4].imm;
      Rn = inst.operands[3].reg;
    }
  else if (is_dual == 0)
  {
    imm = inst.operands[3].imm;
    Rn = inst.operands[2].reg;
  }
  else
    abort ();

  /* Rn follows the single-register rules; Rd follows the rules for
     the selected (single or dual) form.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (1, is_dual);

  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;

  /* Scatter the 10-bit immediate over the encoding's split fields.  */
  inst.instruction |= (imm & 0x0380) << 13;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21769 
/* Encode CX3/CX3A/CX3D/CX3DA.  IS_DUAL selects the two-destination
   (cx3d) form; IS_ACCUM selects the accumulating variant, which only
   affects predication handling.  */
static void
do_custom_instruction_3 (int is_dual, bool is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn, Rm;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      /* The dual form writes Rd and Rd+1; the second register is
	 implicit in the encoding.  */
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx3d requires consecutive destination registers."));
      imm = inst.operands[5].imm;
      Rn = inst.operands[3].reg;
      Rm = inst.operands[4].reg;
    }
  else if (is_dual == 0)
  {
    imm = inst.operands[4].imm;
    Rn = inst.operands[2].reg;
    Rm = inst.operands[3].reg;
  }
  else
    abort ();

  /* Rn and Rm follow the single-register rules.  */
  check_cde_operand (1, is_dual);
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (3 + is_dual, /* is_dual = */0);

  inst.instruction |= Rd;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm << 12;

  /* Scatter the 6-bit immediate over the encoding's split fields.  */
  inst.instruction |= (imm & 0x0038) << 17;
  inst.instruction |= (imm & 0x0004) << 5;
  inst.instruction |= (imm & 0x0003) << 4;

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21812 
/* CX1: single destination, non-accumulating.  */
static void
do_cx1 (void)
{
  return do_custom_instruction_1 (0, 0);
}

/* CX1A: single destination, accumulating.  */
static void
do_cx1a (void)
{
  return do_custom_instruction_1 (0, 1);
}

/* CX1D: dual destination, non-accumulating.  */
static void
do_cx1d (void)
{
  return do_custom_instruction_1 (1, 0);
}

/* CX1DA: dual destination, accumulating.  */
static void
do_cx1da (void)
{
  return do_custom_instruction_1 (1, 1);
}

/* CX2: single destination, non-accumulating.  */
static void
do_cx2 (void)
{
  return do_custom_instruction_2 (0, 0);
}

/* CX2A: single destination, accumulating.  */
static void
do_cx2a (void)
{
  return do_custom_instruction_2 (0, 1);
}

/* CX2D: dual destination, non-accumulating.  */
static void
do_cx2d (void)
{
  return do_custom_instruction_2 (1, 0);
}

/* CX2DA: dual destination, accumulating.  */
static void
do_cx2da (void)
{
  return do_custom_instruction_2 (1, 1);
}

/* CX3: single destination, non-accumulating.  */
static void
do_cx3 (void)
{
  return do_custom_instruction_3 (0, 0);
}

/* CX3A: single destination, accumulating.  */
static void
do_cx3a (void)
{
  return do_custom_instruction_3 (0, 1);
}

/* CX3D: dual destination, non-accumulating.  */
static void
do_cx3d (void)
{
  return do_custom_instruction_3 (1, 0);
}

/* CX3DA: dual destination, accumulating.  */
static void
do_cx3da (void)
{
  return do_custom_instruction_3 (1, 1);
}
21884 
/* Encode REGNUM as the 'd' (destination) vector operand: Vd in bits
   <15:12>, D in bit 22.  */
static void
vcx_assign_vec_d (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 12;
  inst.instruction |= LOW1 (regnum) << 22;
}

/* Encode REGNUM as the 'm' vector operand: Vm in bits <3:0>, M in
   bit 5.  */
static void
vcx_assign_vec_m (unsigned regnum)
{
  inst.instruction |= HI4 (regnum);
  inst.instruction |= LOW1 (regnum) << 5;
}

/* Encode REGNUM as the 'n' vector operand: Vn in bits <19:16>, N in
   bit 7.  */
static void
vcx_assign_vec_n (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 16;
  inst.instruction |= LOW1 (regnum) << 7;
}
21905 
/* The kind of vector register a VCX* operand shape uses.  */
enum vcx_reg_type {
    q_reg,	/* MVE Q register.  */
    d_reg,	/* Double-precision D register.  */
    s_reg	/* Single-precision S register.  */
};
21911 
21912 static enum vcx_reg_type
vcx_get_reg_type(enum neon_shape ns)21913 vcx_get_reg_type (enum neon_shape ns)
21914 {
21915   gas_assert (ns == NS_PQI
21916 	      || ns == NS_PDI
21917 	      || ns == NS_PFI
21918 	      || ns == NS_PQQI
21919 	      || ns == NS_PDDI
21920 	      || ns == NS_PFFI
21921 	      || ns == NS_PQQQI
21922 	      || ns == NS_PDDDI
21923 	      || ns == NS_PFFFI);
21924   if (ns == NS_PQI || ns == NS_PQQI || ns == NS_PQQQI)
21925     return q_reg;
21926   if (ns == NS_PDI || ns == NS_PDDI || ns == NS_PDDDI)
21927     return d_reg;
21928   return s_reg;
21929 }
21930 
#define vcx_size_pos 24
#define vcx_vec_pos 6
/* Set the size/vector bits of a VCX* instruction according to
   REG_TYPE and return the multiplier to apply to register numbers
   before encoding them in the Vd:D (or equivalent) bits.  */
static unsigned
vcx_handle_shape (enum vcx_reg_type reg_type)
{
  unsigned mult = 2;
  if (reg_type == q_reg)
    inst.instruction |= 1 << vcx_vec_pos;
  else if (reg_type == d_reg)
    inst.instruction |= 1 << vcx_size_pos;
  else
    mult = 1;
  /* NOTE:
     The documentation says that the Q registers are encoded as 2*N in the D:Vd
     bits (or equivalent for N and M registers).
     Similarly the D registers are encoded as N in D:Vd bits.
     While the S registers are encoded as N in the Vd:D bits.

     Taking into account the maximum values of these registers we can see a
     nicer pattern for calculation:
       Q -> 7, D -> 15, S -> 31

     If we say that everything is encoded in the Vd:D bits, then we can say
     that Q is encoded as 4*N, and D is encoded as 2*N.
     This way the bits will end up the same, and calculation is simpler.
     (calculation is now:
	1. Multiply by a number determined by the register letter.
	2. Encode resulting number in Vd:D bits.)

      This is made a little more complicated by automatic handling of 'Q'
      registers elsewhere, which means the register number is already 2*N where
      N is the number the user wrote after the register letter.
     */
  return mult;
}
#undef vcx_vec_pos
#undef vcx_size_pos
21968 
/* Check that register number R is valid for a VCX* operand of kind
   REG_TYPE.  For Q registers R is already doubled by the parser (see
   vcx_handle_shape), so the q0-q7 limit appears as R < 16.  */
static void
vcx_ensure_register_in_range (unsigned R, enum vcx_reg_type reg_type)
{
  if (reg_type == q_reg)
    {
      gas_assert (R % 2 == 0);
      constraint (R >= 16, _("'q' register must be in range 0-7"));
    }
  else if (reg_type == d_reg)
    constraint (R >= 16, _("'d' register must be in range 0-15"));
  else
    constraint (R >= 32, _("'s' register must be in range 0-31"));
}
21982 
/* Per-position encoders for VCX* vector operands; index 0 is the
   destination (d), 1 the m position, 2 the n position.  See
   vcx_handle_register_arguments for the operand-to-position mapping.  */
static void (*vcx_assign_vec[3]) (unsigned) = {
    vcx_assign_vec_d,
    vcx_assign_vec_m,
    vcx_assign_vec_n
};
21988 
/* Range-check and encode the NUM_REGISTERS vector operands (starting
   at operand 1) of a VCX* instruction, all of kind REG_TYPE.  */
static void
vcx_handle_register_arguments (unsigned num_registers,
			       enum vcx_reg_type reg_type)
{
  unsigned R, i;
  unsigned reg_mult = vcx_handle_shape (reg_type);
  for (i = 0; i < num_registers; i++)
    {
      R = inst.operands[i+1].reg;
      vcx_ensure_register_in_range (R, reg_type);
      /* With three registers the assembly order is d, n, m but the
	 encoders are indexed d, m, n — swap positions for i = 1, 2.  */
      if (num_registers == 3 && i > 0)
	{
	  if (i == 2)
	    vcx_assign_vec[1] (R * reg_mult);
	  else
	    vcx_assign_vec[2] (R * reg_mult);
	  continue;
	}
      vcx_assign_vec[i](R * reg_mult);
    }
}
22010 
22011 static void
vcx_handle_insn_block(enum vcx_reg_type reg_type)22012 vcx_handle_insn_block (enum vcx_reg_type reg_type)
22013 {
22014   if (reg_type == q_reg)
22015     if (inst.cond > COND_ALWAYS)
22016       inst.pred_insn_type = INSIDE_VPT_INSN;
22017     else
22018       inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
22019   else if (inst.cond == COND_ALWAYS)
22020     inst.pred_insn_type = OUTSIDE_PRED_INSN;
22021   else
22022     inst.error = BAD_NOT_IT;
22023 }
22024 
/* Checks and encoding steps shared by VCX1/VCX2/VCX3: feature and
   coprocessor validation, register encoding for the NUM_ARGS vector
   operands of shape RS, and predication handling.  */
static void
vcx_handle_common_checks (unsigned num_args, enum neon_shape rs)
{
  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
  cde_handle_coproc ();
  enum vcx_reg_type reg_type = vcx_get_reg_type (rs);
  vcx_handle_register_arguments (num_args, reg_type);
  vcx_handle_insn_block (reg_type);
  /* Q forms need MVE; S/D forms need MVE or the v8-M FP extension.  */
  if (reg_type == q_reg)
    constraint (!mark_feature_used (&mve_ext),
		_("vcx instructions with Q registers require MVE"));
  else
    constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp, cpu_variant)
		  && mark_feature_used (&armv8m_fp))
		&& !mark_feature_used (&mve_ext),
		_("vcx instructions with S or D registers require either MVE"
		  " or Armv8-M floating point extension."));
}
22043 
/* Encode VCX1.  The immediate is scattered over split fields; its top
   bit is only available in the Q (MVE) form, so the S/D forms are
   limited to 0-2047.  */
static void
do_vcx1 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQI, NS_PDI, NS_PFI, NS_NULL);
  vcx_handle_common_checks (1, rs);

  unsigned imm = inst.operands[2].imm;
  inst.instruction |= (imm & 0x03f);
  inst.instruction |= (imm & 0x040) << 1;
  inst.instruction |= (imm & 0x780) << 9;
  if (rs != NS_PQI)
    constraint (imm >= 2048,
		_("vcx1 with S or D registers takes immediate within 0-2047"));
  inst.instruction |= (imm & 0x800) << 13;
}
22059 
/* Encode VCX2.  The immediate is scattered over split fields; its top
   bit is only available in the Q (MVE) form, so the S/D forms are
   limited to 0-63.  */
static void
do_vcx2 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQQI, NS_PDDI, NS_PFFI, NS_NULL);
  vcx_handle_common_checks (2, rs);

  unsigned imm = inst.operands[3].imm;
  inst.instruction |= (imm & 0x01) << 4;
  inst.instruction |= (imm & 0x02) << 6;
  inst.instruction |= (imm & 0x3c) << 14;
  if (rs != NS_PQQI)
    constraint (imm >= 64,
		_("vcx2 with S or D registers takes immediate within 0-63"));
  inst.instruction |= (imm & 0x40) << 18;
}
22075 
22076 static void
do_vcx3(void)22077 do_vcx3 (void)
22078 {
22079   enum neon_shape rs = neon_select_shape (NS_PQQQI, NS_PDDDI, NS_PFFFI, NS_NULL);
22080   vcx_handle_common_checks (3, rs);
22081 
22082   unsigned imm = inst.operands[4].imm;
22083   inst.instruction |= (imm & 0x1) << 4;
22084   inst.instruction |= (imm & 0x6) << 19;
22085   if (rs != NS_PQQQI)
22086     constraint (imm >= 8,
22087 		_("vcx2 with S or D registers takes immediate within 0-7"));
22088   inst.instruction |= (imm & 0x8) << 21;
22089 }
22090 
/* Crypto v1 instructions.  */

/* Encode a two-operand crypto instruction (AES*, SHA1H, SHA1SU1,
   SHA256SU0) with element type ELTTYPE and opcode field OP (-1 means
   no op field).  May not be predicated.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
22116 
/* Encode a three-operand crypto instruction (SHA1*, SHA256*) with U
   bit U and opcode field OP.  May not be predicated.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
22131 
22132 static void
do_aese(void)22133 do_aese (void)
22134 {
22135   do_crypto_2op_1 (N_8, 0);
22136 }
22137 
22138 static void
do_aesd(void)22139 do_aesd (void)
22140 {
22141   do_crypto_2op_1 (N_8, 1);
22142 }
22143 
22144 static void
do_aesmc(void)22145 do_aesmc (void)
22146 {
22147   do_crypto_2op_1 (N_8, 2);
22148 }
22149 
22150 static void
do_aesimc(void)22151 do_aesimc (void)
22152 {
22153   do_crypto_2op_1 (N_8, 3);
22154 }
22155 
22156 static void
do_sha1c(void)22157 do_sha1c (void)
22158 {
22159   do_crypto_3op_1 (0, 0);
22160 }
22161 
22162 static void
do_sha1p(void)22163 do_sha1p (void)
22164 {
22165   do_crypto_3op_1 (0, 1);
22166 }
22167 
22168 static void
do_sha1m(void)22169 do_sha1m (void)
22170 {
22171   do_crypto_3op_1 (0, 2);
22172 }
22173 
22174 static void
do_sha1su0(void)22175 do_sha1su0 (void)
22176 {
22177   do_crypto_3op_1 (0, 3);
22178 }
22179 
22180 static void
do_sha256h(void)22181 do_sha256h (void)
22182 {
22183   do_crypto_3op_1 (1, 0);
22184 }
22185 
22186 static void
do_sha256h2(void)22187 do_sha256h2 (void)
22188 {
22189   do_crypto_3op_1 (1, 1);
22190 }
22191 
22192 static void
do_sha256su1(void)22193 do_sha256su1 (void)
22194 {
22195   do_crypto_3op_1 (1, 2);
22196 }
22197 
22198 static void
do_sha1h(void)22199 do_sha1h (void)
22200 {
22201   do_crypto_2op_1 (N_32, -1);
22202 }
22203 
/* SHA1SU1: SHA1 schedule update 1.  */

static void
do_sha1su1 (void)
{
  do_crypto_2op_1 (N_32, 0);
}
22209 
/* SHA256SU0: SHA256 schedule update 0.  */

static void
do_sha256su0 (void)
{
  do_crypto_2op_1 (N_32, 1);
}
22215 
22216 static void
do_crc32_1(unsigned int poly,unsigned int sz)22217 do_crc32_1 (unsigned int poly, unsigned int sz)
22218 {
22219   unsigned int Rd = inst.operands[0].reg;
22220   unsigned int Rn = inst.operands[1].reg;
22221   unsigned int Rm = inst.operands[2].reg;
22222 
22223   set_pred_insn_type (OUTSIDE_PRED_INSN);
22224   inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
22225   inst.instruction |= LOW4 (Rn) << 16;
22226   inst.instruction |= LOW4 (Rm);
22227   inst.instruction |= sz << (thumb_mode ? 4 : 21);
22228   inst.instruction |= poly << (thumb_mode ? 20 : 9);
22229 
22230   if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
22231     as_warn (UNPRED_REG ("r15"));
22232 }
22233 
/* CRC32B: CRC32 on a byte.  */

static void
do_crc32b (void)
{
  do_crc32_1 (0, 0);
}
22239 
/* CRC32H: CRC32 on a halfword.  */

static void
do_crc32h (void)
{
  do_crc32_1 (0, 1);
}
22245 
/* CRC32W: CRC32 on a word.  */

static void
do_crc32w (void)
{
  do_crc32_1 (0, 2);
}
22251 
/* CRC32CB: CRC32C (Castagnoli polynomial) on a byte.  */

static void
do_crc32cb (void)
{
  do_crc32_1 (1, 0);
}
22257 
/* CRC32CH: CRC32C (Castagnoli polynomial) on a halfword.  */

static void
do_crc32ch (void)
{
  do_crc32_1 (1, 1);
}
22263 
/* CRC32CW: CRC32C (Castagnoli polynomial) on a word.  */

static void
do_crc32cw (void)
{
  do_crc32_1 (1, 2);
}
22269 
/* VJCVT: Javascript-style conversion of double-precision to signed
   32-bit integer (Armv8.3-A).  Reuses the standard SP/DP convert
   encoder after the type check.  */

static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
22279 
/* VDOT: BFloat16 dot product, in both its indexed-scalar and plain
   three-register register forms.  */

static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      /* Indexed form: the last operand is a scalar Dm[idx].  */
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The parsed scalar operand packs the element index in the low
	 nibble with the register number above it; unpack both.  */
      int idx = inst.operands[2].reg & 0xf;
      constraint ((idx != 1 && idx != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (idx << 5);
    }
  else
    {
      /* Plain three-register form.  */
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
22307 
22308 static void
do_vmmla(void)22309 do_vmmla (void)
22310 {
22311   enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
22312   neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
22313 
22314   constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
22315   set_pred_insn_type (OUTSIDE_PRED_INSN);
22316 
22317   neon_three_args (1);
22318 }
22319 
22320 
22321 /* Overall per-instruction processing.	*/
22322 
22323 /* We need to be able to fix up arbitrary expressions in some statements.
22324    This is so that we can handle symbols that are an arbitrary distance from
22325    the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
22326    which returns part of an address in a form which will be valid for
22327    a data instruction.	We do this by pushing the expression into a symbol
22328    in the expr_section, and creating a fix for that.  */
22329 
/* Record a fixup of SIZE bytes at offset WHERE within FRAG, resolving
   expression EXP against relocation RELOC; PC_REL is non-zero for
   pc-relative fixups.  Simple sym+offset expressions are passed through
   directly; anything more complex is wrapped in an expression symbol
   first.  */

static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  /* Rewrite the expression to refer to the new symbol, then
	     fall through to the generic sym+offset handling.  */
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Arbitrary expression: push it into a symbol in the expression
	 section and fix against that instead.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
22383 
22384 /* Create a frg for an instruction requiring relaxation.  */
22385 static void
output_relax_insn(void)22386 output_relax_insn (void)
22387 {
22388   char * to;
22389   symbolS *sym;
22390   int offset;
22391 
22392   /* The size of the instruction is unknown, so tie the debug info to the
22393      start of the instruction.  */
22394   dwarf2_emit_insn (0);
22395 
22396   switch (inst.relocs[0].exp.X_op)
22397     {
22398     case O_symbol:
22399       sym = inst.relocs[0].exp.X_add_symbol;
22400       offset = inst.relocs[0].exp.X_add_number;
22401       break;
22402     case O_constant:
22403       sym = NULL;
22404       offset = inst.relocs[0].exp.X_add_number;
22405       break;
22406     default:
22407       sym = make_expr_symbol (&inst.relocs[0].exp);
22408       offset = 0;
22409       break;
22410   }
22411   to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
22412 		 inst.relax, sym, offset, NULL/*offset, opcode*/);
22413   md_number_to_chars (to, inst.instruction, THUMB_SIZE);
22414 }
22415 
22416 /* Write a 32-bit thumb instruction to buf.  */
22417 static void
put_thumb32_insn(char * buf,unsigned long insn)22418 put_thumb32_insn (char * buf, unsigned long insn)
22419 {
22420   md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
22421   md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
22422 }
22423 
/* Emit the instruction currently assembled in INST to the output frag,
   along with any fixups it requires.  STR is the original source line,
   used only for diagnostics.  */

static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  /* Instructions whose final size depends on relaxation are emitted
     via a variant frag instead.  */
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* A double-length ARM instruction is emitted as the same word
	 twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  /* Emit a fixup for every pending relocation on this instruction.  */
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
22474 
22475 static char *
output_it_inst(int cond,int mask,char * to)22476 output_it_inst (int cond, int mask, char * to)
22477 {
22478   unsigned long instruction = 0xbf00;
22479 
22480   mask &= 0xf;
22481   instruction |= mask;
22482   instruction |= cond << 4;
22483 
22484   if (to == NULL)
22485     {
22486       to = frag_more (2);
22487 #ifdef OBJ_ELF
22488       dwarf2_emit_insn (2);
22489 #endif
22490     }
22491 
22492   md_number_to_chars (to, instruction, 2);
22493 
22494   return to;
22495 }
22496 
/* Tag values used in struct asm_opcode's tag field.  These describe
   where (if anywhere) a conditional affix may appear in a mnemonic,
   which drives the lookup algorithm in opcode_lookup below.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22531 
22532 /* Subroutine of md_assemble, responsible for looking up the primary
22533    opcode from the mnemonic the user wrote.  STR points to the
22534    beginning of the mnemonic.
22535 
22536    This is not simply a hash table lookup, because of conditional
22537    variants.  Most instructions have conditional variants, which are
22538    expressed with a _conditional affix_ to the mnemonic.  If we were
22539    to encode each conditional variant as a literal string in the opcode
22540    table, it would have approximately 20,000 entries.
22541 
22542    Most mnemonics take this affix as a suffix, and in unified syntax,
22543    'most' is upgraded to 'all'.  However, in the divided syntax, some
22544    instructions take the affix as an infix, notably the s-variants of
22545    the arithmetic instructions.  Of those instructions, all but six
22546    have the infix appear after the third character of the mnemonic.
22547 
22548    Accordingly, the algorithm for looking up primary opcodes given
22549    an identifier is:
22550 
22551    1. Look up the identifier in the opcode table.
22552       If we find a match, go to step U.
22553 
22554    2. Look up the last two characters of the identifier in the
22555       conditions table.  If we find a match, look up the first N-2
22556       characters of the identifier in the opcode table.  If we
22557       find a match, go to step CE.
22558 
22559    3. Look up the fourth and fifth characters of the identifier in
22560       the conditions table.  If we find a match, extract those
22561       characters from the identifier, and look up the remaining
22562       characters in the opcode table.  If we find a match, go
22563       to step CM.
22564 
22565    4. Fail.
22566 
22567    U. Examine the tag field of the opcode structure, in case this is
22568       one of the six instructions with its conditional infix in an
22569       unusual place.  If it is, the tag tells us where to find the
22570       infix; look it up in the conditions table and set inst.cond
22571       accordingly.  Otherwise, this is an unconditional instruction.
22572       Again set inst.cond accordingly.  Return the opcode structure.
22573 
22574   CE. Examine the tag field to make sure this is an instruction that
22575       should receive a conditional suffix.  If it is not, fail.
22576       Otherwise, set inst.cond from the suffix we already looked up,
22577       and return the opcode structure.
22578 
22579   CM. Examine the tag field to make sure this is an instruction that
22580       should receive a conditional infix after the third character.
22581       If it is not, fail.  Otherwise, undo the edits to the current
22582       line of input and proceed as for case CE.  */
22583 
/* Look up the opcode for the mnemonic at *STR, following the algorithm
   described in the block comment above.  On success, sets inst.cond
   (and possibly inst.size_req / inst.vectype from any '.' suffixes),
   advances *STR past the mnemonic, and returns the opcode entry;
   returns NULL on failure.  */

static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							end - base);
  cond = NULL;
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      /* Tags >= OT_odd_infix_0 encode the position of a conditional
	 infix; extract and decode it.  */
      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
   {
    /* Try a one-character MVE vector-predication suffix (e.g. "t"/"e").
       Cannot have a conditional suffix on a mnemonic of less than a
       character.  */
    if (end - base < 2)
      return NULL;
     affix = end - 1;
     cond = (const struct asm_cond *) str_hash_find_n (arm_vcond_hsh, affix, 1);
     opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							   affix - base);
     /* If this opcode can not be vector predicated then don't accept it with a
	vector predication code.  */
     if (opcode && !opcode->mayBeVecPred)
       opcode = NULL;
   }
  if (!opcode || !cond)
    {
      /* Cannot have a conditional suffix on a mnemonic of less than two
	 characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							    affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  Temporarily
     remove the two infix characters in place, look the rest up, then
     restore the buffer exactly as it was.  */
  affix = base + 3;
  cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							(end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22759 
22760 /* This function generates an initial IT instruction, leaving its block
22761    virtually open for the new instructions. Eventually,
22762    the mask will be updated by now_pred_add_mask () each time
22763    a new instruction needs to be included in the IT block.
22764    Finally, the block is closed with close_automatic_it_block ().
22765    The block closure can be requested either from md_assemble (),
22766    a tencode (), or due to a label hook.  */
22767 
/* Open a new automatically-generated IT block for condition COND,
   emitting a provisional IT instruction whose mask will be rewritten
   in place by now_pred_add_mask () as instructions join the block.  */

static void
new_automatic_it_block (int cond)
{
  now_pred.state = AUTOMATIC_PRED_BLOCK;
  /* Provisional mask; only the low four bits are encoded
     (output_it_inst masks with 0xf).  */
  now_pred.mask = 0x18;
  now_pred.cc = cond;
  now_pred.block_length = 1;
  mapping_state (MAP_THUMB);
  /* Remember where the IT instruction went so we can patch it.  */
  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
  now_pred.warn_deprecated = false;
  now_pred.insn_cond = true;
}
22780 
22781 /* Close an automatic IT block.
22782    See comments in new_automatic_it_block ().  */
22783 
/* Close an automatic IT block.
   See comments in new_automatic_it_block ().  */

static void
close_automatic_it_block (void)
{
  /* Reset the mask and length so subsequent queries see the block as
     closed.  */
  now_pred.mask = 0x10;
  now_pred.block_length = 0;
}
22790 
22791 /* Update the mask of the current automatically-generated IT
22792    instruction. See comments in new_automatic_it_block ().  */
22793 
/* Update the mask of the current automatically-generated IT
   instruction to include one more instruction with condition COND,
   then re-emit the IT instruction in place.
   See comments in new_automatic_it_block ().  */

static void
now_pred_add_mask (int cond)
{
#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
					      | ((bitvalue) << (nbit)))
  /* The low bit of COND distinguishes the "then" sense from the
     "else" sense relative to the block's base condition.  */
  const int resulting_bit = (cond & 1);

  now_pred.mask &= 0xf;
  /* Set the slot for this instruction, then mark the following bit as
     the new end-of-block sentinel.  */
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				   resulting_bit,
				  (5 - now_pred.block_length));
  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
				   1,
				   ((5 - now_pred.block_length) - 1));
  /* Rewrite the previously emitted IT instruction with the new mask.  */
  output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);

#undef CLEAR_BIT
#undef SET_BIT_VALUE
}
22814 
22815 /* The IT blocks handling machinery is accessed through the these functions:
22816      it_fsm_pre_encode ()               from md_assemble ()
22817      set_pred_insn_type ()		optional, from the tencode functions
22818      set_pred_insn_type_last ()		ditto
22819      in_pred_block ()			ditto
22820      it_fsm_post_encode ()              from md_assemble ()
22821      force_automatic_it_block_close ()  from label handling functions
22822 
22823    Rationale:
22824      1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22825 	initializing the IT insn type with a generic initial value depending
22826 	on the inst.condition.
22827      2) During the tencode function, two things may happen:
22828 	a) The tencode function overrides the IT insn type by
22829 	   calling either set_pred_insn_type (type) or
22830 	   set_pred_insn_type_last ().
22831 	b) The tencode function queries the IT block state by
22832 	   calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22833 
22834 	Both set_pred_insn_type and in_pred_block run the internal FSM state
22835 	handling function (handle_pred_state), because: a) setting the IT insn
22836 	type may incur in an invalid state (exiting the function),
22837 	and b) querying the state requires the FSM to be updated.
22838 	Specifically we want to avoid creating an IT block for conditional
22839 	branches, so it_fsm_pre_encode is actually a guess and we can't
22840 	determine whether an IT block is required until the tencode () routine
22841 	has decided what type of instruction this actually it.
22842 	Because of this, if set_pred_insn_type and in_pred_block have to be
22843 	used, set_pred_insn_type has to be called first.
22844 
22845 	set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22846 	that determines the insn IT type depending on the inst.cond code.
22847 	When a tencode () routine encodes an instruction that can be
22848 	either outside an IT block, or, in the case of being inside, has to be
22849 	the last one, set_pred_insn_type_last () will determine the proper
22850 	IT instruction type based on the inst.cond code. Otherwise,
22851 	set_pred_insn_type can be called for overriding that logic or
22852 	for covering other cases.
22853 
22854 	Calling handle_pred_state () may not transition the IT block state to
22855 	OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22856 	still queried. Instead, if the FSM determines that the state should
22857 	be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22858 	after the tencode () function: that's what it_fsm_post_encode () does.
22859 
22860 	Since in_pred_block () calls the state handling function to get an
22861 	updated state, an error may occur (due to invalid insns combination).
22862 	In that case, inst.error is set.
22863 	Therefore, inst.error has to be checked after the execution of
22864 	the tencode () routine.
22865 
22866      3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22867 	any pending state change (if any) that didn't take place in
22868 	handle_pred_state () as explained above.  */
22869 
22870 static void
it_fsm_pre_encode(void)22871 it_fsm_pre_encode (void)
22872 {
22873   if (inst.cond != COND_ALWAYS)
22874     inst.pred_insn_type =  INSIDE_IT_INSN;
22875   else
22876     inst.pred_insn_type = OUTSIDE_PRED_INSN;
22877 
22878   now_pred.state_handled = 0;
22879 }
22880 
22881 /* IT state FSM handling function.  */
22882 /* MVE instructions and non-MVE instructions are handled differently because of
22883    the introduction of VPT blocks.
22884    Specifications say that any non-MVE instruction inside a VPT block is
22885    UNPREDICTABLE, with the exception of the BKPT instruction.  Whereas most MVE
22886    instructions are deemed to be UNPREDICTABLE if inside an IT block.  For the
22887    few exceptions we have MVE_UNPREDICABLE_INSN.
22888    The error messages provided depending on the different combinations possible
22889    are described in the cases below:
22890    For 'most' MVE instructions:
22891    1) In an IT block, with an IT code: syntax error
22892    2) In an IT block, with a VPT code: error: must be in a VPT block
22893    3) In an IT block, with no code: warning: UNPREDICTABLE
22894    4) In a VPT block, with an IT code: syntax error
22895    5) In a VPT block, with a VPT code: OK!
22896    6) In a VPT block, with no code: error: missing code
22897    7) Outside a pred block, with an IT code: error: syntax error
22898    8) Outside a pred block, with a VPT code: error: should be in a VPT block
22899    9) Outside a pred block, with no code: OK!
22900    For non-MVE instructions:
22901    10) In an IT block, with an IT code: OK!
22902    11) In an IT block, with a VPT code: syntax error
22903    12) In an IT block, with no code: error: missing code
22904    13) In a VPT block, with an IT code: error: should be in an IT block
22905    14) In a VPT block, with a VPT code: syntax error
22906    15) In a VPT block, with no code: UNPREDICTABLE
22907    16) Outside a pred block, with an IT code: error: should be in an IT block
22908    17) Outside a pred block, with a VPT code: syntax error
22909    18) Outside a pred block, with no code: OK!
22910  */
22911 
22912 
22913 static int
handle_pred_state(void)22914 handle_pred_state (void)
22915 {
22916   now_pred.state_handled = 1;
22917   now_pred.insn_cond = false;
22918 
22919   switch (now_pred.state)
22920     {
22921     case OUTSIDE_PRED_BLOCK:
22922       switch (inst.pred_insn_type)
22923 	{
22924 	case MVE_UNPREDICABLE_INSN:
22925 	case MVE_OUTSIDE_PRED_INSN:
22926 	  if (inst.cond < COND_ALWAYS)
22927 	    {
22928 	      /* Case 7: Outside a pred block, with an IT code: error: syntax
22929 		 error.  */
22930 	      inst.error = BAD_SYNTAX;
22931 	      return FAIL;
22932 	    }
22933 	  /* Case 9:  Outside a pred block, with no code: OK!  */
22934 	  break;
22935 	case OUTSIDE_PRED_INSN:
22936 	  if (inst.cond > COND_ALWAYS)
22937 	    {
22938 	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
22939 	       */
22940 	      inst.error = BAD_SYNTAX;
22941 	      return FAIL;
22942 	    }
22943 	  /* Case 18: Outside a pred block, with no code: OK!  */
22944 	  break;
22945 
22946 	case INSIDE_VPT_INSN:
22947 	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
22948 	     a VPT block.  */
22949 	  inst.error = BAD_OUT_VPT;
22950 	  return FAIL;
22951 
22952 	case INSIDE_IT_INSN:
22953 	case INSIDE_IT_LAST_INSN:
22954 	  if (inst.cond < COND_ALWAYS)
22955 	    {
22956 	      /* Case 16: Outside a pred block, with an IT code: error: should
22957 		 be in an IT block.  */
22958 	      if (thumb_mode == 0)
22959 		{
22960 		  if (unified_syntax
22961 		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
22962 		    as_tsktsk (_("Warning: conditional outside an IT block"\
22963 				 " for Thumb."));
22964 		}
22965 	      else
22966 		{
22967 		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
22968 		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
22969 		    {
22970 		      /* Automatically generate the IT instruction.  */
22971 		      new_automatic_it_block (inst.cond);
22972 		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
22973 			close_automatic_it_block ();
22974 		    }
22975 		  else
22976 		    {
22977 		      inst.error = BAD_OUT_IT;
22978 		      return FAIL;
22979 		    }
22980 		}
22981 	      break;
22982 	    }
22983 	  else if (inst.cond > COND_ALWAYS)
22984 	    {
22985 	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
22986 	       */
22987 	      inst.error = BAD_SYNTAX;
22988 	      return FAIL;
22989 	    }
22990 	  else
22991 	    gas_assert (0);
22992 	case IF_INSIDE_IT_LAST_INSN:
22993 	case NEUTRAL_IT_INSN:
22994 	  break;
22995 
22996 	case VPT_INSN:
22997 	  if (inst.cond != COND_ALWAYS)
22998 	    first_error (BAD_SYNTAX);
22999 	  now_pred.state = MANUAL_PRED_BLOCK;
23000 	  now_pred.block_length = 0;
23001 	  now_pred.type = VECTOR_PRED;
23002 	  now_pred.cc = 0;
23003 	  break;
23004 	case IT_INSN:
23005 	  now_pred.state = MANUAL_PRED_BLOCK;
23006 	  now_pred.block_length = 0;
23007 	  now_pred.type = SCALAR_PRED;
23008 	  break;
23009 	}
23010       break;
23011 
23012     case AUTOMATIC_PRED_BLOCK:
23013       /* Three things may happen now:
23014 	 a) We should increment current it block size;
23015 	 b) We should close current it block (closing insn or 4 insns);
23016 	 c) We should close current it block and start a new one (due
23017 	 to incompatible conditions or
23018 	 4 insns-length block reached).  */
23019 
23020       switch (inst.pred_insn_type)
23021 	{
23022 	case INSIDE_VPT_INSN:
23023 	case VPT_INSN:
23024 	case MVE_UNPREDICABLE_INSN:
23025 	case MVE_OUTSIDE_PRED_INSN:
23026 	  gas_assert (0);
23027 	case OUTSIDE_PRED_INSN:
23028 	  /* The closure of the block shall happen immediately,
23029 	     so any in_pred_block () call reports the block as closed.  */
23030 	  force_automatic_it_block_close ();
23031 	  break;
23032 
23033 	case INSIDE_IT_INSN:
23034 	case INSIDE_IT_LAST_INSN:
23035 	case IF_INSIDE_IT_LAST_INSN:
23036 	  now_pred.block_length++;
23037 
23038 	  if (now_pred.block_length > 4
23039 	      || !now_pred_compatible (inst.cond))
23040 	    {
23041 	      force_automatic_it_block_close ();
23042 	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
23043 		new_automatic_it_block (inst.cond);
23044 	    }
23045 	  else
23046 	    {
23047 	      now_pred.insn_cond = true;
23048 	      now_pred_add_mask (inst.cond);
23049 	    }
23050 
23051 	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
23052 	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
23053 		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
23054 	    close_automatic_it_block ();
23055 	  break;
23056 
23057 	  /* Fallthrough.  */
23058 	case NEUTRAL_IT_INSN:
23059 	  now_pred.block_length++;
23060 	  now_pred.insn_cond = true;
23061 
23062 	  if (now_pred.block_length > 4)
23063 	    force_automatic_it_block_close ();
23064 	  else
23065 	    now_pred_add_mask (now_pred.cc & 1);
23066 	  break;
23067 
23068 	case IT_INSN:
23069 	  close_automatic_it_block ();
23070 	  now_pred.state = MANUAL_PRED_BLOCK;
23071 	  break;
23072 	}
23073       break;
23074 
23075     case MANUAL_PRED_BLOCK:
23076       {
23077 	unsigned int cond;
23078 	int is_last;
23079 	if (now_pred.type == SCALAR_PRED)
23080 	  {
23081 	    /* Check conditional suffixes.  */
23082 	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
23083 	    now_pred.mask <<= 1;
23084 	    now_pred.mask &= 0x1f;
23085 	    is_last = (now_pred.mask == 0x10);
23086 	  }
23087 	else
23088 	  {
23089 	    now_pred.cc ^= (now_pred.mask >> 4);
23090 	    cond = now_pred.cc + 0xf;
23091 	    now_pred.mask <<= 1;
23092 	    now_pred.mask &= 0x1f;
23093 	    is_last = now_pred.mask == 0x10;
23094 	  }
23095 	now_pred.insn_cond = true;
23096 
23097 	switch (inst.pred_insn_type)
23098 	  {
23099 	  case OUTSIDE_PRED_INSN:
23100 	    if (now_pred.type == SCALAR_PRED)
23101 	      {
23102 		if (inst.cond == COND_ALWAYS)
23103 		  {
23104 		    /* Case 12: In an IT block, with no code: error: missing
23105 		       code.  */
23106 		    inst.error = BAD_NOT_IT;
23107 		    return FAIL;
23108 		  }
23109 		else if (inst.cond > COND_ALWAYS)
23110 		  {
23111 		    /* Case 11: In an IT block, with a VPT code: syntax error.
23112 		     */
23113 		    inst.error = BAD_SYNTAX;
23114 		    return FAIL;
23115 		  }
23116 		else if (thumb_mode)
23117 		  {
23118 		    /* This is for some special cases where a non-MVE
23119 		       instruction is not allowed in an IT block, such as cbz,
23120 		       but are put into one with a condition code.
23121 		       You could argue this should be a syntax error, but we
23122 		       gave the 'not allowed in IT block' diagnostic in the
23123 		       past so we will keep doing so.  */
23124 		    inst.error = BAD_NOT_IT;
23125 		    return FAIL;
23126 		  }
23127 		break;
23128 	      }
23129 	    else
23130 	      {
23131 		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
23132 		as_tsktsk (MVE_NOT_VPT);
23133 		return SUCCESS;
23134 	      }
23135 	  case MVE_OUTSIDE_PRED_INSN:
23136 	    if (now_pred.type == SCALAR_PRED)
23137 	      {
23138 		if (inst.cond == COND_ALWAYS)
23139 		  {
23140 		    /* Case 3: In an IT block, with no code: warning:
23141 		       UNPREDICTABLE.  */
23142 		    as_tsktsk (MVE_NOT_IT);
23143 		    return SUCCESS;
23144 		  }
23145 		else if (inst.cond < COND_ALWAYS)
23146 		  {
23147 		    /* Case 1: In an IT block, with an IT code: syntax error.
23148 		     */
23149 		    inst.error = BAD_SYNTAX;
23150 		    return FAIL;
23151 		  }
23152 		else
23153 		  gas_assert (0);
23154 	      }
23155 	    else
23156 	      {
23157 		if (inst.cond < COND_ALWAYS)
23158 		  {
23159 		    /* Case 4: In a VPT block, with an IT code: syntax error.
23160 		     */
23161 		    inst.error = BAD_SYNTAX;
23162 		    return FAIL;
23163 		  }
23164 		else if (inst.cond == COND_ALWAYS)
23165 		  {
23166 		    /* Case 6: In a VPT block, with no code: error: missing
23167 		       code.  */
23168 		    inst.error = BAD_NOT_VPT;
23169 		    return FAIL;
23170 		  }
23171 		else
23172 		  {
23173 		    gas_assert (0);
23174 		  }
23175 	      }
23176 	  case MVE_UNPREDICABLE_INSN:
23177 	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
23178 	    return SUCCESS;
23179 	  case INSIDE_IT_INSN:
23180 	    if (inst.cond > COND_ALWAYS)
23181 	      {
23182 		/* Case 11: In an IT block, with a VPT code: syntax error.  */
23183 		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
23184 		inst.error = BAD_SYNTAX;
23185 		return FAIL;
23186 	      }
23187 	    else if (now_pred.type == SCALAR_PRED)
23188 	      {
23189 		/* Case 10: In an IT block, with an IT code: OK!  */
23190 		if (cond != inst.cond)
23191 		  {
23192 		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
23193 		      BAD_VPT_COND;
23194 		    return FAIL;
23195 		  }
23196 	      }
23197 	    else
23198 	      {
23199 		/* Case 13: In a VPT block, with an IT code: error: should be
23200 		   in an IT block.  */
23201 		inst.error = BAD_OUT_IT;
23202 		return FAIL;
23203 	      }
23204 	    break;
23205 
23206 	  case INSIDE_VPT_INSN:
23207 	    if (now_pred.type == SCALAR_PRED)
23208 	      {
23209 		/* Case 2: In an IT block, with a VPT code: error: must be in a
23210 		   VPT block.  */
23211 		inst.error = BAD_OUT_VPT;
23212 		return FAIL;
23213 	      }
23214 	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
23215 	    else if (cond != inst.cond)
23216 	      {
23217 		inst.error = BAD_VPT_COND;
23218 		return FAIL;
23219 	      }
23220 	    break;
23221 	  case INSIDE_IT_LAST_INSN:
23222 	  case IF_INSIDE_IT_LAST_INSN:
23223 	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
23224 	      {
23225 		/* Case 4: In a VPT block, with an IT code: syntax error.  */
23226 		/* Case 11: In an IT block, with a VPT code: syntax error.  */
23227 		inst.error = BAD_SYNTAX;
23228 		return FAIL;
23229 	      }
23230 	    else if (cond != inst.cond)
23231 	      {
23232 		inst.error = BAD_IT_COND;
23233 		return FAIL;
23234 	      }
23235 	    if (!is_last)
23236 	      {
23237 		inst.error = BAD_BRANCH;
23238 		return FAIL;
23239 	      }
23240 	    break;
23241 
23242 	  case NEUTRAL_IT_INSN:
23243 	    /* The BKPT instruction is unconditional even in a IT or VPT
23244 	       block.  */
23245 	    break;
23246 
23247 	  case IT_INSN:
23248 	    if (now_pred.type == SCALAR_PRED)
23249 	      {
23250 		inst.error = BAD_IT_IT;
23251 		return FAIL;
23252 	      }
23253 	    /* fall through.  */
23254 	  case VPT_INSN:
23255 	    if (inst.cond == COND_ALWAYS)
23256 	      {
23257 		/* Executing a VPT/VPST instruction inside an IT block or a
23258 		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
23259 		 */
23260 		if (now_pred.type == SCALAR_PRED)
23261 		  as_tsktsk (MVE_NOT_IT);
23262 		else
23263 		  as_tsktsk (MVE_NOT_VPT);
23264 		return SUCCESS;
23265 	      }
23266 	    else
23267 	      {
23268 		/* VPT/VPST do not accept condition codes.  */
23269 		inst.error = BAD_SYNTAX;
23270 		return FAIL;
23271 	      }
23272 	  }
23273 	}
23274       break;
23275     }
23276 
23277   return SUCCESS;
23278 }
23279 
/* One class of 16-bit Thumb encodings: an instruction belongs to the
   class when (insn & MASK) == PATTERN.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Significant encoding bits of the class.  */
  unsigned long mask;		/* Which bits of the insn to compare.  */
  const char* description;	/* Translatable class name used in the
				   deprecation warning.  */
};
23286 
23287 /* List of 16-bit instruction patterns deprecated in an IT block in
23288    ARMv8.  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }	/* Sentinel: a zero mask terminates the scan.  */
};
23301 
23302 static void
it_fsm_post_encode(void)23303 it_fsm_post_encode (void)
23304 {
23305   int is_last;
23306 
23307   if (!now_pred.state_handled)
23308     handle_pred_state ();
23309 
23310   if (now_pred.insn_cond
23311       && warn_on_restrict_it
23312       && !now_pred.warn_deprecated
23313       && warn_on_deprecated
23314       && (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
23315           || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8r))
23316       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
23317     {
23318       if (inst.instruction >= 0x10000)
23319 	{
23320 	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
23321 		     "performance deprecated in ARMv8-A and ARMv8-R"));
23322 	  now_pred.warn_deprecated = true;
23323 	}
23324       else
23325 	{
23326 	  const struct depr_insn_mask *p = depr_it_insns;
23327 
23328 	  while (p->mask != 0)
23329 	    {
23330 	      if ((inst.instruction & p->mask) == p->pattern)
23331 		{
23332 		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
23333 			       "instructions of the following class are "
23334 			       "performance deprecated in ARMv8-A and "
23335 			       "ARMv8-R: %s"), p->description);
23336 		  now_pred.warn_deprecated = true;
23337 		  break;
23338 		}
23339 
23340 	      ++p;
23341 	    }
23342 	}
23343 
23344       if (now_pred.block_length > 1)
23345 	{
23346 	  as_tsktsk (_("IT blocks containing more than one conditional "
23347 		     "instruction are performance deprecated in ARMv8-A and "
23348 		     "ARMv8-R"));
23349 	  now_pred.warn_deprecated = true;
23350 	}
23351     }
23352 
23353     is_last = (now_pred.mask == 0x10);
23354     if (is_last)
23355       {
23356 	now_pred.state = OUTSIDE_PRED_BLOCK;
23357 	now_pred.mask = 0;
23358       }
23359 }
23360 
23361 static void
force_automatic_it_block_close(void)23362 force_automatic_it_block_close (void)
23363 {
23364   if (now_pred.state == AUTOMATIC_PRED_BLOCK)
23365     {
23366       close_automatic_it_block ();
23367       now_pred.state = OUTSIDE_PRED_BLOCK;
23368       now_pred.mask = 0;
23369     }
23370 }
23371 
23372 static int
in_pred_block(void)23373 in_pred_block (void)
23374 {
23375   if (!now_pred.state_handled)
23376     handle_pred_state ();
23377 
23378   return now_pred.state != OUTSIDE_PRED_BLOCK;
23379 }
23380 
/* Whether OPCODE only has a T32 encoding.  Since this function is only used
   by t32_insn_ok, opcodes enabled by the v6t2 extension bit do not need to be
   listed here, hence the "known" in the function name.  */
23384 
23385 static bool
known_t32_only_insn(const struct asm_opcode * opcode)23386 known_t32_only_insn (const struct asm_opcode *opcode)
23387 {
23388   /* Original Thumb-1 wide instruction.  */
23389   if (opcode->tencode == do_t_blx
23390       || opcode->tencode == do_t_branch23
23391       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
23392       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
23393     return true;
23394 
23395   /* Wide-only instruction added to ARMv8-M Baseline.  */
23396   if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
23397       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
23398       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
23399       || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
23400     return true;
23401 
23402   return false;
23403 }
23404 
23405 /* Whether wide instruction variant can be used if available for a valid OPCODE
23406    in ARCH.  */
23407 
23408 static bool
t32_insn_ok(arm_feature_set arch,const struct asm_opcode * opcode)23409 t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
23410 {
23411   if (known_t32_only_insn (opcode))
23412     return true;
23413 
23414   /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
23415      of variant T3 of B.W is checked in do_t_branch.  */
23416   if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23417       && opcode->tencode == do_t_branch)
23418     return true;
23419 
23420   /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
23421   if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23422       && opcode->tencode == do_t_mov_cmp
23423       /* Make sure CMP instruction is not affected.  */
23424       && opcode->aencode == do_mov)
23425     return true;
23426 
23427   /* Wide instruction variants of all instructions with narrow *and* wide
23428      variants become available with ARMv6t2.  Other opcodes are either
23429      narrow-only or wide-only and are thus available if OPCODE is valid.  */
23430   if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
23431     return true;
23432 
23433   /* OPCODE with narrow only instruction variant or wide variant not
23434      available.  */
23435   return false;
23436 }
23437 
/* Assemble one logical source line STR: either an instruction (mnemonic
   plus operands) or a register alias created with .req/.dn/.qn.  Errors
   are reported through as_bad/as_tsktsk; on success the encoded
   instruction is emitted via output_inst.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction scratch state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1u;

  if (thumb_mode)
    {
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/true))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 16-bit Thumb encodings never fall in 0xe800-0xffff; anything
	     above 0xffff is a 32-bit encoding.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      bool is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/false))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
23632 
23633 static void
check_pred_blocks_finished(void)23634 check_pred_blocks_finished (void)
23635 {
23636 #ifdef OBJ_ELF
23637   asection *sect;
23638 
23639   for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
23640     if (seg_info (sect)->tc_segment_info_data.current_pred.state
23641 	== MANUAL_PRED_BLOCK)
23642       {
23643 	if (now_pred.type == SCALAR_PRED)
23644 	  as_warn (_("section '%s' finished with an open IT block."),
23645 		   sect->name);
23646 	else
23647 	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
23648 		   sect->name);
23649       }
23650 #else
23651   if (now_pred.state == MANUAL_PRED_BLOCK)
23652     {
23653       if (now_pred.type == SCALAR_PRED)
23654        as_warn (_("file finished with an open IT block."));
23655       else
23656 	as_warn (_("file finished with an open VPT/VPST block."));
23657     }
23658 #endif
23659 }
23660 
23661 /* Various frobbings of labels and their addresses.  */
23662 
/* Called at the start of every input line: forget the label recorded on
   the previous line, so md_assemble only re-anchors a label that appears
   on the same line as (or immediately before) the current instruction.  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
23668 
/* Process a newly defined label SYM: remember it as the most recent
   label, tag it with the current Thumb/interwork state, close any open
   automatic IT block, optionally mark it as a Thumb function, and emit
   DWARF line information for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label terminates any automatically-opened IT block.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      /* The .thumb_func marker applies to one label only.  */
      label_is_thumb_function_name = false;
    }

  dwarf2_emit_label (sym);
}
23727 
23728 bool
arm_data_in_code(void)23729 arm_data_in_code (void)
23730 {
23731   if (thumb_mode && startswith (input_line_pointer + 1, "data:"))
23732     {
23733       *input_line_pointer = '/';
23734       input_line_pointer += 5;
23735       *input_line_pointer = 0;
23736       return true;
23737     }
23738 
23739   return false;
23740 }
23741 
23742 char *
arm_canonicalize_symbol_name(char * name)23743 arm_canonicalize_symbol_name (char * name)
23744 {
23745   int len;
23746 
23747   if (thumb_mode && (len = strlen (name)) > 5
23748       && streq (name + len - 5, "/data"))
23749     *(name + len - 5) = 0;
23750 
23751   return name;
23752 }
23753 
23754 /* Table of all register names defined by default.  The user can
23755    define additional names with .req.  Note that all register names
23756    should appear in both upper and lowercase variants.	Some registers
23757    also have mixed-case names.	*/
23758 
/* REGDEF(s,n,t) builds one table entry mapping name S to register
   number N of type REG_TYPE_T; the trailing "true, 0" presumably mark
   the entry as built-in with no Neon type alias -- see struct reg_entry.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, true, 0 }
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* REGSET/REGSETH expand to entries 0-15 / 16-31 of a register bank.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* REGSET2 numbers entries 0-15 with doubled register numbers (used for
   Neon Q registers, each overlapping two D registers).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* SPLRBANK emits the lr/sp/spsr triple (both cases) for one banked mode.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)

static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
/* NOTE(review): REGNUM2, REGSETH, REGSET2 and SPLRBANK are deliberately
   left defined here -- confirm no later clash before adding #undefs.  */
#undef REGDEF
#undef REGNUM
#undef REGSET
23912 /* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
23913    within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every ordering of each subset of the four
     flags is listed, so any permutation the user writes is accepted.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23990 
23991 /* Table of V7M psr names.  */
23992 static const struct asm_psr v7m_psrs[] =
23993 {
23994   {"apsr",	   0x0 }, {"APSR",	   0x0 },
23995   {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
23996   {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
23997   {"psr",	   0x3 }, {"PSR",	   0x3 },
23998   {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
23999   {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
24000   {"epsr",	   0x6 }, {"EPSR",	   0x6 },
24001   {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
24002   {"msp",	   0x8 }, {"MSP",	   0x8 },
24003   {"psp",	   0x9 }, {"PSP",	   0x9 },
24004   {"msplim",	   0xa }, {"MSPLIM",	   0xa },
24005   {"psplim",	   0xb }, {"PSPLIM",	   0xb },
24006   {"primask",	   0x10}, {"PRIMASK",	   0x10},
24007   {"basepri",	   0x11}, {"BASEPRI",	   0x11},
24008   {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
24009   {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
24010   {"control",	   0x14}, {"CONTROL",	   0x14},
24011   {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
24012   {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
24013   {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
24014   {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
24015   {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
24016   {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
24017   {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
24018   {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
24019   {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
24020 };
24021 
/* Table of all shift-in-operand names.  Lower- and upper-case
   spellings are both entered; "asl"/"ASL" map to SHIFT_LSL, i.e. they
   are accepted as synonyms for "lsl".  */
static const struct asm_shift_name shift_names [] =
{
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
24033 
/* Table of all explicit relocation names, mapping each relocation
   operator spelling to its BFD relocation code.  Each name is listed
   in both lower- and upper-case forms.
   Fixed: the upper-case FDPIC gottpoff entry was previously misspelt
   "GOTTPOFF_FDIC" (missing 'P'), so the documented upper-case operator
   was never recognised.  */
#ifdef OBJ_ELF
static struct reloc_entry reloc_names[] =
{
  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
  { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
  { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
  { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDPIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
};
#endif
24067 
/* Table of all conditional affixes.  The value is the 4-bit condition
   code field for the instruction encoding.  "hs" is a synonym for
   "cs" (0x2), and "ul"/"lo" are synonyms for "cc" (0x3).  */
static const struct asm_cond conds[] =
{
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
/* "t"/"e" affixes, with values continuing past the ordinary condition
   codes above.  NOTE(review): presumably the then/else suffixes used
   inside IT/VPT blocks — confirm against the suffix parser.  */
static const struct asm_cond vconds[] =
{
    {"t", 0xf},
    {"e", 0x10}
};
24092 
/* Expand to two table entries for one barrier option: the lower-case
   spelling L and the upper-case spelling U, both with encoding CODE
   and requiring architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Table of barrier option names and their 4-bit encodings.  The
   load-only ("*ld") variants require ARMv8; the rest only need the
   base barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
24118 
/* Table of ARM-format instructions.	*/

/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array
   (hence the deliberate trailing comma in each initializer).  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }
24143 
/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  Note that some macros take the
   mnemonic as a quoted string (e.g. TxCE) while others stringize a
   bare token with #mnem (e.g. C3); callers must match accordingly.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3, but tagged OT_cinfix3_deprecated: the infixed spelling is
   accepted but deprecated.  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }

/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
   condition code field.  */
#define TUF(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* ARM-only variants of all the above.  CE takes the mnemonic as a
   quoted string; C3 stringizes a bare token.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24227 
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2: the
   Thumb opcode is the ARM opcode with 0xe prepended.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.
   Unlike cCE this stringizes a bare token and uses THUMB_VARIANT for
   the Thumb architecture requirement.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case). */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* One entry for mnemonic m1 with condition infix m2 and tail m3.  The
   OT_odd_infix tag records the infix position via sizeof (m1); an
   empty m2 gives the plain (unconditional-infix) spelling.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expand to entries for the bare mnemonic plus one entry per
   condition infix.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)

/* ARM-only, unconditional mnemonic (condition field still 0xE).  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* ARM-only, unconditional mnemonic with 0xF condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24280 
/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE instruction with an overloaded M_MNEM opcode; conditional suffix
   handled as for NCEF, with the MVE-predicable flag set.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Placeholder so table entries with no encoder ("0") expand do_0 to 0.  */
#define do_0 0
24354 
24355 static const struct asm_opcode insns[] =
24356 {
24357 #define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
24358 #define THUMB_VARIANT  & arm_ext_v4t
24359  tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
24360  tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
24361  tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
24362  tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
24363  tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
24364  tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
24365  tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
24366  tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
24367  tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
24368  tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
24369  tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
24370  tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
24371  tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
24372  tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
24373  tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
24374  tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
24375 
24376  /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
24377     for setting PSR flag bits.  They are obsolete in V6 and do not
24378     have Thumb equivalents. */
24379  tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
24380  tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
24381   CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
24382  tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
24383  tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
24384   CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
24385  tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
24386  tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
24387   CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
24388 
24389  tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
24390  tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
24391  tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
24392  tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
24393 
24394  tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
24395  tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24396  tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
24397 								OP_RRnpc),
24398 					OP_ADDRGLDR),ldst, t_ldst),
24399  tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24400 
24401  tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24402  tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24403  tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24404  tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24405  tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24406  tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24407 
24408  tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
24409  TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
24410 
24411   /* Pseudo ops.  */
24412  tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
24413   C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
24414  tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
24415  tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
24416 
24417   /* Thumb-compatibility pseudo ops.  */
24418  tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
24419  tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
24420  tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
24421  tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
24422  tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
24423  tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
24424  tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
24425  tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
24426  tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
24427  tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
24428  tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
24429  tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
24430 
24431  /* These may simplify to neg.  */
24432  TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
24433  TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
24434 
24435 #undef THUMB_VARIANT
24436 #define THUMB_VARIANT  & arm_ext_os
24437 
24438  TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
24439  TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
24440 
24441 #undef  THUMB_VARIANT
24442 #define THUMB_VARIANT  & arm_ext_v6
24443 
24444  TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
24445 
24446  /* V1 instructions with no Thumb analogue prior to V6T2.  */
24447 #undef  THUMB_VARIANT
24448 #define THUMB_VARIANT  & arm_ext_v6t2
24449 
24450  TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
24451  TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
24452   CL("teqp",	130f000,           2, (RR, SH),      cmp),
24453 
24454  TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24455  TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24456  TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
24457  TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24458 
24459  TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24460  TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24461 
24462  TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24463  TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24464 
24465  /* V1 instructions with no Thumb analogue at all.  */
24466   CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
24467   C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
24468 
24469   C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
24470   C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
24471   C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
24472   C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
24473   C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
24474   C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
24475   C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
24476   C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
24477 
24478 #undef  ARM_VARIANT
24479 #define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
24480 #undef  THUMB_VARIANT
24481 #define THUMB_VARIANT  & arm_ext_v4t
24482 
24483  tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
24484  tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
24485 
24486 #undef  THUMB_VARIANT
24487 #define THUMB_VARIANT  & arm_ext_v6t2
24488 
24489  TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24490   C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
24491 
24492   /* Generic coprocessor instructions.	*/
24493  TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
24494  TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24495  TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24496  TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24497  TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24498  TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24499  TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
24500 
24501 #undef  ARM_VARIANT
24502 #define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
24503 
24504   CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24505   C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24506 
24507 #undef  ARM_VARIANT
24508 #define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
24509 #undef  THUMB_VARIANT
24510 #define THUMB_VARIANT  & arm_ext_msr
24511 
24512  TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
24513  TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
24514 
24515 #undef  ARM_VARIANT
24516 #define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
24517 #undef  THUMB_VARIANT
24518 #define THUMB_VARIANT  & arm_ext_v6t2
24519 
24520  TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24521   CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24522  TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24523   CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24524  TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24525   CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24526  TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24527   CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24528 
24529 #undef  ARM_VARIANT
24530 #define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
24531 #undef  THUMB_VARIANT
24532 #define THUMB_VARIANT  & arm_ext_v4t
24533 
24534  tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24535  tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24536  tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24537  tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24538  tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24539  tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24540 
24541 #undef  ARM_VARIANT
24542 #define ARM_VARIANT  & arm_ext_v4t_5
24543 
24544   /* ARM Architecture 4T.  */
24545   /* Note: bx (and blx) are required on V5, even if the processor does
24546      not support Thumb.	 */
24547  TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
24548 
24549 #undef  ARM_VARIANT
24550 #define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
24551 #undef  THUMB_VARIANT
24552 #define THUMB_VARIANT  & arm_ext_v5t
24553 
24554   /* Note: blx has 2 variants; the .value coded here is for
24555      BLX(2).  Only this variant has conditional execution.  */
24556  TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
24557  TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
24558 
24559 #undef  THUMB_VARIANT
24560 #define THUMB_VARIANT  & arm_ext_v6t2
24561 
24562  TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
24563  TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
24564  TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
24565  TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
24566  TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
24567  TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
24568  TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24569  TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24570 
24571 #undef  ARM_VARIANT
24572 #define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
24573 #undef  THUMB_VARIANT
24574 #define THUMB_VARIANT  & arm_ext_v5exp
24575 
24576  TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24577  TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24578  TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24579  TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24580 
24581  TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24582  TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24583 
24584  TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24585  TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24586  TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24587  TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24588 
24589  TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24590  TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24591  TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24592  TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24593 
24594  TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24595  TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24596 
24597  TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24598  TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24599  TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24600  TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24601 
24602 #undef  ARM_VARIANT
24603 #define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
24604 #undef  THUMB_VARIANT
24605 #define THUMB_VARIANT  & arm_ext_v6t2
24606 
24607  TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
24608  TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
24609      ldrd, t_ldstd),
24610  TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
24611 				       ADDRGLDRS), ldrd, t_ldstd),
24612 
24613  TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24614  TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24615 
24616 #undef  ARM_VARIANT
24617 #define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
24618 
24619  TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
24620 
24621 #undef  ARM_VARIANT
24622 #define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
24623 #undef  THUMB_VARIANT
24624 #define THUMB_VARIANT  & arm_ext_v6
24625 
24626  TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
24627  TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
24628  tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24629  tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24630  tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24631  tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24632  tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24633  tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24634  tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24635  TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
24636 
24637 #undef  THUMB_VARIANT
24638 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
24639 
24640  TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
24641  TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24642 				      strex,  t_strex),
24643 #undef  THUMB_VARIANT
24644 #define THUMB_VARIANT  & arm_ext_v6t2
24645 
24646  TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24647  TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24648 
24649  TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
24650  TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
24651 
24652 /*  ARM V6 not included in V7M.  */
24653 #undef  THUMB_VARIANT
24654 #define THUMB_VARIANT  & arm_ext_v6_notm
24655  TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24656  TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24657   UF(rfeib,	9900a00,           1, (RRw),			   rfe),
24658   UF(rfeda,	8100a00,           1, (RRw),			   rfe),
24659  TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
24660  TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24661   UF(rfefa,	8100a00,           1, (RRw),			   rfe),
24662  TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
24663   UF(rfeed,	9900a00,           1, (RRw),			   rfe),
24664  TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24665  TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24666  TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24667   UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
24668   UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
24669   UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
24670   UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
24671  TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
24672  TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
24673  TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
24674 
24675 /*  ARM V6 not included in V7M (eg. integer SIMD).  */
24676 #undef  THUMB_VARIANT
24677 #define THUMB_VARIANT  & arm_ext_v6_dsp
24678  TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
24679  TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
24680  TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24681  TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24682  TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24683  /* Old name for QASX.  */
24684  TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24685  TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24686  /* Old name for QSAX.  */
24687  TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24688  TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24689  TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24690  TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24691  TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24692  TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24693  /* Old name for SASX.  */
24694  TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24695  TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24696  TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24697  TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24698  /* Old name for SHASX.  */
24699  TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24700  TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24701  /* Old name for SHSAX.  */
24702  TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24703  TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24704  TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24705  TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24706  /* Old name for SSAX.  */
24707  TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24708  TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24709  TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24710  TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24711  TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24712  TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24713  /* Old name for UASX.  */
24714  TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24715  TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24716  TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24717  TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24718  /* Old name for UHASX.  */
24719  TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24720  TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24721  /* Old name for UHSAX.  */
24722  TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24723  TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24724  TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24725  TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24726  TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24727  TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24728  /* Old name for UQASX.  */
24729  TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24730  TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24731  /* Old name for UQSAX.  */
24732  TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24733  TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24734  TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24735  TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24736  TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24737  /* Old name for USAX.  */
24738  TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24739  TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24740  TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24741  TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24742  TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24743  TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
24744  TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24745  TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24746  TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24747  TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
24748  TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24749  TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24750  TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24751  TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24752  TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24753  TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24754  TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24755  TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24756  TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24757  TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24758  TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24759  TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24760  TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24761  TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24762  TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24763  TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24764  TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24765  TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24766  TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24767  TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
24768  TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
24769  TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
24770  TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
24771  TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
24772 
24773 #undef  ARM_VARIANT
24774 #define ARM_VARIANT   & arm_ext_v6k_v6t2
24775 #undef  THUMB_VARIANT
24776 #define THUMB_VARIANT & arm_ext_v6k_v6t2
24777 
24778  tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
24779  tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
24780  tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
24781  tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
24782 
24783 #undef  THUMB_VARIANT
24784 #define THUMB_VARIANT  & arm_ext_v6_notm
24785  TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24786 				      ldrexd, t_ldrexd),
24787  TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24788 				       RRnpcb), strexd, t_strexd),
24789 
24790 #undef  THUMB_VARIANT
24791 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
24792  TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24793      rd_rn,  rd_rn),
24794  TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24795      rd_rn,  rd_rn),
24796  TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24797      strex, t_strexbh),
24798  TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24799      strex, t_strexbh),
24800  TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
24801 
24802 #undef  ARM_VARIANT
24803 #define ARM_VARIANT    & arm_ext_sec
24804 #undef  THUMB_VARIANT
24805 #define THUMB_VARIANT  & arm_ext_sec
24806 
24807  TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
24808 
24809 #undef	ARM_VARIANT
24810 #define	ARM_VARIANT    & arm_ext_virt
24811 #undef	THUMB_VARIANT
24812 #define	THUMB_VARIANT    & arm_ext_virt
24813 
24814  TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24815  TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
24816 
24817 #undef	ARM_VARIANT
24818 #define	ARM_VARIANT    & arm_ext_pan
24819 #undef	THUMB_VARIANT
24820 #define	THUMB_VARIANT  & arm_ext_pan
24821 
24822  TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
24823 
24824 #undef  ARM_VARIANT
24825 #define ARM_VARIANT    & arm_ext_v6t2
24826 #undef  THUMB_VARIANT
24827 #define THUMB_VARIANT  & arm_ext_v6t2
24828 
24829  TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
24830  TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24831  TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
24832  TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
24833 
24834  TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24835  TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
24836 
24837  TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24838  TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24839  TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24840  TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24841 
24842 #undef  ARM_VARIANT
24843 #define ARM_VARIANT    & arm_ext_v3
24844 #undef  THUMB_VARIANT
24845 #define THUMB_VARIANT  & arm_ext_v6t2
24846 
24847  TUE("csdb",	320f014, f3af8014, 0, (), noargs, t_csdb),
24848  TUF("ssbb",	57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24849  TUF("pssbb",	57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24850 
24851 #undef  ARM_VARIANT
24852 #define ARM_VARIANT    & arm_ext_v6t2
24853 #undef  THUMB_VARIANT
24854 #define THUMB_VARIANT  & arm_ext_v6t2_v8m
24855  TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
24856  TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
24857 
24858  /* Thumb-only instructions.  */
24859 #undef  ARM_VARIANT
24860 #define ARM_VARIANT NULL
24861   TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
24862   TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
24863 
24864  /* ARM does not really have an IT instruction, so always allow it.
24865     The opcode is copied from Thumb in order to allow warnings in
24866     -mimplicit-it=[never | arm] modes.  */
24867 #undef  ARM_VARIANT
24868 #define ARM_VARIANT  & arm_ext_v1
24869 #undef  THUMB_VARIANT
24870 #define THUMB_VARIANT  & arm_ext_v6t2
24871 
24872  TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
24873  TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
24874  TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
24875  TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
24876  TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
24877  TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
24878  TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
24879  TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
24880  TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
24881  TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
24882  TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
24883  TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
24884  TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
24885  TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
24886  TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
24887  /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
24888  TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24889  TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24890 
24891  /* Thumb2 only instructions.  */
24892 #undef  ARM_VARIANT
24893 #define ARM_VARIANT  NULL
24894 
24895  TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24896  TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24897  TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
24898  TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
24899  TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
24900  TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
24901 
24902  /* Hardware division instructions.  */
24903 #undef  ARM_VARIANT
24904 #define ARM_VARIANT    & arm_ext_adiv
24905 #undef  THUMB_VARIANT
24906 #define THUMB_VARIANT  & arm_ext_div
24907 
24908  TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24909  TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24910 
24911  /* ARM V6M/V7 instructions.  */
24912 #undef  ARM_VARIANT
24913 #define ARM_VARIANT    & arm_ext_barrier
24914 #undef  THUMB_VARIANT
24915 #define THUMB_VARIANT  & arm_ext_barrier
24916 
24917  TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24918  TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24919  TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24920 
24921  /* ARM V7 instructions.  */
24922 #undef  ARM_VARIANT
24923 #define ARM_VARIANT    & arm_ext_v7
24924 #undef  THUMB_VARIANT
24925 #define THUMB_VARIANT  & arm_ext_v7
24926 
24927  TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
24928  TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
24929 
24930 #undef  ARM_VARIANT
24931 #define ARM_VARIANT    & arm_ext_mp
24932 #undef  THUMB_VARIANT
24933 #define THUMB_VARIANT  & arm_ext_mp
24934 
24935  TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
24936 
24937  /* AArchv8 instructions.  */
24938 #undef  ARM_VARIANT
24939 #define ARM_VARIANT   & arm_ext_v8
24940 
24941 /* Instructions shared between armv8-a and armv8-m.  */
24942 #undef  THUMB_VARIANT
24943 #define THUMB_VARIANT & arm_ext_atomics
24944 
24945  TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
24946  TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24947  TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24948  TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24949  TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24950  TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24951  TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
24952  TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
24953  TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24954  TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
24955 							stlex,  t_stlex),
24956  TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
24957 							stlex, t_stlex),
24958  TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
24959 							stlex, t_stlex),
24960 #undef  THUMB_VARIANT
24961 #define THUMB_VARIANT & arm_ext_v8
24962 
24963  tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
24964  TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
24965 							ldrexd, t_ldrexd),
24966  TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
24967 							strexd, t_strexd),
24968 #undef THUMB_VARIANT
24969 #define THUMB_VARIANT & arm_ext_v8r
24970 #undef ARM_VARIANT
24971 #define ARM_VARIANT & arm_ext_v8r
24972 
24973 /* ARMv8-R instructions.  */
24974  TUF("dfb",	57ff04c, f3bf8f4c, 0, (), noargs, noargs),
24975 
24976 /* Defined in V8 but is in undefined encoding space for earlier
24977    architectures.  However earlier architectures are required to treat
   this instruction as a semihosting trap as well.  Hence while not explicitly
24979    defined as such, it is in fact correct to define the instruction for all
24980    architectures.  */
24981 #undef  THUMB_VARIANT
24982 #define THUMB_VARIANT  & arm_ext_v1
24983 #undef  ARM_VARIANT
24984 #define ARM_VARIANT  & arm_ext_v1
24985  TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
24986 
24987  /* ARMv8 T32 only.  */
24988 #undef  ARM_VARIANT
24989 #define ARM_VARIANT  NULL
24990  TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
24991  TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
24992  TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
24993 
24994   /* FP for ARMv8.  */
24995 #undef  ARM_VARIANT
24996 #define ARM_VARIANT   & fpu_vfp_ext_armv8xd
24997 #undef  THUMB_VARIANT
24998 #define THUMB_VARIANT & fpu_vfp_ext_armv8xd
24999 
25000   nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
25001   nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
25002   nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
25003   nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
25004   nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
25005   mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintz),
25006   mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintx),
25007   mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrinta),
25008   mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintn),
25009   mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintp),
25010   mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintm),
25011 
25012   /* Crypto v1 extensions.  */
25013 #undef  ARM_VARIANT
25014 #define ARM_VARIANT & fpu_crypto_ext_armv8
25015 #undef  THUMB_VARIANT
25016 #define THUMB_VARIANT & fpu_crypto_ext_armv8
25017 
25018   nUF(aese, _aes, 2, (RNQ, RNQ), aese),
25019   nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
25020   nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
25021   nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
25022   nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
25023   nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
25024   nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
25025   nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
25026   nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
25027   nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
25028   nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
25029   nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
25030   nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
25031   nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
25032 
25033 #undef  ARM_VARIANT
25034 #define ARM_VARIANT   & arm_ext_crc
25035 #undef  THUMB_VARIANT
25036 #define THUMB_VARIANT & arm_ext_crc
25037   TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
25038   TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
25039   TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
25040   TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
25041   TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
25042   TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
25043 
25044  /* ARMv8.2 RAS extension.  */
25045 #undef  ARM_VARIANT
25046 #define ARM_VARIANT   & arm_ext_ras
25047 #undef  THUMB_VARIANT
25048 #define THUMB_VARIANT & arm_ext_ras
25049  TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),
25050 
25051 #undef  ARM_VARIANT
25052 #define ARM_VARIANT   & arm_ext_v8_3
25053 #undef  THUMB_VARIANT
25054 #define THUMB_VARIANT & arm_ext_v8_3
25055  NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
25056 
25057 #undef  ARM_VARIANT
25058 #define ARM_VARIANT   & fpu_neon_ext_dotprod
25059 #undef  THUMB_VARIANT
25060 #define THUMB_VARIANT & fpu_neon_ext_dotprod
25061  NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
25062  NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
25063 
25064 #undef  ARM_VARIANT
25065 #define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
25066 #undef  THUMB_VARIANT
25067 #define THUMB_VARIANT NULL
25068 
25069  cCE("wfs",	e200110, 1, (RR),	     rd),
25070  cCE("rfs",	e300110, 1, (RR),	     rd),
25071  cCE("wfc",	e400110, 1, (RR),	     rd),
25072  cCE("rfc",	e500110, 1, (RR),	     rd),
25073 
25074  cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25075  cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25076  cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25077  cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25078 
25079  cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25080  cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25081  cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25082  cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25083 
25084  cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
25085  cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
25086  cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
25087  cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
25088  cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
25089  cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
25090  cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
25091  cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
25092  cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
25093  cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
25094  cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
25095  cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
25096 
25097  cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
25098  cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
25099  cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
25100  cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
25101  cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
25102  cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
25103  cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
25104  cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
25105  cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
25106  cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
25107  cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
25108  cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
25109 
25110  cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
25111  cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
25112  cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
25113  cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
25114  cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
25115  cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
25116  cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
25117  cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
25118  cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
25119  cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
25120  cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
25121  cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
25122 
25123  cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
25124  cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
25125  cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
25126  cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
25127  cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
25128  cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
25129  cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
25130  cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
25131  cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
25132  cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
25133  cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
25134  cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
25135 
25136  cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
25137  cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
25138  cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
25139  cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
25140  cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
25141  cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
25142  cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
25143  cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
25144  cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
25145  cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
25146  cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
25147  cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
25148 
25149  cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
25150  cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
25151  cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
25152  cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
25153  cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
25154  cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
25155  cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
25156  cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
25157  cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
25158  cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
25159  cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
25160  cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
25161 
25162  cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
25163  cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
25164  cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
25165  cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
25166  cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
25167  cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
25168  cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
25169  cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
25170  cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
25171  cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
25172  cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
25173  cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
25174 
25175  cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
25176  cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
25177  cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
25178  cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
25179  cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
25180  cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
25181  cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
25182  cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
25183  cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
25184  cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
25185  cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
25186  cCL("expdz",	e788160, 2, (RF, RF_IF),     rd_rm),
25187 
25188  cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
25189  cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
25190  cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
25191  cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
25192  cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
25193  cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
25194  cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
25195  cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
25196  cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
25197  cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
25198  cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
25199  cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
25200 
25201  cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
25202  cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
25203  cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
25204  cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
25205  cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
25206  cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
25207  cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
25208  cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
25209  cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
25210  cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
25211  cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
25212  cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
25213 
25214  cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
25215  cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
25216  cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
25217  cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
25218  cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
25219  cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
25220  cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
25221  cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
25222  cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
25223  cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
25224  cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
25225  cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
25226 
25227  cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
25228  cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
25229  cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
25230  cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
25231  cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
25232  cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
25233  cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
25234  cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
25235  cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
25236  cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
25237  cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
25238  cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
25239 
25240  cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
25241  cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
25242  cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
25243  cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
25244  cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
25245  cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
25246  cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
25247  cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
25248  cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
25249  cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
25250  cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
25251  cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
25252 
25253  cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
25254  cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
25255  cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
25256  cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
25257  cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
25258  cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
25259  cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
25260  cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
25261  cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
25262  cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
25263  cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
25264  cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
25265 
25266  cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
25267  cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
25268  cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
25269  cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
25270  cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
25271  cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
25272  cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
25273  cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
25274  cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
25275  cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
25276  cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
25277  cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
25278 
25279  cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
25280  cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
25281  cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
25282  cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
25283  cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
25284  cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
25285  cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
25286  cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
25287  cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
25288  cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
25289  cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
25290  cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
25291 
25292  cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
25293  cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
25294  cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
25295  cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
25296  cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
25297  cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25298  cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25299  cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25300  cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
25301  cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
25302  cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
25303  cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
25304 
25305  cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
25306  cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
25307  cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
25308  cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
25309  cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
25310  cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25311  cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25312  cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25313  cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
25314  cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
25315  cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
25316  cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
25317 
25318  cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
25319  cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
25320  cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
25321  cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
25322  cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
25323  cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25324  cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25325  cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25326  cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
25327  cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
25328  cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
25329  cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
25330 
25331  cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
25332  cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
25333  cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
25334  cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
25335  cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
25336  cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25337  cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25338  cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25339  cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
25340  cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
25341  cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
25342  cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
25343 
25344  cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
25345  cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
25346  cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
25347  cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
25348  cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
25349  cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25350  cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25351  cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25352  cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
25353  cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
25354  cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
25355  cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
25356 
25357  cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
25358  cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
25359  cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
25360  cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
25361  cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
25362  cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25363  cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25364  cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25365  cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
25366  cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
25367  cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
25368  cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
25369 
25370  cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
25371  cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
25372  cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
25373  cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
25374  cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
25375  cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25376  cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25377  cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25378  cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
25379  cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
25380  cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
25381  cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
25382 
25383  cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
25384  cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
25385  cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
25386  cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
25387  cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
25388  cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25389  cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25390  cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25391  cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
25392  cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
25393  cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
25394  cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
25395 
25396  cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
25397  cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
25398  cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
25399  cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
25400  cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
25401  cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25402  cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25403  cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25404  cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
25405  cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
25406  cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
25407  cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
25408 
25409  cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
25410  cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
25411  cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
25412  cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
25413  cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
25414  cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25415  cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25416  cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25417  cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
25418  cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
25419  cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
25420  cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
25421 
25422  cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25423  cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25424  cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25425  cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25426  cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25427  cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25428  cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25429  cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25430  cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25431  cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25432  cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25433  cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25434 
25435  cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25436  cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25437  cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25438  cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25439  cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25440  cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25441  cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25442  cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25443  cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25444  cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25445  cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25446  cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25447 
25448  cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25449  cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25450  cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25451  cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25452  cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25453  cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25454  cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25455  cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25456  cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25457  cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25458  cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25459  cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25460 
25461  cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
25462  C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
25463  cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
25464  C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
25465 
25466  cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
25467  cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
25468  cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
25469  cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
25470  cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
25471  cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
25472  cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
25473  cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
25474  cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
25475  cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
25476  cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
25477  cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
25478 
25479   /* The implementation of the FIX instruction is broken on some
25480      assemblers, in that it accepts a precision specifier as well as a
25481      rounding specifier, despite the fact that this is meaningless.
25482      To be more compatible, we accept it as well, though of course it
25483      does not set any bits.  */
25484  cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
25485  cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
25486  cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
25487  cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
25488  cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
25489  cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
25490  cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
25491  cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
25492  cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
25493  cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
25494  cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
25495  cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
25496  cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
25497 
25498   /* Instructions that were new with the real FPA, call them V2.  */
25499 #undef  ARM_VARIANT
25500 #define ARM_VARIANT  & fpu_fpa_ext_v2
25501 
25502  cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25503  cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25504  cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25505  cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25506  cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25507  cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25508 
25509 #undef  ARM_VARIANT
25510 #define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
25511 #undef THUMB_VARIANT
25512 #define THUMB_VARIANT  & arm_ext_v6t2
25513  mcCE(vmrs,	ef00a10, 2, (APSR_RR, RVC),   vmrs),
25514  mcCE(vmsr,	ee00a10, 2, (RVC, RR),        vmsr),
25515  mcCE(fldd,	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
25516  mcCE(fstd,	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
25517  mcCE(flds,	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
25518  mcCE(fsts,	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
25519 
25520   /* Memory operations.	 */
25521  mcCE(fldmias,	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25522  mcCE(fldmdbs,	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25523  mcCE(fstmias,	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25524  mcCE(fstmdbs,	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25525 #undef THUMB_VARIANT
25526 
25527   /* Moves and type conversions.  */
25528  cCE("fmstat",	ef1fa10, 0, (),		      noargs),
25529  cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25530  cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25531  cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25532  cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25533  cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25534  cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25535  cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
25536  cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
25537 
25538   /* Memory operations.	 */
25539  cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25540  cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25541  cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25542  cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25543  cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25544  cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25545  cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25546  cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25547  cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25548  cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25549  cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25550  cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25551 
25552   /* Monadic operations.  */
25553  cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25554  cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25555  cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25556 
25557   /* Dyadic operations.	 */
25558  cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25559  cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25560  cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25561  cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25562  cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25563  cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25564  cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25565  cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25566  cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25567 
25568   /* Comparisons.  */
25569  cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25570  cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
25571  cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25572  cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
25573 
25574  /* Double precision load/store are still present on single precision
25575     implementations.  */
25576  cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25577  cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25578  cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25579  cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25580  cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25581  cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25582  cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25583  cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25584 
25585 #undef  ARM_VARIANT
25586 #define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
25587 
25588   /* Moves and type conversions.  */
25589  cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25590  cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25591  cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25592  cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25593  cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25594  cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25595  cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25596  cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25597  cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25598  cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25599  cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25600  cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25601 
25602   /* Monadic operations.  */
25603  cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25604  cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25605  cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25606 
25607   /* Dyadic operations.	 */
25608  cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25609  cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25610  cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25611  cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25612  cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25613  cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25614  cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25615  cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25616  cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25617 
25618   /* Comparisons.  */
25619  cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25620  cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
25621  cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25622  cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
25623 
25624 /* Instructions which may belong to either the Neon or VFP instruction sets.
25625    Individual encoder functions perform additional architecture checks.  */
25626 #undef  ARM_VARIANT
25627 #define ARM_VARIANT    & fpu_vfp_ext_v1xd
25628 #undef  THUMB_VARIANT
25629 #define THUMB_VARIANT  & arm_ext_v6t2
25630 
25631  NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25632  NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25633  NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25634  NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25635  NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25636  NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25637 
25638  NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
25639  NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
25640 
25641 #undef  THUMB_VARIANT
25642 #define THUMB_VARIANT  & fpu_vfp_ext_v1xd
25643 
25644   /* These mnemonics are unique to VFP.  */
25645  NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
25646  NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25647  nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25648  nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25649  nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25650  NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
25651 
25652   /* Mnemonics shared by Neon and VFP.  */
25653  nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25654 
25655  mnCEF(vcvt,     _vcvt,   3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25656  nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
25657  MNCEF(vcvtb,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25658  MNCEF(vcvtt,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25659 
25660 
25661   /* NOTE: All VMOV encoding is special-cased!  */
25662  NCE(vmovq,     0,       1, (VMOV), neon_mov),
25663 
25664 #undef  THUMB_VARIANT
25665 /* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25666    by different feature bits.  Since we are setting the Thumb guard, we can
25667    require Thumb-1 which makes it a nop guard and set the right feature bit in
25668    do_vldr_vstr ().  */
25669 #define THUMB_VARIANT  & arm_ext_v4t
25670  NCE(vldr,      d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25671  NCE(vstr,      d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25672 
25673 #undef  ARM_VARIANT
25674 #define ARM_VARIANT    & arm_ext_fp16
25675 #undef  THUMB_VARIANT
25676 #define THUMB_VARIANT  & arm_ext_fp16
25677  /* New instructions added from v8.2, allowing the extraction and insertion of
25678     the upper 16 bits of a 32-bit vector register.  */
25679  NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
25680  NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
25681 
25682  /* New backported fma/fms instructions optional in v8.2.  */
25683  NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25684  NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25685 
25686 #undef  THUMB_VARIANT
25687 #define THUMB_VARIANT  & fpu_neon_ext_v1
25688 #undef  ARM_VARIANT
25689 #define ARM_VARIANT    & fpu_neon_ext_v1
25690 
25691   /* Data processing with three registers of the same length.  */
25692   /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
25693  NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
25694  NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
25695  NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25696  NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25697  NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25698   /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
25699  NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25700  NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25701  NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25702  NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25703   /* If not immediate, fall back to neon_dyadic_i64_su.
25704      shl should accept I8 I16 I32 I64,
25705      qshl should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
25706  nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl),
25707  nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl),
25708   /* Logic ops, types optional & ignored.  */
25709  nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25710  nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25711  nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25712  nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25713  nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
25714   /* Bitfield ops, untyped.  */
25715  NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25716  NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25717  NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25718  NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25719  NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25720  NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25721   /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
25722  nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25723  nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25724  nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25725   /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25726      back to neon_dyadic_if_su.  */
25727  nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25728  nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25729  nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25730  nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25731  nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25732  nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25733  nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25734  nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25735   /* Comparison. Type I8 I16 I32 F32.  */
25736  nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25737  nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
25738   /* As above, D registers only.  */
25739  nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25740  nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25741   /* Int and float variants, signedness unimportant.  */
25742  nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25743  nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25744  nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
25745   /* Add/sub take types I8 I16 I32 I64 F32.  */
25746  nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25747  nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25748   /* vtst takes sizes 8, 16, 32.  */
25749  NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25750  NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
25751   /* VMUL takes I8 I16 I32 F32 P8.  */
25752  nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
25753   /* VQD{R}MULH takes S16 S32.  */
25754  nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25755  nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25756  NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25757  NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25758  NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25759  NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25760  NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25761  NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25762  NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25763  NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25764  NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25765  NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25766  NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25767  NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25768  /* ARM v8.1 extension.  */
25769  nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25770  nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25771  nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25772 
25773   /* Two address, int/float. Types S8 S16 S32 F32.  */
25774  NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
25775  NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
25776 
25777   /* Data processing with two registers and a shift amount.  */
25778   /* Right shifts, and variants with rounding.
25779      Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
25780  NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25781  NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25782  NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25783  NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25784  NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25785  NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25786   /* Shift and insert. Sizes accepted 8 16 32 64.  */
25787  NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
25788  NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
25789   /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
25790  NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
25791   /* Right shift immediate, saturating & narrowing, with rounding variants.
25792      Types accepted S16 S32 S64 U16 U32 U64.  */
25793  NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25794  NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25795   /* As above, unsigned. Types accepted S16 S32 S64.  */
25796  NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25797  NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25798   /* Right shift narrowing. Types accepted I16 I32 I64.  */
25799  NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25800  NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25801   /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
25802  nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
25803   /* CVT with optional immediate for fixed-point variant.  */
25804  nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
25805 
25806  nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
25807 
25808   /* Data processing, three registers of different lengths.  */
25809   /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
25810  NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
25811   /* If not scalar, fall back to neon_dyadic_long.
25812      Vector types as above, scalar types S16 S32 U16 U32.  */
25813  nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25814  nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25815   /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
25816  NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25817  NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25818   /* Dyadic, narrowing insns. Types I16 I32 I64.  */
25819  NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25820  NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25821  NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25822  NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25823   /* Saturating doubling multiplies. Types S16 S32.  */
25824  nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25825  nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25826  nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25827   /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25828      S16 S32 U16 U32.  */
25829  nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
25830 
25831   /* Extract. Size 8.  */
25832  NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25833  NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
25834 
25835   /* Two registers, miscellaneous.  */
25836   /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
25837  NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
25838  NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
25839  NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
25840   /* Vector replicate. Sizes 8 16 32.  */
25841  nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
25842   /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
25843  NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
25844   /* VMOVN. Types I16 I32 I64.  */
25845  nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
25846   /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
25847  nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
25848   /* VQMOVUN. Types S16 S32 S64.  */
25849  nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
25850   /* VZIP / VUZP. Sizes 8 16 32.  */
25851  NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25852  NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
25853  NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25854  NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
25855   /* VQABS / VQNEG. Types S8 S16 S32.  */
25856  NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25857  NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25858   /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
25859  NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
25860  NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
25861  NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
25862  NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
25863   /* Reciprocal estimates.  Types U32 F16 F32.  */
25864  NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
25865  NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
25866  NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
25867  NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
25868   /* VCLS. Types S8 S16 S32.  */
25869  NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
25870   /* VCLZ. Types I8 I16 I32.  */
25871  NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
25872   /* VCNT. Size 8.  */
25873  NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
25874  NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
25875   /* Two address, untyped.  */
25876  NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
25877  NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
25878   /* VTRN. Sizes 8 16 32.  */
25879  nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
25880  nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
25881 
25882   /* Table lookup. Size 8.  */
25883  NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25884  NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25885 
25886 #undef  THUMB_VARIANT
25887 #define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
25888 #undef  ARM_VARIANT
25889 #define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
25890 
25891   /* Neon element/structure load/store.  */
25892  nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25893  nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25894  nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25895  nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25896  nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25897  nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25898  nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25899  nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25900 
25901 #undef  THUMB_VARIANT
25902 #define THUMB_VARIANT & fpu_vfp_ext_v3xd
25903 #undef  ARM_VARIANT
25904 #define ARM_VARIANT   & fpu_vfp_ext_v3xd
25905  cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
25906  cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25907  cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25908  cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25909  cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25910  cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25911  cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25912  cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25913  cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25914 
25915 #undef  THUMB_VARIANT
25916 #define THUMB_VARIANT  & fpu_vfp_ext_v3
25917 #undef  ARM_VARIANT
25918 #define ARM_VARIANT    & fpu_vfp_ext_v3
25919 
25920  cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
25921  cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25922  cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25923  cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25924  cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25925  cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25926  cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25927  cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25928  cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25929 
25930 #undef  ARM_VARIANT
25931 #define ARM_VARIANT    & fpu_vfp_ext_fma
25932 #undef  THUMB_VARIANT
25933 #define THUMB_VARIANT  & fpu_vfp_ext_fma
25934  /* Mnemonics shared by Neon, VFP, MVE and BF16.  These are included in the
25935     VFP FMA variant; NEON and VFP FMA always includes the NEON
25936     FMA instructions.  */
25937  mnCEF(vfma,     _vfma,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25938  TUF ("vfmat",    c300850,    fc300850,  3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25939  mnCEF(vfms,     _vfms,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),  neon_fmac),
25940 
25941  /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
25942     the v form should always be used.  */
25943  cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25944  cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25945  cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25946  cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25947  nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25948  nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25949 
25950 #undef THUMB_VARIANT
25951 #undef  ARM_VARIANT
25952 #define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
25953 
25954  cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25955  cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25956  cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25957  cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25958  cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25959  cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25960  cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25961  cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25962 
25963 #undef  ARM_VARIANT
25964 #define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
25965 
25966  cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
25967  cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
25968  cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
25969  cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
25970  cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
25971  cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
25972  cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
25973  cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
25974  cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
25975  cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25976  cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25977  cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25978  cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25979  cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25980  cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25981  cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25982  cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25983  cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25984  cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
25985  cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
25986  cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25987  cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25988  cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25989  cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25990  cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25991  cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25992  cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
25993  cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
25994  cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
25995  cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
25996  cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
25997  cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
25998  cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
25999  cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
26000  cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
26001  cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
26002  cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
26003  cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26004  cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26005  cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26006  cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26007  cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26008  cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26009  cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26010  cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26011  cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26012  cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
26013  cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26014  cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26015  cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26016  cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26017  cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26018  cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26019  cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26020  cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26021  cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26022  cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26023  cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26024  cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26025  cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26026  cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26027  cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26028  cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26029  cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26030  cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26031  cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26032  cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26033  cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26034  cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
26035  cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
26036  cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26037  cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26038  cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26039  cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26040  cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26041  cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26042  cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26043  cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26044  cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26045  cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26046  cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26047  cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26048  cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26049  cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26050  cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26051  cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26052  cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26053  cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26054  cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
26055  cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26056  cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26057  cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26058  cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26059  cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26060  cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26061  cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26062  cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26063  cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26064  cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26065  cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26066  cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26067  cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26068  cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26069  cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26070  cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26071  cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26072  cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26073  cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26074  cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26075  cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26076  cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
26077  cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26078  cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26079  cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26080  cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26081  cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26082  cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26083  cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26084  cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26085  cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26086  cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26087  cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26088  cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26089  cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26090  cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26091  cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26092  cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26093  cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26094  cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26095  cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26096  cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26097  cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
26098  cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
26099  cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26100  cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26101  cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26102  cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26103  cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26104  cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26105  cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26106  cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26107  cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26108  cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
26109  cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
26110  cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
26111  cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
26112  cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
26113  cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
26114  cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26115  cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26116  cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26117  cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
26118  cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
26119  cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
26120  cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
26121  cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
26122  cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
26123  cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26124  cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26125  cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26126  cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26127  cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
26128 
26129 #undef  ARM_VARIANT
26130 #define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
26131 
26132  cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
26133  cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
26134  cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
26135  cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
26136  cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
26137  cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
26138  cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26139  cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26140  cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26141  cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26142  cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26143  cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26144  cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26145  cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26146  cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26147  cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26148  cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26149  cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26150  cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26151  cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26152  cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
26153  cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26154  cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26155  cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26156  cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26157  cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26158  cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26159  cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26160  cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26161  cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26162  cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26163  cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26164  cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26165  cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26166  cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26167  cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26168  cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26169  cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26170  cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26171  cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26172  cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26173  cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26174  cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26175  cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26176  cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26177  cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26178  cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26179  cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26180  cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26181  cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26182  cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26183  cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26184  cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26185  cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26186  cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26187  cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26188  cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26189 
26190 #undef  ARM_VARIANT
26191 #define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
26192 
26193  cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
26194  cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
26195  cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
26196  cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
26197  cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
26198  cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
26199  cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
26200  cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
26201  cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
26202  cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
26203  cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
26204  cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
26205  cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
26206  cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
26207  cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
26208  cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
26209  cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
26210  cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
26211  cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
26212  cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
26213  cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
26214  cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
26215  cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
26216  cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
26217  cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
26218  cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
26219  cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
26220  cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
26221  cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
26222  cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
26223  cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
26224  cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
26225  cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
26226  cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
26227  cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
26228  cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
26229  cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
26230  cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
26231  cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
26232  cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
26233  cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
26234  cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
26235  cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
26236  cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
26237  cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
26238  cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
26239  cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
26240  cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
26241  cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
26242  cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
26243  cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
26244  cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
26245  cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
26246  cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
26247  cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26248  cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26249  cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26250  cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26251  cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26252  cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26253  cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
26254  cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
26255  cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
26256  cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
26257  cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26258  cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26259  cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26260  cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26261  cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26262  cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26263  cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26264  cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26265  cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26266  cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26267  cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26268  cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26269 
26270  /* ARMv8.5-A instructions.  */
26271 #undef  ARM_VARIANT
26272 #define ARM_VARIANT   & arm_ext_sb
26273 #undef  THUMB_VARIANT
26274 #define THUMB_VARIANT & arm_ext_sb
26275  TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
26276 
26277 #undef  ARM_VARIANT
26278 #define ARM_VARIANT   & arm_ext_predres
26279 #undef  THUMB_VARIANT
26280 #define THUMB_VARIANT & arm_ext_predres
26281  CE("cfprctx", e070f93, 1, (RRnpc), rd),
26282  CE("dvprctx", e070fb3, 1, (RRnpc), rd),
26283  CE("cpprctx", e070ff3, 1, (RRnpc), rd),
26284 
26285  /* ARMv8-M instructions.  */
26286 #undef  ARM_VARIANT
26287 #define ARM_VARIANT NULL
26288 #undef  THUMB_VARIANT
26289 #define THUMB_VARIANT & arm_ext_v8m
26290  ToU("sg",    e97fe97f,	0, (),		   noargs),
26291  ToC("blxns", 4784,	1, (RRnpc),	   t_blx),
26292  ToC("bxns",  4704,	1, (RRnpc),	   t_bx),
26293  ToC("tt",    e840f000,	2, (RRnpc, RRnpc), tt),
26294  ToC("ttt",   e840f040,	2, (RRnpc, RRnpc), tt),
26295  ToC("tta",   e840f080,	2, (RRnpc, RRnpc), tt),
26296  ToC("ttat",  e840f0c0,	2, (RRnpc, RRnpc), tt),
26297 
26298  /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
26299     instructions behave as nop if no VFP is present.  */
26300 #undef  THUMB_VARIANT
26301 #define THUMB_VARIANT & arm_ext_v8m_main
26302  ToC("vlldm", ec300a00, 1, (RRnpc), rn),
26303  ToC("vlstm", ec200a00, 1, (RRnpc), rn),
26304 
26305  /* Armv8.1-M Mainline instructions.  */
26306 #undef  THUMB_VARIANT
26307 #define THUMB_VARIANT & arm_ext_v8_1m_main
26308  toU("cinc",  _cinc,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26309  toU("cinv",  _cinv,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26310  toU("cneg",  _cneg,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26311  toU("csel",  _csel,  4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26312  toU("csetm", _csetm, 2, (RRnpcsp, COND),		t_cond),
26313  toU("cset",  _cset,  2, (RRnpcsp, COND),		t_cond),
26314  toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26315  toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26316  toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26317 
26318  toC("bf",     _bf,	2, (EXPs, EXPs),	     t_branch_future),
26319  toU("bfcsel", _bfcsel,	4, (EXPs, EXPs, EXPs, COND), t_branch_future),
26320  toC("bfx",    _bfx,	2, (EXPs, RRnpcsp),	     t_branch_future),
26321  toC("bfl",    _bfl,	2, (EXPs, EXPs),	     t_branch_future),
26322  toC("bflx",   _bflx,	2, (EXPs, RRnpcsp),	     t_branch_future),
26323 
26324  toU("dls", _dls, 2, (LR, RRnpcsp),	 t_loloop),
26325  toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
26326  toU("le",  _le,  2, (oLR, EXP),	 t_loloop),
26327 
26328  ToC("clrm",	e89f0000, 1, (CLRMLST),  t_clrm),
26329  ToC("vscclrm",	ec9f0a00, 1, (VRSDVLST), t_vscclrm),
26330 
26331 #undef  THUMB_VARIANT
26332 #define THUMB_VARIANT & mve_ext
26333  ToC("lsll",	ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26334  ToC("lsrl",	ea50011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26335  ToC("asrl",	ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26336  ToC("uqrshll",	ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26337  ToC("sqrshrl",	ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26338  ToC("uqshll",	ea51010f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26339  ToC("urshrl",	ea51011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26340  ToC("srshrl",	ea51012f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26341  ToC("sqshll",	ea51013f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26342  ToC("uqrshl",	ea500f0d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
26343  ToC("sqrshr",	ea500f2d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
26344  ToC("uqshl",	ea500f0f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26345  ToC("urshr",	ea500f1f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26346  ToC("srshr",	ea500f2f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26347  ToC("sqshl",	ea500f3f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26348 
26349  ToC("vpt",	ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26350  ToC("vptt",	ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26351  ToC("vpte",	ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26352  ToC("vpttt",	ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26353  ToC("vptte",	ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26354  ToC("vptet",	ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26355  ToC("vptee",	ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26356  ToC("vptttt",	ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26357  ToC("vpttte",	ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26358  ToC("vpttet",	ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26359  ToC("vpttee",	ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26360  ToC("vptett",	ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26361  ToC("vptete",	ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26362  ToC("vpteet",	ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26363  ToC("vpteee",	ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26364 
26365  ToC("vpst",	fe710f4d, 0, (), mve_vpt),
26366  ToC("vpstt",	fe318f4d, 0, (), mve_vpt),
26367  ToC("vpste",	fe718f4d, 0, (), mve_vpt),
26368  ToC("vpsttt",	fe314f4d, 0, (), mve_vpt),
26369  ToC("vpstte",	fe31cf4d, 0, (), mve_vpt),
26370  ToC("vpstet",	fe71cf4d, 0, (), mve_vpt),
26371  ToC("vpstee",	fe714f4d, 0, (), mve_vpt),
26372  ToC("vpstttt",	fe312f4d, 0, (), mve_vpt),
26373  ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
26374  ToC("vpsttet",	fe31ef4d, 0, (), mve_vpt),
26375  ToC("vpsttee",	fe31af4d, 0, (), mve_vpt),
26376  ToC("vpstett",	fe71af4d, 0, (), mve_vpt),
26377  ToC("vpstete",	fe71ef4d, 0, (), mve_vpt),
26378  ToC("vpsteet",	fe716f4d, 0, (), mve_vpt),
26379  ToC("vpsteee",	fe712f4d, 0, (), mve_vpt),
26380 
26381  /* MVE and MVE FP only.  */
26382  mToC("vhcadd",	ee000f00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vhcadd),
26383  mCEF(vctp,	_vctp,      1, (RRnpc),				  mve_vctp),
26384  mCEF(vadc,	_vadc,      3, (RMQ, RMQ, RMQ),			  mve_vadc),
26385  mCEF(vadci,	_vadci,     3, (RMQ, RMQ, RMQ),			  mve_vadc),
26386  mToC("vsbc",	fe300f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
26387  mToC("vsbci",	fe301f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
26388  mCEF(vmullb,	_vmullb,    3, (RMQ, RMQ, RMQ),			  mve_vmull),
26389  mCEF(vabav,	_vabav,	    3, (RRnpcsp, RMQ, RMQ),		  mve_vabav),
26390  mCEF(vmladav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26391  mCEF(vmladava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26392  mCEF(vmladavx,	  _vmladavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26393  mCEF(vmladavax,  _vmladavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26394  mCEF(vmlav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26395  mCEF(vmlava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26396  mCEF(vmlsdav,	  _vmlsdav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26397  mCEF(vmlsdava,	  _vmlsdava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26398  mCEF(vmlsdavx,	  _vmlsdavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26399  mCEF(vmlsdavax,  _vmlsdavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26400 
26401  mCEF(vst20,	_vst20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26402  mCEF(vst21,	_vst21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26403  mCEF(vst40,	_vst40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26404  mCEF(vst41,	_vst41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26405  mCEF(vst42,	_vst42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26406  mCEF(vst43,	_vst43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26407  mCEF(vld20,	_vld20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26408  mCEF(vld21,	_vld21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26409  mCEF(vld40,	_vld40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26410  mCEF(vld41,	_vld41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26411  mCEF(vld42,	_vld42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26412  mCEF(vld43,	_vld43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26413  mCEF(vstrb,	_vstrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26414  mCEF(vstrh,	_vstrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26415  mCEF(vstrw,	_vstrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26416  mCEF(vstrd,	_vstrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26417  mCEF(vldrb,	_vldrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26418  mCEF(vldrh,	_vldrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26419  mCEF(vldrw,	_vldrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26420  mCEF(vldrd,	_vldrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26421 
26422  mCEF(vmovnt,	_vmovnt,    2, (RMQ, RMQ),			  mve_movn),
26423  mCEF(vmovnb,	_vmovnb,    2, (RMQ, RMQ),			  mve_movn),
26424  mCEF(vbrsr,	_vbrsr,     3, (RMQ, RMQ, RR),			  mve_vbrsr),
26425  mCEF(vaddlv,	_vaddlv,    3, (RRe, RRo, RMQ),			  mve_vaddlv),
26426  mCEF(vaddlva,	_vaddlva,   3, (RRe, RRo, RMQ),			  mve_vaddlv),
26427  mCEF(vaddv,	_vaddv,	    2, (RRe, RMQ),			  mve_vaddv),
26428  mCEF(vaddva,	_vaddva,    2, (RRe, RMQ),			  mve_vaddv),
26429  mCEF(vddup,	_vddup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
26430  mCEF(vdwdup,	_vdwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
26431  mCEF(vidup,	_vidup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
26432  mCEF(viwdup,	_viwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
26433  mToC("vmaxa",	ee330e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
26434  mToC("vmina",	ee331e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
26435  mCEF(vmaxv,	_vmaxv,	  2, (RR, RMQ),				  mve_vmaxv),
26436  mCEF(vmaxav,	_vmaxav,  2, (RR, RMQ),				  mve_vmaxv),
26437  mCEF(vminv,	_vminv,	  2, (RR, RMQ),				  mve_vmaxv),
26438  mCEF(vminav,	_vminav,  2, (RR, RMQ),				  mve_vmaxv),
26439 
26440  mCEF(vmlaldav,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26441  mCEF(vmlaldava,  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26442  mCEF(vmlaldavx,  _vmlaldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26443  mCEF(vmlaldavax, _vmlaldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26444  mCEF(vmlalv,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26445  mCEF(vmlalva,	  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26446  mCEF(vmlsldav,	  _vmlsldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26447  mCEF(vmlsldava,  _vmlsldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26448  mCEF(vmlsldavx,  _vmlsldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26449  mCEF(vmlsldavax, _vmlsldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26450  mToC("vrmlaldavh", ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26451  mToC("vrmlaldavha",ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26452  mCEF(vrmlaldavhx,  _vrmlaldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26453  mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26454  mToC("vrmlalvh",   ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26455  mToC("vrmlalvha",  ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26456  mCEF(vrmlsldavh,   _vrmlsldavh,   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26457  mCEF(vrmlsldavha,  _vrmlsldavha,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26458  mCEF(vrmlsldavhx,  _vrmlsldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26459  mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26460 
26461  mToC("vmlas",	  ee011e40,	3, (RMQ, RMQ, RR),		mve_vmlas),
26462  mToC("vmulh",	  ee010e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
26463  mToC("vrmulh",	  ee011e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
26464  mToC("vpnot",	  fe310f4d,	0, (),				mve_vpnot),
26465  mToC("vpsel",	  fe310f01,	3, (RMQ, RMQ, RMQ),		mve_vpsel),
26466 
26467  mToC("vqdmladh",  ee000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26468  mToC("vqdmladhx", ee001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26469  mToC("vqrdmladh", ee000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26470  mToC("vqrdmladhx",ee001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26471  mToC("vqdmlsdh",  fe000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26472  mToC("vqdmlsdhx", fe001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26473  mToC("vqrdmlsdh", fe000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26474  mToC("vqrdmlsdhx",fe001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26475  mToC("vqdmlah",   ee000e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26476  mToC("vqdmlash",  ee001e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26477  mToC("vqrdmlash", ee001e40,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26478  mToC("vqdmullt",  ee301f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
26479  mToC("vqdmullb",  ee300f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
26480  mCEF(vqmovnt,	  _vqmovnt,	2, (RMQ, RMQ),			mve_vqmovn),
26481  mCEF(vqmovnb,	  _vqmovnb,	2, (RMQ, RMQ),			mve_vqmovn),
26482  mCEF(vqmovunt,	  _vqmovunt,	2, (RMQ, RMQ),			mve_vqmovn),
26483  mCEF(vqmovunb,	  _vqmovunb,	2, (RMQ, RMQ),			mve_vqmovn),
26484 
26485  mCEF(vshrnt,	  _vshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26486  mCEF(vshrnb,	  _vshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26487  mCEF(vrshrnt,	  _vrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26488  mCEF(vrshrnb,	  _vrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26489  mCEF(vqshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26490  mCEF(vqshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26491  mCEF(vqshrunt,	  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26492  mCEF(vqshrunb,	  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26493  mCEF(vqrshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26494  mCEF(vqrshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26495  mCEF(vqrshrunt,  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26496  mCEF(vqrshrunb,  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26497 
26498  mToC("vshlc",	    eea00fc0,	   3, (RMQ, RR, I32z),	    mve_vshlc),
26499  mToC("vshllt",	    ee201e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
26500  mToC("vshllb",	    ee200e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
26501 
26502  toU("dlstp",	_dlstp, 2, (LR, RR),      t_loloop),
26503  toU("wlstp",	_wlstp, 3, (LR, RR, EXP), t_loloop),
26504  toU("letp",	_letp,  2, (LR, EXP),	  t_loloop),
26505  toU("lctp",	_lctp,  0, (),		  t_loloop),
26506 
26507 #undef THUMB_VARIANT
26508 #define THUMB_VARIANT & mve_fp_ext
26509  mToC("vcmul", ee300e00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vcmul),
26510  mToC("vfmas", ee311e40,   3, (RMQ, RMQ, RR),			  mve_vfmas),
26511  mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
26512  mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
26513  mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
26514  mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
26515  mToC("vminnmv", eeee0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
26516  mToC("vminnmav",eeec0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
26517 
26518 #undef  ARM_VARIANT
26519 #define ARM_VARIANT  & fpu_vfp_ext_v1
26520 #undef  THUMB_VARIANT
26521 #define THUMB_VARIANT  & arm_ext_v6t2
26522 
26523  mcCE(fcpyd,	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
26524 
26525 #undef  ARM_VARIANT
26526 #define ARM_VARIANT  & fpu_vfp_ext_v1xd
26527 
26528  mnCEF(vmla,     _vmla,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
26529  mnCEF(vmul,     _vmul,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
26530  MNCE(vmov,   0,	1, (VMOV),	      neon_mov),
26531  mcCE(fmrs,	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
26532  mcCE(fmsr,	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
26533  mcCE(fcpys,	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
26534 
26535  mCEF(vmullt, _vmullt,	3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ),	mve_vmull),
26536  mnCEF(vadd,  _vadd,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26537  mnCEF(vsub,  _vsub,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26538 
26539  MNCEF(vabs,  1b10300,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26540  MNCEF(vneg,  1b10380,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26541 
26542  mCEF(vmovlt, _vmovlt,	1, (VMOV),		mve_movl),
26543  mCEF(vmovlb, _vmovlb,	1, (VMOV),		mve_movl),
26544 
26545  mnCE(vcmp,      _vcmp,    3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26546  mnCE(vcmpe,     _vcmpe,   3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26547 
26548 #undef  ARM_VARIANT
26549 #define ARM_VARIANT  & fpu_vfp_ext_v2
26550 
26551  mcCE(fmsrr,	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26552  mcCE(fmrrs,	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26553  mcCE(fmdrr,	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
26554  mcCE(fmrrd,	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
26555 
26556 #undef  ARM_VARIANT
26557 #define ARM_VARIANT    & fpu_vfp_ext_armv8xd
26558  mnUF(vcvta,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvta),
26559  mnUF(vcvtp,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtp),
26560  mnUF(vcvtn,  _vcvta,  3, (RNSDQMQ, oRNSDQMQ, oI32z),	neon_cvtn),
26561  mnUF(vcvtm,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtm),
26562  mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26563  mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26564 
26565 #undef	ARM_VARIANT
26566 #define ARM_VARIANT & fpu_neon_ext_v1
26567  mnUF(vabd,      _vabd,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26568  mnUF(vabdl,     _vabdl,	  3, (RNQMQ, RNDMQ, RNDMQ),   neon_dyadic_long),
26569  mnUF(vaddl,     _vaddl,	  3, (RNSDQMQ, oRNSDMQ, RNSDMQR),  neon_dyadic_long),
26570  mnUF(vsubl,     _vsubl,	  3, (RNSDQMQ, oRNSDMQ, RNSDMQR),  neon_dyadic_long),
26571  mnUF(vand,      _vand,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26572  mnUF(vbic,      _vbic,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26573  mnUF(vorr,      _vorr,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26574  mnUF(vorn,      _vorn,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26575  mnUF(veor,      _veor,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ),      neon_logic),
26576  MNUF(vcls,      1b00400,	  2, (RNDQMQ, RNDQMQ),		     neon_cls),
26577  MNUF(vclz,      1b00480,	  2, (RNDQMQ, RNDQMQ),		     neon_clz),
26578  mnCE(vdup,      _vdup,		  2, (RNDQMQ, RR_RNSC),		     neon_dup),
26579  MNUF(vhadd,     00000000,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26580  MNUF(vrhadd,    00000100,	  3, (RNDQMQ, oRNDQMQ, RNDQMQ),	  neon_dyadic_i_su),
26581  MNUF(vhsub,     00000200,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26582  mnUF(vmin,      _vmin,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26583  mnUF(vmax,      _vmax,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26584  MNUF(vqadd,     0000010,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26585  MNUF(vqsub,     0000210,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26586  mnUF(vmvn,      _vmvn,    2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26587  MNUF(vqabs,     1b00700,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26588  MNUF(vqneg,     1b00780,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26589  mnUF(vqrdmlah,  _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26590  mnUF(vqdmulh,   _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26591  mnUF(vqrdmulh,  _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26592  MNUF(vqrshl,    0000510,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26593  MNUF(vrshl,     0000500,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26594  MNUF(vshr,      0800010,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26595  MNUF(vrshr,     0800210,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26596  MNUF(vsli,      1800510,  3, (RNDQMQ, oRNDQMQ, I63),  neon_sli),
26597  MNUF(vsri,      1800410,  3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26598  MNUF(vrev64,    1b00000,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26599  MNUF(vrev32,    1b00080,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26600  MNUF(vrev16,    1b00100,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26601  mnUF(vshl,	 _vshl,    3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26602  mnUF(vqshl,     _vqshl,   3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26603  MNUF(vqshlu,    1800610,  3, (RNDQMQ, oRNDQMQ, I63),		 neon_qshlu_imm),
26604 
26605 #undef	ARM_VARIANT
26606 #define ARM_VARIANT & arm_ext_v8_3
26607 #undef	THUMB_VARIANT
26608 #define	THUMB_VARIANT & arm_ext_v6t2_v8m
26609  MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26610  MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26611 
26612 #undef	ARM_VARIANT
26613 #define ARM_VARIANT &arm_ext_bf16
26614 #undef	THUMB_VARIANT
26615 #define	THUMB_VARIANT &arm_ext_bf16
26616  TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26617  TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26618  TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26619 
26620 #undef	ARM_VARIANT
26621 #define ARM_VARIANT &arm_ext_i8mm
26622 #undef	THUMB_VARIANT
26623 #define	THUMB_VARIANT &arm_ext_i8mm
26624  TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26625  TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26626  TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26627  TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26628  TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26629 
26630 #undef	ARM_VARIANT
26631 #undef	THUMB_VARIANT
26632 #define	THUMB_VARIANT &arm_ext_cde
26633  ToC ("cx1", ee000000, 3, (RCP, APSR_RR, I8191), cx1),
26634  ToC ("cx1a", fe000000, 3, (RCP, APSR_RR, I8191), cx1a),
26635  ToC ("cx1d", ee000040, 4, (RCP, RR, APSR_RR, I8191), cx1d),
26636  ToC ("cx1da", fe000040, 4, (RCP, RR, APSR_RR, I8191), cx1da),
26637 
26638  ToC ("cx2", ee400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2),
26639  ToC ("cx2a", fe400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2a),
26640  ToC ("cx2d", ee400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2d),
26641  ToC ("cx2da", fe400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2da),
26642 
26643  ToC ("cx3", ee800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3),
26644  ToC ("cx3a", fe800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3a),
26645  ToC ("cx3d", ee800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3d),
26646  ToC ("cx3da", fe800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3da),
26647 
26648  mToC ("vcx1", ec200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26649  mToC ("vcx1a", fc200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26650 
26651  mToC ("vcx2", ec300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26652  mToC ("vcx2a", fc300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26653 
26654  mToC ("vcx3", ec800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26655  mToC ("vcx3a", fc800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26656 };
26657 
26658 #undef ARM_VARIANT
26659 #undef THUMB_VARIANT
26660 #undef TCE
26661 #undef TUE
26662 #undef TUF
26663 #undef TCC
26664 #undef cCE
26665 #undef cCL
26666 #undef C3E
26667 #undef C3
26668 #undef CE
26669 #undef CM
26670 #undef CL
26671 #undef UE
26672 #undef UF
26673 #undef UT
26674 #undef NUF
26675 #undef nUF
26676 #undef NCE
26677 #undef nCE
26678 #undef OPS0
26679 #undef OPS1
26680 #undef OPS2
26681 #undef OPS3
26682 #undef OPS4
26683 #undef OPS5
26684 #undef OPS6
26685 #undef do_0
26686 #undef ToC
26687 #undef toC
26688 #undef ToU
26689 #undef toU
26690 
26691 /* MD interface: bits in the object file.  */
26692 
/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
   for use in the a.out file, and store them in the array pointed to by buf.
   This knows about the endian-ness of the target machine and does
   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
   2 (short) and 4 (long).  Floating numbers are put out as a series of
   LITTLENUMS (shorts, here at least).	*/
26699 
26700 void
md_number_to_chars(char * buf,valueT val,int n)26701 md_number_to_chars (char * buf, valueT val, int n)
26702 {
26703   if (target_big_endian)
26704     number_to_chars_bigendian (buf, val, n);
26705   else
26706     number_to_chars_littleendian (buf, val, n);
26707 }
26708 
26709 static valueT
md_chars_to_number(char * buf,int n)26710 md_chars_to_number (char * buf, int n)
26711 {
26712   valueT result = 0;
26713   unsigned char * where = (unsigned char *) buf;
26714 
26715   if (target_big_endian)
26716     {
26717       while (n--)
26718 	{
26719 	  result <<= 8;
26720 	  result |= (*where++ & 255);
26721 	}
26722     }
26723   else
26724     {
26725       while (n--)
26726 	{
26727 	  result <<= 8;
26728 	  result |= (where[n] & 255);
26729 	}
26730     }
26731 
26732   return result;
26733 }
26734 
26735 /* MD interface: Sections.  */
26736 
26737 /* Calculate the maximum variable size (i.e., excluding fr_fix)
26738    that an rs_machine_dependent frag may reach.  */
26739 
26740 unsigned int
arm_frag_max_var(fragS * fragp)26741 arm_frag_max_var (fragS *fragp)
26742 {
26743   /* We only use rs_machine_dependent for variable-size Thumb instructions,
26744      which are either THUMB_SIZE (2) or INSN_SIZE (4).
26745 
26746      Note that we generate relaxable instructions even for cases that don't
26747      really need it, like an immediate that's a trivial constant.  So we're
26748      overestimating the instruction size for some of those cases.  Rather
26749      than putting more intelligence here, it would probably be better to
26750      avoid generating a relaxation frag in the first place when it can be
26751      determined up front that a short instruction will suffice.  */
26752 
26753   gas_assert (fragp->fr_type == rs_machine_dependent);
26754   return INSN_SIZE;
26755 }
26756 
26757 /* Estimate the size of a frag before relaxing.  Assume everything fits in
26758    2 bytes.  */
26759 
26760 int
md_estimate_size_before_relax(fragS * fragp,segT segtype ATTRIBUTE_UNUSED)26761 md_estimate_size_before_relax (fragS * fragp,
26762 			       segT    segtype ATTRIBUTE_UNUSED)
26763 {
26764   fragp->fr_var = 2;
26765   return 2;
26766 }
26767 
26768 /* Convert a machine dependent frag.  */
26769 
void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction was emitted at the end of the fixed part
     of the frag.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Fetch the original 16-bit encoding so its register fields can be
     transplanted into the 32-bit replacement when the frag widens.  */
  old_op = bfd_get_16(abfd, buf);
  /* Build the expression for the fixup: symbol + offset, or a bare
     constant when no symbol is involved.  */
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  /* fr_subtype holds the T_MNEM_* code selected at assembly time.  */
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  /* Frag relaxed to 4 bytes: rewrite as the Thumb-2 encoding,
	     copying the register fields out of the old 16-bit opcode.  */
	  insn = THUMB_OP32 (opcode);
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      /* Top-nibble 4 or 9 forms keep Rt in bits 8-10.  */
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      /* Other forms: Rt in bits 0-2, Rn in bits 3-5.  */
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal-pool form is PC-relative.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      /* Thumb bits should be set in the frag handling so we process them
	 after all symbols have been seen.  PR gas/25235.  */
      if (exp.X_op == O_symbol
	  && exp.X_add_symbol != NULL
	  && S_IS_DEFINED (exp.X_add_symbol)
	  && THUMB_IS_FUNC (exp.X_add_symbol))
	exp.X_add_number |= 1;

      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd from bits 4-7 of the short form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* Compensate for the pipeline offset of the short encoding.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs place the destination at bit 8; cmp/cmn place the
	     operand register at bit 16 (8 + 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition field into bits 22-25 of the wide form.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd from bits 4-7 of the short form.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7) and Rn (bits 0-3) of the short form.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 (the S bit in the wide encoding) selects which
	     immediate reloc applies.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  /* Attach the fixup covering the (now final-sized) instruction and
     account for the variable part in the fixed size.  */
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26949 
26950 /* Return the size of a relaxable immediate operand instruction.
26951    SHIFT and SIZE specify the form of the allowable immediate.  */
26952 static int
relax_immediate(fragS * fragp,int size,int shift)26953 relax_immediate (fragS *fragp, int size, int shift)
26954 {
26955   offsetT offset;
26956   offsetT mask;
26957   offsetT low;
26958 
26959   /* ??? Should be able to do better than this.  */
26960   if (fragp->fr_symbol)
26961     return 4;
26962 
26963   low = (1 << shift) - 1;
26964   mask = (1 << (shift + size)) - (1 << shift);
26965   offset = fragp->fr_offset;
26966   /* Force misaligned offsets to 32-bit variant.  */
26967   if (offset & low)
26968     return 4;
26969   if (offset & ~mask)
26970     return 4;
26971   return 2;
26972 }
26973 
/* Get the address of a symbol during relaxation.  Returns the symbol's
   value plus FRAGP's fr_offset, compensated by STRETCH when the
   symbol's frag has not yet been processed on this relaxation pass.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to a multiple of the
		 alignment; the alignment frag absorbs the rest.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      /* Once the stretch has been fully absorbed, frags beyond
		 this point do not move.  */
	      if (stretch == 0)
		break;
	    }
	}
      /* The symbol's frag follows FRAGP, so it moves with the
	 (possibly reduced) stretch.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
27023 
27024 /* Return the size of a relaxable adr pseudo-instruction or PC-relative
27025    load.  */
27026 static int
relax_adr(fragS * fragp,asection * sec,long stretch)27027 relax_adr (fragS *fragp, asection *sec, long stretch)
27028 {
27029   addressT addr;
27030   offsetT val;
27031 
27032   /* Assume worst case for symbols not known to be in the same section.  */
27033   if (fragp->fr_symbol == NULL
27034       || !S_IS_DEFINED (fragp->fr_symbol)
27035       || sec != S_GET_SEGMENT (fragp->fr_symbol)
27036       || S_IS_WEAK (fragp->fr_symbol)
27037       || THUMB_IS_FUNC (fragp->fr_symbol))
27038     return 4;
27039 
27040   val = relaxed_symbol_addr (fragp, stretch);
27041   addr = fragp->fr_address + fragp->fr_fix;
27042   addr = (addr + 4) & ~3;
27043   /* Force misaligned targets to 32-bit variant.  */
27044   if (val & 3)
27045     return 4;
27046   val -= addr;
27047   if (val < 0 || val > 1020)
27048     return 4;
27049   return 2;
27050 }
27051 
27052 /* Return the size of a relaxable add/sub immediate instruction.  */
27053 static int
relax_addsub(fragS * fragp,asection * sec)27054 relax_addsub (fragS *fragp, asection *sec)
27055 {
27056   char *buf;
27057   int op;
27058 
27059   buf = fragp->fr_literal + fragp->fr_fix;
27060   op = bfd_get_16(sec->owner, buf);
27061   if ((op & 0xf) == ((op >> 4) & 0xf))
27062     return relax_immediate (fragp, 8, 0);
27063   else
27064     return relax_immediate (fragp, 3, 0);
27065 }
27066 
27067 /* Return TRUE iff the definition of symbol S could be pre-empted
27068    (overridden) at link or load time.  */
27069 static bool
symbol_preemptible(symbolS * s)27070 symbol_preemptible (symbolS *s)
27071 {
27072   /* Weak symbols can always be pre-empted.  */
27073   if (S_IS_WEAK (s))
27074     return true;
27075 
27076   /* Non-global symbols cannot be pre-empted. */
27077   if (! S_IS_EXTERNAL (s))
27078     return false;
27079 
27080 #ifdef OBJ_ELF
27081   /* In ELF, a global symbol can be marked protected, or private.  In that
27082      case it can't be pre-empted (other definitions in the same link unit
27083      would violate the ODR).  */
27084   if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
27085     return false;
27086 #endif
27087 
27088   /* Other global symbols might be pre-empted.  */
27089   return true;
27090 }
27091 
27092 /* Return the size of a relaxable branch instruction.  BITS is the
27093    size of the offset field in the narrow instruction.  */
27094 
27095 static int
relax_branch(fragS * fragp,asection * sec,int bits,long stretch)27096 relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
27097 {
27098   addressT addr;
27099   offsetT val;
27100   offsetT limit;
27101 
27102   /* Assume worst case for symbols not known to be in the same section.  */
27103   if (!S_IS_DEFINED (fragp->fr_symbol)
27104       || sec != S_GET_SEGMENT (fragp->fr_symbol)
27105       || S_IS_WEAK (fragp->fr_symbol))
27106     return 4;
27107 
27108 #ifdef OBJ_ELF
27109   /* A branch to a function in ARM state will require interworking.  */
27110   if (S_IS_DEFINED (fragp->fr_symbol)
27111       && ARM_IS_FUNC (fragp->fr_symbol))
27112       return 4;
27113 #endif
27114 
27115   if (symbol_preemptible (fragp->fr_symbol))
27116     return 4;
27117 
27118   val = relaxed_symbol_addr (fragp, stretch);
27119   addr = fragp->fr_address + fragp->fr_fix + 4;
27120   val -= addr;
27121 
27122   /* Offset is a signed value *2 */
27123   limit = 1 << bits;
27124   if (val >= limit || val < -limit)
27125     return 4;
27126   return 2;
27127 }
27128 
27129 
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* fr_subtype records which relaxable Thumb instruction this frag
     holds.  For the relax_immediate cases the two arguments are the
     bit-width and alignment shift of the narrow encoding's immediate
     field.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      /* Emit the final 32-bit form now and retire the frag from
	 further relaxation.  */
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
27208 
27209 /* Round up a section size to the appropriate boundary.	 */
27210 
27211 valueT
md_section_align(segT segment ATTRIBUTE_UNUSED,valueT size)27212 md_section_align (segT	 segment ATTRIBUTE_UNUSED,
27213 		  valueT size)
27214 {
27215   return size;
27216 }
27217 
/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
   of an rs_align_code fragment with NOP instructions appropriate to
   the recorded ARM/Thumb mode, padding any leading misaligned bytes
   with zeroes.  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings indexed by [architecture variant][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  /* NOTE(review): the mask here relies on MAX_MEM_FOR_RS_ALIGN_CODE
     being of the form 2**n - 1 — confirm against its definition.  */
  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  With v6t2 we can mix one narrow NOP with wide
	 ones; otherwise only the narrow Thumb-1 NOP is available.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: pick the v6k NOP when the selected CPU has it.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  /* Pad leading bytes that cannot hold a whole NOP with zeroes, and
     (for ELF) mark them as data via a mapping symbol.  */
  if (bytes & (noop_size - 1))
    {
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
27337 
27338 /* Called from md_do_align.  Used to create an alignment
27339    frag in a code section.  */
27340 
27341 void
arm_frag_align_code(int n,int max)27342 arm_frag_align_code (int n, int max)
27343 {
27344   char * p;
27345 
27346   /* We assume that there will never be a requirement
27347      to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
27348   if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
27349     {
27350       char err_msg[128];
27351 
27352       sprintf (err_msg,
27353 	_("alignments greater than %d bytes not supported in .text sections."),
27354 	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
27355       as_fatal ("%s", err_msg);
27356     }
27357 
27358   p = frag_var (rs_align_code,
27359 		MAX_MEM_FOR_RS_ALIGN_CODE,
27360 		1,
27361 		(relax_substateT) max,
27362 		(symbolS *) NULL,
27363 		(offsetT) n,
27364 		(char *) NULL);
27365   *p = 0;
27366 }
27367 
27368 /* Perform target specific initialisation of a frag.
27369    Note - despite the name this initialisation is not done when the frag
27370    is created, but only when its type is assigned.  A frag can be created
27371    and used a long time before its type is set, so beware of assuming that
27372    this initialisation is performed first.  */
27373 
27374 #ifndef OBJ_ELF
/* Non-ELF variant: only the ARM/Thumb mode needs recording, so that
   arm_handle_align can later pick the right flavour of NOP.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
27381 
27382 #else /* OBJ_ELF is defined.  */
27383 void
arm_init_frag(fragS * fragP,int max_chars)27384 arm_init_frag (fragS * fragP, int max_chars)
27385 {
27386   bool frag_thumb_mode;
27387 
27388   /* If the current ARM vs THUMB mode has not already
27389      been recorded into this frag then do so now.  */
27390   if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
27391     fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
27392 
27393   /* PR 21809: Do not set a mapping state for debug sections
27394      - it just confuses other tools.  */
27395   if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
27396     return;
27397 
27398   frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
27399 
27400   /* Record a mapping symbol for alignment frags.  We will delete this
27401      later if the alignment ends up empty.  */
27402   switch (fragP->fr_type)
27403     {
27404     case rs_align:
27405     case rs_align_test:
27406     case rs_fill:
27407       mapping_state_2 (MAP_DATA, max_chars);
27408       break;
27409     case rs_align_code:
27410       mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
27411       break;
27412     default:
27413       break;
27414     }
27415 }
27416 
27417 /* When we change sections we need to issue a new mapping symbol.  */
27418 
27419 void
arm_elf_change_section(void)27420 arm_elf_change_section (void)
27421 {
27422   /* Link an unlinked unwind index table section to the .text section.	*/
27423   if (elf_section_type (now_seg) == SHT_ARM_EXIDX
27424       && elf_linked_to_section (now_seg) == NULL)
27425     elf_linked_to_section (now_seg) = text_section;
27426 }
27427 
27428 int
arm_elf_section_type(const char * str,size_t len)27429 arm_elf_section_type (const char * str, size_t len)
27430 {
27431   if (len == 5 && startswith (str, "exidx"))
27432     return SHT_ARM_EXIDX;
27433 
27434   return -1;
27435 }
27436 
27437 /* Code to deal with unwinding tables.	*/
27438 
27439 static void add_unwind_adjustsp (offsetT);
27440 
27441 /* Generate any deferred unwind frame offset.  */
27442 
27443 static void
flush_pending_unwind(void)27444 flush_pending_unwind (void)
27445 {
27446   offsetT offset;
27447 
27448   offset = unwind.pending_offset;
27449   unwind.pending_offset = 0;
27450   if (offset != 0)
27451     add_unwind_adjustsp (offset);
27452 }
27453 
27454 /* Add an opcode to this list for this function.  Two-byte opcodes should
27455    be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
27456    order.  */
27457 
27458 static void
add_unwind_opcode(valueT op,int length)27459 add_unwind_opcode (valueT op, int length)
27460 {
27461   /* Add any deferred stack adjustment.	 */
27462   if (unwind.pending_offset)
27463     flush_pending_unwind ();
27464 
27465   unwind.sp_restored = 0;
27466 
27467   if (unwind.opcode_count + length > unwind.opcode_alloc)
27468     {
27469       unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
27470       if (unwind.opcodes)
27471 	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
27472 				     unwind.opcode_alloc);
27473       else
27474 	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
27475     }
27476   while (length > 0)
27477     {
27478       length--;
27479       unwind.opcodes[unwind.opcode_count] = op & 0xff;
27480       op >>= 8;
27481       unwind.opcode_count++;
27482     }
27483 }
27484 
/* Add unwind opcodes to adjust the stack pointer by OFFSET bytes.
   Remember that the opcode list is built in reverse, so bytes added
   later here appear earlier in the final encoding.  */

static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.	*/
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    bytes[n] |= 0x80;	/* Continuation bit on all but the last.  */
	  n++;
	}
      /* Add the insn.	*/
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      /* The 0xb2 marker goes last so it ends up first in the output.  */
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.	*/
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit 0x7f opcodes for each full 0x100
	 bytes, then one final 0x40-based opcode for the remainder.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
27546 
27547 /* Finish the list of unwind opcodes for this function.	 */
27548 
27549 static void
finish_unwind_opcodes(void)27550 finish_unwind_opcodes (void)
27551 {
27552   valueT op;
27553 
27554   if (unwind.fp_used)
27555     {
27556       /* Adjust sp as necessary.  */
27557       unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
27558       flush_pending_unwind ();
27559 
27560       /* After restoring sp from the frame pointer.  */
27561       op = 0x90 | unwind.fp_reg;
27562       add_unwind_opcode (op, 1);
27563     }
27564   else
27565     flush_pending_unwind ();
27566 }
27567 
27568 
27569 /* Start an exception table entry.  If idx is nonzero this is an index table
27570    entry.  */
27571 
27572 static void
start_unwind_section(const segT text_seg,int idx)27573 start_unwind_section (const segT text_seg, int idx)
27574 {
27575   const char * text_name;
27576   const char * prefix;
27577   const char * prefix_once;
27578   struct elf_section_match match;
27579   char * sec_name;
27580   int type;
27581   int flags;
27582   int linkonce;
27583 
27584   if (idx)
27585     {
27586       prefix = ELF_STRING_ARM_unwind;
27587       prefix_once = ELF_STRING_ARM_unwind_once;
27588       type = SHT_ARM_EXIDX;
27589     }
27590   else
27591     {
27592       prefix = ELF_STRING_ARM_unwind_info;
27593       prefix_once = ELF_STRING_ARM_unwind_info_once;
27594       type = SHT_PROGBITS;
27595     }
27596 
27597   text_name = segment_name (text_seg);
27598   if (streq (text_name, ".text"))
27599     text_name = "";
27600 
27601   if (startswith (text_name, ".gnu.linkonce.t."))
27602     {
27603       prefix = prefix_once;
27604       text_name += strlen (".gnu.linkonce.t.");
27605     }
27606 
27607   sec_name = concat (prefix, text_name, (char *) NULL);
27608 
27609   flags = SHF_ALLOC;
27610   linkonce = 0;
27611   memset (&match, 0, sizeof (match));
27612 
27613   /* Handle COMDAT group.  */
27614   if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
27615     {
27616       match.group_name = elf_group_name (text_seg);
27617       if (match.group_name == NULL)
27618 	{
27619 	  as_bad (_("Group section `%s' has no group signature"),
27620 		  segment_name (text_seg));
27621 	  ignore_rest_of_line ();
27622 	  return;
27623 	}
27624       flags |= SHF_GROUP;
27625       linkonce = 1;
27626     }
27627 
27628   obj_elf_change_section (sec_name, type, flags, 0, &match,
27629 			  linkonce, 0);
27630 
27631   /* Set the section link for index tables.  */
27632   if (idx)
27633     elf_linked_to_section (now_seg) = text_seg;
27634 }
27635 
27636 
/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
   personality routine data.  Returns zero, or the index table value for
   an inline entry.  Switches to the unwind section as a side effect,
   saving the current section in unwind.saved_seg/saved_subseg.  */

static valueT
create_unwind_entry (int have_data)
{
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* Round the byte count up to whole words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* Word-align the table entry.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	*/
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  /* Current word full: flush it and start the next.  */
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.   */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27805 
27806 
/* Initialize the DWARF-2 unwind information for this procedure.  */

void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the stack pointer with zero offset.  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27814 #endif /* OBJ_ELF */
27815 
27816 /* Convert REGNAME to a DWARF-2 register number.  */
27817 
27818 int
tc_arm_regname_to_dw2regnum(char * regname)27819 tc_arm_regname_to_dw2regnum (char *regname)
27820 {
27821   int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27822   if (reg != FAIL)
27823     return reg;
27824 
27825   /* PR 16694: Allow VFP registers as well.  */
27826   reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27827   if (reg != FAIL)
27828     return 64 + reg;
27829 
27830   reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27831   if (reg != FAIL)
27832     return reg + 256;
27833 
27834   return FAIL;
27835 }
27836 
27837 #ifdef TE_PE
27838 void
tc_pe_dwarf2_emit_offset(symbolS * symbol,unsigned int size)27839 tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
27840 {
27841   expressionS exp;
27842 
27843   exp.X_op = O_secrel;
27844   exp.X_add_symbol = symbol;
27845   exp.X_add_number = 0;
27846   emit_expr (&exp, size);
27847 }
27848 #endif
27849 
27850 /* MD interface: Symbol and relocation handling.  */
27851 
/* Return the address within the segment that a PC-relative fixup is
   relative to.  For ARM, PC-relative fixups applied to instructions
   are generally relative to the location of the fixup plus 8 bytes.
   Thumb branches are offset by 4, and Thumb loads relative to PC
   require special handling.  */

long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A same-section resolved call to an ARM function on >= v5t
	 will be turned into BLX by md_apply_fix, so restore the full
	 base even if it was cleared above.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
       return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27985 
/* When true (the default), warn if an assignment creates a symbol whose
   name matches an ARM instruction — see arm_tc_equal_in_insn below.
   Presumably toggled by a -m[no-]warn-syms command line option (the
   warning text cites -mwarn-syms); confirm against md_parse_option.  */
static bool flag_warn_syms = true;
27987 
27988 bool
arm_tc_equal_in_insn(int c ATTRIBUTE_UNUSED,char * name)27989 arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27990 {
27991   /* PR 18347 - Warn if the user attempts to create a symbol with the same
27992      name as an ARM instruction.  Whilst strictly speaking it is allowed, it
27993      does mean that the resulting code might be very confusing to the reader.
27994      Also this warning can be triggered if the user omits an operand before
27995      an immediate address, eg:
27996 
27997        LDR =foo
27998 
27999      GAS treats this as an assignment of the value of the symbol foo to a
28000      symbol LDR, and so (without this code) it will not issue any kind of
28001      warning or error message.
28002 
28003      Note - ARM instructions are case-insensitive but the strings in the hash
28004      table are all stored in lower case, so we must first ensure that name is
28005      lower case too.  */
28006   if (flag_warn_syms && arm_ops_hsh)
28007     {
28008       char * nbuf = strdup (name);
28009       char * p;
28010 
28011       for (p = nbuf; *p; p++)
28012 	*p = TOLOWER (*p);
28013       if (str_hash_find (arm_ops_hsh, nbuf) != NULL)
28014 	{
28015 	  static htab_t  already_warned = NULL;
28016 
28017 	  if (already_warned == NULL)
28018 	    already_warned = str_htab_create ();
28019 	  /* Only warn about the symbol once.  To keep the code
28020 	     simple we let str_hash_insert do the lookup for us.  */
28021 	  if (str_hash_find (already_warned, nbuf) == NULL)
28022 	    {
28023 	      as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
28024 	      str_hash_insert (already_warned, nbuf, NULL, 0);
28025 	    }
28026 	}
28027       else
28028 	free (nbuf);
28029     }
28030 
28031   return false;
28032 }
28033 
28034 /* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
28035    Otherwise we have no need to default values of symbols.  */
28036 
28037 symbolS *
md_undefined_symbol(char * name ATTRIBUTE_UNUSED)28038 md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
28039 {
28040 #ifdef OBJ_ELF
28041   if (name[0] == '_' && name[1] == 'G'
28042       && streq (name, GLOBAL_OFFSET_TABLE_NAME))
28043     {
28044       if (!GOT_symbol)
28045 	{
28046 	  if (symbol_find (name))
28047 	    as_bad (_("GOT already in the symbol table"));
28048 
28049 	  GOT_symbol = symbol_new (name, undefined_section,
28050 				   &zero_address_frag, 0);
28051 	}
28052 
28053       return GOT_symbol;
28054     }
28055 #endif
28056 
28057   return NULL;
28058 }
28059 
28060 /* Subroutine of md_apply_fix.	 Check to see if an immediate can be
28061    computed as two separate immediate values, added together.  We
28062    already know that this value cannot be computed by just one ARM
28063    instruction.	 */
28064 
static unsigned int
validate_immediate_twopart (unsigned int   val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try each even rotation amount in turn until one places a non-zero
     byte in the bottom eight bits of the rotated value.  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* The remainder must fit entirely in the adjacent byte,
	       otherwise this rotation cannot yield a two-part split.  */
	    if (a & ~ 0xffff)
	      continue;
	    /* Second immediate: the byte at bits 8-15, with rotation
	       i+24 folded in at bit 7 (same format as the return value).  */
	    * highpart = (a  >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    /* Second immediate: the byte at bits 16-23, rotation i+16.  */
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    /* Only the top byte can be left at this point.  */
	    gas_assert (a & 0xff000000);
	    /* Second immediate: the byte at bits 24-31, rotation i+8.  */
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* First immediate: the low byte plus this rotation, encoded as
	   byte | (rotation << 7).  */
	return (a & 0xff) | (i << 7);
      }

  /* VAL cannot be expressed as the sum of two immediates.  */
  return FAIL;
}
28098 
28099 static int
validate_offset_imm(unsigned int val,int hwse)28100 validate_offset_imm (unsigned int val, int hwse)
28101 {
28102   if ((hwse && val > 255) || val > 4095)
28103     return FAIL;
28104   return val;
28105 }
28106 
28107 /* Subroutine of md_apply_fix.	 Do those data_ops which can take a
28108    negative immediate constant by altering the instruction.  A bit of
28109    a hack really.
28110 	MOV <-> MVN
28111 	AND <-> BIC
28112 	ADC <-> SBC
28113 	by inverting the second operand, and
28114 	ADD <-> SUB
28115 	CMP <-> CMN
28116 	by negating the second operand.	 */
28117 
28118 static int
negate_data_op(unsigned long * instruction,unsigned long value)28119 negate_data_op (unsigned long * instruction,
28120 		unsigned long	value)
28121 {
28122   int op, new_inst;
28123   unsigned long negated, inverted;
28124 
28125   negated = encode_arm_immediate (-value);
28126   inverted = encode_arm_immediate (~value);
28127 
28128   op = (*instruction >> DATA_OP_SHIFT) & 0xf;
28129   switch (op)
28130     {
28131       /* First negates.	 */
28132     case OPCODE_SUB:		 /* ADD <-> SUB	 */
28133       new_inst = OPCODE_ADD;
28134       value = negated;
28135       break;
28136 
28137     case OPCODE_ADD:
28138       new_inst = OPCODE_SUB;
28139       value = negated;
28140       break;
28141 
28142     case OPCODE_CMP:		 /* CMP <-> CMN	 */
28143       new_inst = OPCODE_CMN;
28144       value = negated;
28145       break;
28146 
28147     case OPCODE_CMN:
28148       new_inst = OPCODE_CMP;
28149       value = negated;
28150       break;
28151 
28152       /* Now Inverted ops.  */
28153     case OPCODE_MOV:		 /* MOV <-> MVN	 */
28154       new_inst = OPCODE_MVN;
28155       value = inverted;
28156       break;
28157 
28158     case OPCODE_MVN:
28159       new_inst = OPCODE_MOV;
28160       value = inverted;
28161       break;
28162 
28163     case OPCODE_AND:		 /* AND <-> BIC	 */
28164       new_inst = OPCODE_BIC;
28165       value = inverted;
28166       break;
28167 
28168     case OPCODE_BIC:
28169       new_inst = OPCODE_AND;
28170       value = inverted;
28171       break;
28172 
28173     case OPCODE_ADC:		  /* ADC <-> SBC  */
28174       new_inst = OPCODE_SBC;
28175       value = inverted;
28176       break;
28177 
28178     case OPCODE_SBC:
28179       new_inst = OPCODE_ADC;
28180       value = inverted;
28181       break;
28182 
28183       /* We cannot do anything.	 */
28184     default:
28185       return FAIL;
28186     }
28187 
28188   if (value == (unsigned) FAIL)
28189     return FAIL;
28190 
28191   *instruction &= OPCODE_MASK;
28192   *instruction |= new_inst << DATA_OP_SHIFT;
28193   return value;
28194 }
28195 
28196 /* Like negate_data_op, but for Thumb-2.   */
28197 
28198 static unsigned int
thumb32_negate_data_op(valueT * instruction,unsigned int value)28199 thumb32_negate_data_op (valueT *instruction, unsigned int value)
28200 {
28201   unsigned int op, new_inst;
28202   unsigned int rd;
28203   unsigned int negated, inverted;
28204 
28205   negated = encode_thumb32_immediate (-value);
28206   inverted = encode_thumb32_immediate (~value);
28207 
28208   rd = (*instruction >> 8) & 0xf;
28209   op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
28210   switch (op)
28211     {
28212       /* ADD <-> SUB.  Includes CMP <-> CMN.  */
28213     case T2_OPCODE_SUB:
28214       new_inst = T2_OPCODE_ADD;
28215       value = negated;
28216       break;
28217 
28218     case T2_OPCODE_ADD:
28219       new_inst = T2_OPCODE_SUB;
28220       value = negated;
28221       break;
28222 
28223       /* ORR <-> ORN.  Includes MOV <-> MVN.  */
28224     case T2_OPCODE_ORR:
28225       new_inst = T2_OPCODE_ORN;
28226       value = inverted;
28227       break;
28228 
28229     case T2_OPCODE_ORN:
28230       new_inst = T2_OPCODE_ORR;
28231       value = inverted;
28232       break;
28233 
28234       /* AND <-> BIC.  TST has no inverted equivalent.  */
28235     case T2_OPCODE_AND:
28236       new_inst = T2_OPCODE_BIC;
28237       if (rd == 15)
28238 	value = FAIL;
28239       else
28240 	value = inverted;
28241       break;
28242 
28243     case T2_OPCODE_BIC:
28244       new_inst = T2_OPCODE_AND;
28245       value = inverted;
28246       break;
28247 
28248       /* ADC <-> SBC  */
28249     case T2_OPCODE_ADC:
28250       new_inst = T2_OPCODE_SBC;
28251       value = inverted;
28252       break;
28253 
28254     case T2_OPCODE_SBC:
28255       new_inst = T2_OPCODE_ADC;
28256       value = inverted;
28257       break;
28258 
28259       /* We cannot do anything.	 */
28260     default:
28261       return FAIL;
28262     }
28263 
28264   if (value == (unsigned int)FAIL)
28265     return FAIL;
28266 
28267   *instruction &= T2_OPCODE_MASK;
28268   *instruction |= new_inst << T2_DATA_OP_SHIFT;
28269   return value;
28270 }
28271 
28272 /* Read a 32-bit thumb instruction from buf.  */
28273 
28274 static unsigned long
get_thumb32_insn(char * buf)28275 get_thumb32_insn (char * buf)
28276 {
28277   unsigned long insn;
28278   insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
28279   insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28280 
28281   return insn;
28282 }
28283 
28284 /* We usually want to set the low bit on the address of thumb function
28285    symbols.  In particular .word foo - . should have the low bit set.
28286    Generic code tries to fold the difference of two symbols to
28287    a constant.  Prevent this and force a relocation when the first symbols
28288    is a thumb function.  */
28289 
28290 bool
arm_optimize_expr(expressionS * l,operatorT op,expressionS * r)28291 arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
28292 {
28293   if (op == O_subtract
28294       && l->X_op == O_symbol
28295       && r->X_op == O_symbol
28296       && THUMB_IS_FUNC (l->X_add_symbol))
28297     {
28298       l->X_op = O_subtract;
28299       l->X_op_symbol = r->X_add_symbol;
28300       l->X_add_number -= r->X_add_number;
28301       return true;
28302     }
28303 
28304   /* Process as normal.  */
28305   return false;
28306 }
28307 
28308 /* Encode Thumb2 unconditional branches and calls. The encoding
28309    for the 2 are identical for the immediate values.  */
28310 
28311 static void
encode_thumb2_b_bl_offset(char * buf,offsetT value)28312 encode_thumb2_b_bl_offset (char * buf, offsetT value)
28313 {
28314 #define T2I1I2MASK  ((1 << 13) | (1 << 11))
28315   offsetT newval;
28316   offsetT newval2;
28317   addressT S, I1, I2, lo, hi;
28318 
28319   S = (value >> 24) & 0x01;
28320   I1 = (value >> 23) & 0x01;
28321   I2 = (value >> 22) & 0x01;
28322   hi = (value >> 12) & 0x3ff;
28323   lo = (value >> 1) & 0x7ff;
28324   newval   = md_chars_to_number (buf, THUMB_SIZE);
28325   newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28326   newval  |= (S << 10) | hi;
28327   newval2 &=  ~T2I1I2MASK;
28328   newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
28329   md_number_to_chars (buf, newval, THUMB_SIZE);
28330   md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
28331 }
28332 
28333 void
md_apply_fix(fixS * fixP,valueT * valP,segT seg)28334 md_apply_fix (fixS *	fixP,
28335 	       valueT * valP,
28336 	       segT	seg)
28337 {
28338   valueT	 value = * valP;
28339   valueT	 newval;
28340   unsigned int	 newimm;
28341   unsigned long	 temp;
28342   int		 sign;
28343   char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
28344 
28345   gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
28346 
28347   /* Note whether this will delete the relocation.  */
28348 
28349   if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
28350     fixP->fx_done = 1;
28351 
28352   /* On a 64-bit host, silently truncate 'value' to 32 bits for
28353      consistency with the behaviour on 32-bit hosts.  Remember value
28354      for emit_reloc.  */
28355   value &= 0xffffffff;
28356   value ^= 0x80000000;
28357   value -= 0x80000000;
28358 
28359   *valP = value;
28360   fixP->fx_addnumber = value;
28361 
28362   /* Same treatment for fixP->fx_offset.  */
28363   fixP->fx_offset &= 0xffffffff;
28364   fixP->fx_offset ^= 0x80000000;
28365   fixP->fx_offset -= 0x80000000;
28366 
28367   switch (fixP->fx_r_type)
28368     {
28369     case BFD_RELOC_NONE:
28370       /* This will need to go in the object file.  */
28371       fixP->fx_done = 0;
28372       break;
28373 
28374     case BFD_RELOC_ARM_IMMEDIATE:
28375       /* We claim that this fixup has been processed here,
28376 	 even if in fact we generate an error because we do
28377 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
28378       fixP->fx_done = 1;
28379 
28380       if (fixP->fx_addsy)
28381 	{
28382 	  const char *msg = 0;
28383 
28384 	  if (! S_IS_DEFINED (fixP->fx_addsy))
28385 	    msg = _("undefined symbol %s used as an immediate value");
28386 	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28387 	    msg = _("symbol %s is in a different section");
28388 	  else if (S_IS_WEAK (fixP->fx_addsy))
28389 	    msg = _("symbol %s is weak and may be overridden later");
28390 
28391 	  if (msg)
28392 	    {
28393 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28394 			    msg, S_GET_NAME (fixP->fx_addsy));
28395 	      break;
28396 	    }
28397 	}
28398 
28399       temp = md_chars_to_number (buf, INSN_SIZE);
28400 
28401       /* If the offset is negative, we should use encoding A2 for ADR.  */
28402       if ((temp & 0xfff0000) == 0x28f0000 && (offsetT) value < 0)
28403 	newimm = negate_data_op (&temp, value);
28404       else
28405 	{
28406 	  newimm = encode_arm_immediate (value);
28407 
28408 	  /* If the instruction will fail, see if we can fix things up by
28409 	     changing the opcode.  */
28410 	  if (newimm == (unsigned int) FAIL)
28411 	    newimm = negate_data_op (&temp, value);
28412 	  /* MOV accepts both ARM modified immediate (A1 encoding) and
28413 	     UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
28414 	     When disassembling, MOV is preferred when there is no encoding
28415 	     overlap.  */
28416 	  if (newimm == (unsigned int) FAIL
28417 	      && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
28418 	      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
28419 	      && !((temp >> SBIT_SHIFT) & 0x1)
28420 	      && value <= 0xffff)
28421 	    {
28422 	      /* Clear bits[23:20] to change encoding from A1 to A2.  */
28423 	      temp &= 0xff0fffff;
28424 	      /* Encoding high 4bits imm.  Code below will encode the remaining
28425 		 low 12bits.  */
28426 	      temp |= (value & 0x0000f000) << 4;
28427 	      newimm = value & 0x00000fff;
28428 	    }
28429 	}
28430 
28431       if (newimm == (unsigned int) FAIL)
28432 	{
28433 	  as_bad_where (fixP->fx_file, fixP->fx_line,
28434 			_("invalid constant (%lx) after fixup"),
28435 			(unsigned long) value);
28436 	  break;
28437 	}
28438 
28439       newimm |= (temp & 0xfffff000);
28440       md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28441       break;
28442 
28443     case BFD_RELOC_ARM_ADRL_IMMEDIATE:
28444       {
28445 	unsigned int highpart = 0;
28446 	unsigned int newinsn  = 0xe1a00000; /* nop.  */
28447 
28448 	if (fixP->fx_addsy)
28449 	  {
28450 	    const char *msg = 0;
28451 
28452 	    if (! S_IS_DEFINED (fixP->fx_addsy))
28453 	      msg = _("undefined symbol %s used as an immediate value");
28454 	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28455 	      msg = _("symbol %s is in a different section");
28456 	    else if (S_IS_WEAK (fixP->fx_addsy))
28457 	      msg = _("symbol %s is weak and may be overridden later");
28458 
28459 	    if (msg)
28460 	      {
28461 		as_bad_where (fixP->fx_file, fixP->fx_line,
28462 			      msg, S_GET_NAME (fixP->fx_addsy));
28463 		break;
28464 	      }
28465 	  }
28466 
28467 	newimm = encode_arm_immediate (value);
28468 	temp = md_chars_to_number (buf, INSN_SIZE);
28469 
28470 	/* If the instruction will fail, see if we can fix things up by
28471 	   changing the opcode.	 */
28472 	if (newimm == (unsigned int) FAIL
28473 	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
28474 	  {
28475 	    /* No ?  OK - try using two ADD instructions to generate
28476 	       the value.  */
28477 	    newimm = validate_immediate_twopart (value, & highpart);
28478 
28479 	    /* Yes - then make sure that the second instruction is
28480 	       also an add.  */
28481 	    if (newimm != (unsigned int) FAIL)
28482 	      newinsn = temp;
28483 	    /* Still No ?  Try using a negated value.  */
28484 	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
28485 	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
28486 	    /* Otherwise - give up.  */
28487 	    else
28488 	      {
28489 		as_bad_where (fixP->fx_file, fixP->fx_line,
28490 			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
28491 			      (long) value);
28492 		break;
28493 	      }
28494 
28495 	    /* Replace the first operand in the 2nd instruction (which
28496 	       is the PC) with the destination register.  We have
28497 	       already added in the PC in the first instruction and we
28498 	       do not want to do it again.  */
28499 	    newinsn &= ~ 0xf0000;
28500 	    newinsn |= ((newinsn & 0x0f000) << 4);
28501 	  }
28502 
28503 	newimm |= (temp & 0xfffff000);
28504 	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28505 
28506 	highpart |= (newinsn & 0xfffff000);
28507 	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
28508       }
28509       break;
28510 
28511     case BFD_RELOC_ARM_OFFSET_IMM:
28512       if (!fixP->fx_done && seg->use_rela_p)
28513 	value = 0;
28514       /* Fall through.  */
28515 
28516     case BFD_RELOC_ARM_LITERAL:
28517       sign = (offsetT) value > 0;
28518 
28519       if ((offsetT) value < 0)
28520 	value = - value;
28521 
28522       if (validate_offset_imm (value, 0) == FAIL)
28523 	{
28524 	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
28525 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28526 			  _("invalid literal constant: pool needs to be closer"));
28527 	  else
28528 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28529 			  _("bad immediate value for offset (%ld)"),
28530 			  (long) value);
28531 	  break;
28532 	}
28533 
28534       newval = md_chars_to_number (buf, INSN_SIZE);
28535       if (value == 0)
28536 	newval &= 0xfffff000;
28537       else
28538 	{
28539 	  newval &= 0xff7ff000;
28540 	  newval |= value | (sign ? INDEX_UP : 0);
28541 	}
28542       md_number_to_chars (buf, newval, INSN_SIZE);
28543       break;
28544 
28545     case BFD_RELOC_ARM_OFFSET_IMM8:
28546     case BFD_RELOC_ARM_HWLITERAL:
28547       sign = (offsetT) value > 0;
28548 
28549       if ((offsetT) value < 0)
28550 	value = - value;
28551 
28552       if (validate_offset_imm (value, 1) == FAIL)
28553 	{
28554 	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
28555 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28556 			  _("invalid literal constant: pool needs to be closer"));
28557 	  else
28558 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28559 			  _("bad immediate value for 8-bit offset (%ld)"),
28560 			  (long) value);
28561 	  break;
28562 	}
28563 
28564       newval = md_chars_to_number (buf, INSN_SIZE);
28565       if (value == 0)
28566 	newval &= 0xfffff0f0;
28567       else
28568 	{
28569 	  newval &= 0xff7ff0f0;
28570 	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
28571 	}
28572       md_number_to_chars (buf, newval, INSN_SIZE);
28573       break;
28574 
28575     case BFD_RELOC_ARM_T32_OFFSET_U8:
28576       if (value > 1020 || value % 4 != 0)
28577 	as_bad_where (fixP->fx_file, fixP->fx_line,
28578 		      _("bad immediate value for offset (%ld)"), (long) value);
28579       value /= 4;
28580 
28581       newval = md_chars_to_number (buf+2, THUMB_SIZE);
28582       newval |= value;
28583       md_number_to_chars (buf+2, newval, THUMB_SIZE);
28584       break;
28585 
28586     case BFD_RELOC_ARM_T32_OFFSET_IMM:
28587       /* This is a complicated relocation used for all varieties of Thumb32
28588 	 load/store instruction with immediate offset:
28589 
28590 	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
28591 						   *4, optional writeback(W)
28592 						   (doubleword load/store)
28593 
28594 	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
28595 	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
28596 	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
28597 	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
28598 	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit
28599 
28600 	 Uppercase letters indicate bits that are already encoded at
28601 	 this point.  Lowercase letters are our problem.  For the
28602 	 second block of instructions, the secondary opcode nybble
28603 	 (bits 8..11) is present, and bit 23 is zero, even if this is
28604 	 a PC-relative operation.  */
28605       newval = md_chars_to_number (buf, THUMB_SIZE);
28606       newval <<= 16;
28607       newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);
28608 
28609       if ((newval & 0xf0000000) == 0xe0000000)
28610 	{
28611 	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
28612 	  if ((offsetT) value >= 0)
28613 	    newval |= (1 << 23);
28614 	  else
28615 	    value = -value;
28616 	  if (value % 4 != 0)
28617 	    {
28618 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28619 			    _("offset not a multiple of 4"));
28620 	      break;
28621 	    }
28622 	  value /= 4;
28623 	  if (value > 0xff)
28624 	    {
28625 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28626 			    _("offset out of range"));
28627 	      break;
28628 	    }
28629 	  newval &= ~0xff;
28630 	}
28631       else if ((newval & 0x000f0000) == 0x000f0000)
28632 	{
28633 	  /* PC-relative, 12-bit offset.  */
28634 	  if ((offsetT) value >= 0)
28635 	    newval |= (1 << 23);
28636 	  else
28637 	    value = -value;
28638 	  if (value > 0xfff)
28639 	    {
28640 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28641 			    _("offset out of range"));
28642 	      break;
28643 	    }
28644 	  newval &= ~0xfff;
28645 	}
28646       else if ((newval & 0x00000100) == 0x00000100)
28647 	{
28648 	  /* Writeback: 8-bit, +/- offset.  */
28649 	  if ((offsetT) value >= 0)
28650 	    newval |= (1 << 9);
28651 	  else
28652 	    value = -value;
28653 	  if (value > 0xff)
28654 	    {
28655 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28656 			    _("offset out of range"));
28657 	      break;
28658 	    }
28659 	  newval &= ~0xff;
28660 	}
28661       else if ((newval & 0x00000f00) == 0x00000e00)
28662 	{
28663 	  /* T-instruction: positive 8-bit offset.  */
28664 	  if (value > 0xff)
28665 	    {
28666 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28667 			    _("offset out of range"));
28668 	      break;
28669 	    }
28670 	  newval &= ~0xff;
28671 	  newval |= value;
28672 	}
28673       else
28674 	{
28675 	  /* Positive 12-bit or negative 8-bit offset.  */
28676 	  unsigned int limit;
28677 	  if ((offsetT) value >= 0)
28678 	    {
28679 	      newval |= (1 << 23);
28680 	      limit = 0xfff;
28681 	    }
28682 	  else
28683 	    {
28684 	      value = -value;
28685 	      limit = 0xff;
28686 	    }
28687 	  if (value > limit)
28688 	    {
28689 	      as_bad_where (fixP->fx_file, fixP->fx_line,
28690 			    _("offset out of range"));
28691 	      break;
28692 	    }
28693 	  newval &= ~limit;
28694 	}
28695 
28696       newval |= value;
28697       md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
28698       md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
28699       break;
28700 
28701     case BFD_RELOC_ARM_SHIFT_IMM:
28702       newval = md_chars_to_number (buf, INSN_SIZE);
28703       if (value > 32
28704 	  || (value == 32
28705 	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
28706 	{
28707 	  as_bad_where (fixP->fx_file, fixP->fx_line,
28708 			_("shift expression is too large"));
28709 	  break;
28710 	}
28711 
28712       if (value == 0)
28713 	/* Shifts of zero must be done as lsl.	*/
28714 	newval &= ~0x60;
28715       else if (value == 32)
28716 	value = 0;
28717       newval &= 0xfffff07f;
28718       newval |= (value & 0x1f) << 7;
28719       md_number_to_chars (buf, newval, INSN_SIZE);
28720       break;
28721 
28722     case BFD_RELOC_ARM_T32_IMMEDIATE:
28723     case BFD_RELOC_ARM_T32_ADD_IMM:
28724     case BFD_RELOC_ARM_T32_IMM12:
28725     case BFD_RELOC_ARM_T32_ADD_PC12:
28726       /* We claim that this fixup has been processed here,
28727 	 even if in fact we generate an error because we do
28728 	 not have a reloc for it, so tc_gen_reloc will reject it.  */
28729       fixP->fx_done = 1;
28730 
28731       if (fixP->fx_addsy
28732 	  && ! S_IS_DEFINED (fixP->fx_addsy))
28733 	{
28734 	  as_bad_where (fixP->fx_file, fixP->fx_line,
28735 			_("undefined symbol %s used as an immediate value"),
28736 			S_GET_NAME (fixP->fx_addsy));
28737 	  break;
28738 	}
28739 
28740       newval = md_chars_to_number (buf, THUMB_SIZE);
28741       newval <<= 16;
28742       newval |= md_chars_to_number (buf+2, THUMB_SIZE);
28743 
28744       newimm = FAIL;
28745       if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
28746 	   /* ARMv8-M Baseline MOV will reach here, but it doesn't support
28747 	      Thumb2 modified immediate encoding (T2).  */
28748 	   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
28749 	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28750 	{
28751 	  newimm = encode_thumb32_immediate (value);
28752 	  if (newimm == (unsigned int) FAIL)
28753 	    newimm = thumb32_negate_data_op (&newval, value);
28754 	}
28755       if (newimm == (unsigned int) FAIL)
28756 	{
28757 	  if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
28758 	    {
28759 	      /* Turn add/sum into addw/subw.  */
28760 	      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
28761 		newval = (newval & 0xfeffffff) | 0x02000000;
28762 	      /* No flat 12-bit imm encoding for addsw/subsw.  */
28763 	      if ((newval & 0x00100000) == 0)
28764 		{
28765 		  /* 12 bit immediate for addw/subw.  */
28766 		  if ((offsetT) value < 0)
28767 		    {
28768 		      value = -value;
28769 		      newval ^= 0x00a00000;
28770 		    }
28771 		  if (value > 0xfff)
28772 		    newimm = (unsigned int) FAIL;
28773 		  else
28774 		    newimm = value;
28775 		}
28776 	    }
28777 	  else
28778 	    {
28779 	      /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
28780 		 UINT16 (T3 encoding), MOVW only accepts UINT16.  When
28781 		 disassembling, MOV is preferred when there is no encoding
28782 		 overlap.  */
28783 	      if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
28784 		  /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
28785 		     but with the Rn field [19:16] set to 1111.  */
28786 		  && (((newval >> 16) & 0xf) == 0xf)
28787 		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
28788 		  && !((newval >> T2_SBIT_SHIFT) & 0x1)
28789 		  && value <= 0xffff)
28790 		{
28791 		  /* Toggle bit[25] to change encoding from T2 to T3.  */
28792 		  newval ^= 1 << 25;
28793 		  /* Clear bits[19:16].  */
28794 		  newval &= 0xfff0ffff;
28795 		  /* Encoding high 4bits imm.  Code below will encode the
28796 		     remaining low 12bits.  */
28797 		  newval |= (value & 0x0000f000) << 4;
28798 		  newimm = value & 0x00000fff;
28799 		}
28800 	    }
28801 	}
28802 
28803       if (newimm == (unsigned int)FAIL)
28804 	{
28805 	  as_bad_where (fixP->fx_file, fixP->fx_line,
28806 			_("invalid constant (%lx) after fixup"),
28807 			(unsigned long) value);
28808 	  break;
28809 	}
28810 
28811       newval |= (newimm & 0x800) << 15;
28812       newval |= (newimm & 0x700) << 4;
28813       newval |= (newimm & 0x0ff);
28814 
28815       md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
28816       md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
28817       break;
28818 
28819     case BFD_RELOC_ARM_SMC:
28820       if (value > 0xf)
28821 	as_bad_where (fixP->fx_file, fixP->fx_line,
28822 		      _("invalid smc expression"));
28823 
28824       newval = md_chars_to_number (buf, INSN_SIZE);
28825       newval |= (value & 0xf);
28826       md_number_to_chars (buf, newval, INSN_SIZE);
28827       break;
28828 
28829     case BFD_RELOC_ARM_HVC:
28830       if (value > 0xffff)
28831 	as_bad_where (fixP->fx_file, fixP->fx_line,
28832 		      _("invalid hvc expression"));
28833       newval = md_chars_to_number (buf, INSN_SIZE);
28834       newval |= (value & 0xf) | ((value & 0xfff0) << 4);
28835       md_number_to_chars (buf, newval, INSN_SIZE);
28836       break;
28837 
28838     case BFD_RELOC_ARM_SWI:
28839       if (fixP->tc_fix_data != 0)
28840 	{
28841 	  if (value > 0xff)
28842 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28843 			  _("invalid swi expression"));
28844 	  newval = md_chars_to_number (buf, THUMB_SIZE);
28845 	  newval |= value;
28846 	  md_number_to_chars (buf, newval, THUMB_SIZE);
28847 	}
28848       else
28849 	{
28850 	  if (value > 0x00ffffff)
28851 	    as_bad_where (fixP->fx_file, fixP->fx_line,
28852 			  _("invalid swi expression"));
28853 	  newval = md_chars_to_number (buf, INSN_SIZE);
28854 	  newval |= value;
28855 	  md_number_to_chars (buf, newval, INSN_SIZE);
28856 	}
28857       break;
28858 
28859     case BFD_RELOC_ARM_MULTI:
28860       if (value > 0xffff)
28861 	as_bad_where (fixP->fx_file, fixP->fx_line,
28862 		      _("invalid expression in load/store multiple"));
28863       newval = value | md_chars_to_number (buf, INSN_SIZE);
28864       md_number_to_chars (buf, newval, INSN_SIZE);
28865       break;
28866 
28867 #ifdef OBJ_ELF
28868     case BFD_RELOC_ARM_PCREL_CALL:
28869 
28870       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28871 	  && fixP->fx_addsy
28872 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
28873 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28874 	  && THUMB_IS_FUNC (fixP->fx_addsy))
28875 	/* Flip the bl to blx. This is a simple flip
28876 	   bit here because we generate PCREL_CALL for
28877 	   unconditional bls.  */
28878 	{
28879 	  newval = md_chars_to_number (buf, INSN_SIZE);
28880 	  newval = newval | 0x10000000;
28881 	  md_number_to_chars (buf, newval, INSN_SIZE);
28882 	  temp = 1;
28883 	  fixP->fx_done = 1;
28884 	}
28885       else
28886 	temp = 3;
28887       goto arm_branch_common;
28888 
28889     case BFD_RELOC_ARM_PCREL_JUMP:
28890       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28891 	  && fixP->fx_addsy
28892 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
28893 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28894 	  && THUMB_IS_FUNC (fixP->fx_addsy))
28895 	{
28896 	  /* This would map to a bl<cond>, b<cond>,
28897 	     b<always> to a Thumb function. We
28898 	     need to force a relocation for this particular
28899 	     case.  */
28900 	  newval = md_chars_to_number (buf, INSN_SIZE);
28901 	  fixP->fx_done = 0;
28902 	}
28903       /* Fall through.  */
28904 
28905     case BFD_RELOC_ARM_PLT32:
28906 #endif
28907     case BFD_RELOC_ARM_PCREL_BRANCH:
28908       temp = 3;
28909       goto arm_branch_common;
28910 
28911     case BFD_RELOC_ARM_PCREL_BLX:
28912 
28913       temp = 1;
28914       if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
28915 	  && fixP->fx_addsy
28916 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
28917 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
28918 	  && ARM_IS_FUNC (fixP->fx_addsy))
28919 	{
28920 	  /* Flip the blx to a bl and warn.  */
28921 	  const char *name = S_GET_NAME (fixP->fx_addsy);
28922 	  newval = 0xeb000000;
28923 	  as_warn_where (fixP->fx_file, fixP->fx_line,
28924 			 _("blx to '%s' an ARM ISA state function changed to bl"),
28925 			  name);
28926 	  md_number_to_chars (buf, newval, INSN_SIZE);
28927 	  temp = 3;
28928 	  fixP->fx_done = 1;
28929 	}
28930 
28931 #ifdef OBJ_ELF
28932        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
28933 	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
28934 #endif
28935 
28936     arm_branch_common:
28937       /* We are going to store value (shifted right by two) in the
28938 	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
28939 	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
28940 	 also be clear.  */
28941       if (value & temp)
28942 	as_bad_where (fixP->fx_file, fixP->fx_line,
28943 		      _("misaligned branch destination"));
28944       if ((value & 0xfe000000) != 0
28945 	  && (value & 0xfe000000) != 0xfe000000)
28946 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28947 
28948       if (fixP->fx_done || !seg->use_rela_p)
28949 	{
28950 	  newval = md_chars_to_number (buf, INSN_SIZE);
28951 	  newval |= (value >> 2) & 0x00ffffff;
28952 	  /* Set the H bit on BLX instructions.  */
28953 	  if (temp == 1)
28954 	    {
28955 	      if (value & 2)
28956 		newval |= 0x01000000;
28957 	      else
28958 		newval &= ~0x01000000;
28959 	    }
28960 	  md_number_to_chars (buf, newval, INSN_SIZE);
28961 	}
28962       break;
28963 
28964     case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
28965       /* CBZ can only branch forward.  */
28966 
28967       /* Attempts to use CBZ to branch to the next instruction
28968 	 (which, strictly speaking, are prohibited) will be turned into
28969 	 no-ops.
28970 
28971 	 FIXME: It may be better to remove the instruction completely and
28972 	 perform relaxation.  */
28973       if ((offsetT) value == -2)
28974 	{
28975 	  newval = md_chars_to_number (buf, THUMB_SIZE);
28976 	  newval = 0xbf00; /* NOP encoding T1 */
28977 	  md_number_to_chars (buf, newval, THUMB_SIZE);
28978 	}
28979       else
28980 	{
28981 	  if (value & ~0x7e)
28982 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28983 
28984 	  if (fixP->fx_done || !seg->use_rela_p)
28985 	    {
28986 	      newval = md_chars_to_number (buf, THUMB_SIZE);
28987 	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
28988 	      md_number_to_chars (buf, newval, THUMB_SIZE);
28989 	    }
28990 	}
28991       break;
28992 
28993     case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
28994       if (out_of_range_p (value, 8))
28995 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
28996 
28997       if (fixP->fx_done || !seg->use_rela_p)
28998 	{
28999 	  newval = md_chars_to_number (buf, THUMB_SIZE);
29000 	  newval |= (value & 0x1ff) >> 1;
29001 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29002 	}
29003       break;
29004 
29005     case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
29006       if (out_of_range_p (value, 11))
29007 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29008 
29009       if (fixP->fx_done || !seg->use_rela_p)
29010 	{
29011 	  newval = md_chars_to_number (buf, THUMB_SIZE);
29012 	  newval |= (value & 0xfff) >> 1;
29013 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29014 	}
29015       break;
29016 
29017     /* This relocation is misnamed, it should be BRANCH21.  */
29018     case BFD_RELOC_THUMB_PCREL_BRANCH20:
29019       if (fixP->fx_addsy
29020 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29021 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29022 	  && ARM_IS_FUNC (fixP->fx_addsy)
29023 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
29024 	{
29025 	  /* Force a relocation for a branch 20 bits wide.  */
29026 	  fixP->fx_done = 0;
29027 	}
29028       if (out_of_range_p (value, 20))
29029 	as_bad_where (fixP->fx_file, fixP->fx_line,
29030 		      _("conditional branch out of range"));
29031 
29032       if (fixP->fx_done || !seg->use_rela_p)
29033 	{
29034 	  offsetT newval2;
29035 	  addressT S, J1, J2, lo, hi;
29036 
29037 	  S  = (value & 0x00100000) >> 20;
29038 	  J2 = (value & 0x00080000) >> 19;
29039 	  J1 = (value & 0x00040000) >> 18;
29040 	  hi = (value & 0x0003f000) >> 12;
29041 	  lo = (value & 0x00000ffe) >> 1;
29042 
29043 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29044 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29045 	  newval  |= (S << 10) | hi;
29046 	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
29047 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29048 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29049 	}
29050       break;
29051 
29052     case BFD_RELOC_THUMB_PCREL_BLX:
29053       /* If there is a blx from a thumb state function to
29054 	 another thumb function flip this to a bl and warn
29055 	 about it.  */
29056 
29057       if (fixP->fx_addsy
29058 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29059 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29060 	  && THUMB_IS_FUNC (fixP->fx_addsy))
29061 	{
29062 	  const char *name = S_GET_NAME (fixP->fx_addsy);
29063 	  as_warn_where (fixP->fx_file, fixP->fx_line,
29064 			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
29065 			 name);
29066 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29067 	  newval = newval | 0x1000;
29068 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29069 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29070 	  fixP->fx_done = 1;
29071 	}
29072 
29073 
29074       goto thumb_bl_common;
29075 
29076     case BFD_RELOC_THUMB_PCREL_BRANCH23:
29077       /* A bl from Thumb state ISA to an internal ARM state function
29078 	 is converted to a blx.  */
29079       if (fixP->fx_addsy
29080 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29081 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29082 	  && ARM_IS_FUNC (fixP->fx_addsy)
29083 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
29084 	{
29085 	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29086 	  newval = newval & ~0x1000;
29087 	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
29088 	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
29089 	  fixP->fx_done = 1;
29090 	}
29091 
29092     thumb_bl_common:
29093 
29094       if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29095 	/* For a BLX instruction, make sure that the relocation is rounded up
29096 	   to a word boundary.  This follows the semantics of the instruction
29097 	   which specifies that bit 1 of the target address will come from bit
29098 	   1 of the base address.  */
29099 	value = (value + 3) & ~ 3;
29100 
29101 #ifdef OBJ_ELF
29102        if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
29103 	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
29104 	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
29105 #endif
29106 
29107       if (out_of_range_p (value, 22))
29108 	{
29109 	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
29110 	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29111 	  else if (out_of_range_p (value, 24))
29112 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29113 			  _("Thumb2 branch out of range"));
29114 	}
29115 
29116       if (fixP->fx_done || !seg->use_rela_p)
29117 	encode_thumb2_b_bl_offset (buf, value);
29118 
29119       break;
29120 
29121     case BFD_RELOC_THUMB_PCREL_BRANCH25:
29122       if (out_of_range_p (value, 24))
29123 	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
29124 
29125       if (fixP->fx_done || !seg->use_rela_p)
29126 	  encode_thumb2_b_bl_offset (buf, value);
29127 
29128       break;
29129 
29130     case BFD_RELOC_8:
29131       if (fixP->fx_done || !seg->use_rela_p)
29132 	*buf = value;
29133       break;
29134 
29135     case BFD_RELOC_16:
29136       if (fixP->fx_done || !seg->use_rela_p)
29137 	md_number_to_chars (buf, value, 2);
29138       break;
29139 
29140 #ifdef OBJ_ELF
29141     case BFD_RELOC_ARM_TLS_CALL:
29142     case BFD_RELOC_ARM_THM_TLS_CALL:
29143     case BFD_RELOC_ARM_TLS_DESCSEQ:
29144     case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
29145     case BFD_RELOC_ARM_TLS_GOTDESC:
29146     case BFD_RELOC_ARM_TLS_GD32:
29147     case BFD_RELOC_ARM_TLS_LE32:
29148     case BFD_RELOC_ARM_TLS_IE32:
29149     case BFD_RELOC_ARM_TLS_LDM32:
29150     case BFD_RELOC_ARM_TLS_LDO32:
29151       S_SET_THREAD_LOCAL (fixP->fx_addsy);
29152       break;
29153 
29154       /* Same handling as above, but with the arm_fdpic guard.  */
29155     case BFD_RELOC_ARM_TLS_GD32_FDPIC:
29156     case BFD_RELOC_ARM_TLS_IE32_FDPIC:
29157     case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
29158       if (arm_fdpic)
29159 	{
29160 	  S_SET_THREAD_LOCAL (fixP->fx_addsy);
29161 	}
29162       else
29163 	{
29164 	  as_bad_where (fixP->fx_file, fixP->fx_line,
29165 			_("Relocation supported only in FDPIC mode"));
29166 	}
29167       break;
29168 
29169     case BFD_RELOC_ARM_GOT32:
29170     case BFD_RELOC_ARM_GOTOFF:
29171       break;
29172 
29173     case BFD_RELOC_ARM_GOT_PREL:
29174       if (fixP->fx_done || !seg->use_rela_p)
29175 	md_number_to_chars (buf, value, 4);
29176       break;
29177 
29178     case BFD_RELOC_ARM_TARGET2:
29179       /* TARGET2 is not partial-inplace, so we need to write the
29180 	 addend here for REL targets, because it won't be written out
29181 	 during reloc processing later.  */
29182       if (fixP->fx_done || !seg->use_rela_p)
29183 	md_number_to_chars (buf, fixP->fx_offset, 4);
29184       break;
29185 
29186       /* Relocations for FDPIC.  */
29187     case BFD_RELOC_ARM_GOTFUNCDESC:
29188     case BFD_RELOC_ARM_GOTOFFFUNCDESC:
29189     case BFD_RELOC_ARM_FUNCDESC:
29190       if (arm_fdpic)
29191 	{
29192 	  if (fixP->fx_done || !seg->use_rela_p)
29193 	    md_number_to_chars (buf, 0, 4);
29194 	}
29195       else
29196 	{
29197 	  as_bad_where (fixP->fx_file, fixP->fx_line,
29198 			_("Relocation supported only in FDPIC mode"));
29199       }
29200       break;
29201 #endif
29202 
29203     case BFD_RELOC_RVA:
29204     case BFD_RELOC_32:
29205     case BFD_RELOC_ARM_TARGET1:
29206     case BFD_RELOC_ARM_ROSEGREL32:
29207     case BFD_RELOC_ARM_SBREL32:
29208     case BFD_RELOC_32_PCREL:
29209 #ifdef TE_PE
29210     case BFD_RELOC_32_SECREL:
29211 #endif
29212       if (fixP->fx_done || !seg->use_rela_p)
29213 #ifdef TE_WINCE
29214 	/* For WinCE we only do this for pcrel fixups.  */
29215 	if (fixP->fx_done || fixP->fx_pcrel)
29216 #endif
29217 	  md_number_to_chars (buf, value, 4);
29218       break;
29219 
29220 #ifdef OBJ_ELF
29221     case BFD_RELOC_ARM_PREL31:
29222       if (fixP->fx_done || !seg->use_rela_p)
29223 	{
29224 	  newval = md_chars_to_number (buf, 4) & 0x80000000;
29225 	  if ((value ^ (value >> 1)) & 0x40000000)
29226 	    {
29227 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29228 			    _("rel31 relocation overflow"));
29229 	    }
29230 	  newval |= value & 0x7fffffff;
29231 	  md_number_to_chars (buf, newval, 4);
29232 	}
29233       break;
29234 #endif
29235 
29236     case BFD_RELOC_ARM_CP_OFF_IMM:
29237     case BFD_RELOC_ARM_T32_CP_OFF_IMM:
29238     case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
29239       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
29240 	newval = md_chars_to_number (buf, INSN_SIZE);
29241       else
29242 	newval = get_thumb32_insn (buf);
29243       if ((newval & 0x0f200f00) == 0x0d000900)
29244 	{
29245 	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
29246 	     has permitted values that are multiples of 2, in the range -510
29247 	     to 510.  */
29248 	  if (value + 510 > 510 + 510 || (value & 1))
29249 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29250 			  _("co-processor offset out of range"));
29251 	}
29252       else if ((newval & 0xfe001f80) == 0xec000f80)
29253 	{
29254 	  if (value + 511 > 512 + 511 || (value & 3))
29255 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29256 			  _("co-processor offset out of range"));
29257 	}
29258       else if (value + 1023 > 1023 + 1023 || (value & 3))
29259 	as_bad_where (fixP->fx_file, fixP->fx_line,
29260 		      _("co-processor offset out of range"));
29261     cp_off_common:
29262       sign = (offsetT) value > 0;
29263       if ((offsetT) value < 0)
29264 	value = -value;
29265       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29266 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29267 	newval = md_chars_to_number (buf, INSN_SIZE);
29268       else
29269 	newval = get_thumb32_insn (buf);
29270       if (value == 0)
29271 	{
29272 	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29273 	    newval &= 0xffffff80;
29274 	  else
29275 	    newval &= 0xffffff00;
29276 	}
29277       else
29278 	{
29279 	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
29280 	    newval &= 0xff7fff80;
29281 	  else
29282 	    newval &= 0xff7fff00;
29283 	  if ((newval & 0x0f200f00) == 0x0d000900)
29284 	    {
29285 	      /* This is a fp16 vstr/vldr.
29286 
29287 		 It requires the immediate offset in the instruction is shifted
29288 		 left by 1 to be a half-word offset.
29289 
29290 		 Here, left shift by 1 first, and later right shift by 2
29291 		 should get the right offset.  */
29292 	      value <<= 1;
29293 	    }
29294 	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
29295 	}
29296       if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
29297 	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
29298 	md_number_to_chars (buf, newval, INSN_SIZE);
29299       else
29300 	put_thumb32_insn (buf, newval);
29301       break;
29302 
29303     case BFD_RELOC_ARM_CP_OFF_IMM_S2:
29304     case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
29305       if (value + 255 > 255 + 255)
29306 	as_bad_where (fixP->fx_file, fixP->fx_line,
29307 		      _("co-processor offset out of range"));
29308       value *= 4;
29309       goto cp_off_common;
29310 
29311     case BFD_RELOC_ARM_THUMB_OFFSET:
29312       newval = md_chars_to_number (buf, THUMB_SIZE);
29313       /* Exactly what ranges, and where the offset is inserted depends
29314 	 on the type of instruction, we can establish this from the
29315 	 top 4 bits.  */
29316       switch (newval >> 12)
29317 	{
29318 	case 4: /* PC load.  */
29319 	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
29320 	     forced to zero for these loads; md_pcrel_from has already
29321 	     compensated for this.  */
29322 	  if (value & 3)
29323 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29324 			  _("invalid offset, target not word aligned (0x%08lX)"),
29325 			  (((unsigned long) fixP->fx_frag->fr_address
29326 			    + (unsigned long) fixP->fx_where) & ~3)
29327 			  + (unsigned long) value);
29328 	  else if (get_recorded_alignment (seg) < 2)
29329 	    as_warn_where (fixP->fx_file, fixP->fx_line,
29330 			   _("section does not have enough alignment to ensure safe PC-relative loads"));
29331 
29332 	  if (value & ~0x3fc)
29333 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29334 			  _("invalid offset, value too big (0x%08lX)"),
29335 			  (long) value);
29336 
29337 	  newval |= value >> 2;
29338 	  break;
29339 
29340 	case 9: /* SP load/store.  */
29341 	  if (value & ~0x3fc)
29342 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29343 			  _("invalid offset, value too big (0x%08lX)"),
29344 			  (long) value);
29345 	  newval |= value >> 2;
29346 	  break;
29347 
29348 	case 6: /* Word load/store.  */
29349 	  if (value & ~0x7c)
29350 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29351 			  _("invalid offset, value too big (0x%08lX)"),
29352 			  (long) value);
29353 	  newval |= value << 4; /* 6 - 2.  */
29354 	  break;
29355 
29356 	case 7: /* Byte load/store.  */
29357 	  if (value & ~0x1f)
29358 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29359 			  _("invalid offset, value too big (0x%08lX)"),
29360 			  (long) value);
29361 	  newval |= value << 6;
29362 	  break;
29363 
29364 	case 8: /* Halfword load/store.	 */
29365 	  if (value & ~0x3e)
29366 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29367 			  _("invalid offset, value too big (0x%08lX)"),
29368 			  (long) value);
29369 	  newval |= value << 5; /* 6 - 1.  */
29370 	  break;
29371 
29372 	default:
29373 	  as_bad_where (fixP->fx_file, fixP->fx_line,
29374 			"Unable to process relocation for thumb opcode: %lx",
29375 			(unsigned long) newval);
29376 	  break;
29377 	}
29378       md_number_to_chars (buf, newval, THUMB_SIZE);
29379       break;
29380 
29381     case BFD_RELOC_ARM_THUMB_ADD:
29382       /* This is a complicated relocation, since we use it for all of
29383 	 the following immediate relocations:
29384 
29385 	    3bit ADD/SUB
29386 	    8bit ADD/SUB
29387 	    9bit ADD/SUB SP word-aligned
29388 	   10bit ADD PC/SP word-aligned
29389 
29390 	 The type of instruction being processed is encoded in the
29391 	 instruction field:
29392 
29393 	   0x8000  SUB
29394 	   0x00F0  Rd
29395 	   0x000F  Rs
29396       */
29397       newval = md_chars_to_number (buf, THUMB_SIZE);
29398       {
29399 	int rd = (newval >> 4) & 0xf;
29400 	int rs = newval & 0xf;
29401 	int subtract = !!(newval & 0x8000);
29402 
29403 	/* Check for HI regs, only very restricted cases allowed:
29404 	   Adjusting SP, and using PC or SP to get an address.	*/
29405 	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
29406 	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
29407 	  as_bad_where (fixP->fx_file, fixP->fx_line,
29408 			_("invalid Hi register with immediate"));
29409 
29410 	/* If value is negative, choose the opposite instruction.  */
29411 	if ((offsetT) value < 0)
29412 	  {
29413 	    value = -value;
29414 	    subtract = !subtract;
29415 	    if ((offsetT) value < 0)
29416 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29417 			    _("immediate value out of range"));
29418 	  }
29419 
29420 	if (rd == REG_SP)
29421 	  {
29422  	    if (value & ~0x1fc)
29423 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29424 			    _("invalid immediate for stack address calculation"));
29425 	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
29426 	    newval |= value >> 2;
29427 	  }
29428 	else if (rs == REG_PC || rs == REG_SP)
29429 	  {
29430 	    /* PR gas/18541.  If the addition is for a defined symbol
29431 	       within range of an ADR instruction then accept it.  */
29432 	    if (subtract
29433 		&& value == 4
29434 		&& fixP->fx_addsy != NULL)
29435 	      {
29436 		subtract = 0;
29437 
29438 		if (! S_IS_DEFINED (fixP->fx_addsy)
29439 		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
29440 		    || S_IS_WEAK (fixP->fx_addsy))
29441 		  {
29442 		    as_bad_where (fixP->fx_file, fixP->fx_line,
29443 				  _("address calculation needs a strongly defined nearby symbol"));
29444 		  }
29445 		else
29446 		  {
29447 		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
29448 
29449 		    /* Round up to the next 4-byte boundary.  */
29450 		    if (v & 3)
29451 		      v = (v + 3) & ~ 3;
29452 		    else
29453 		      v += 4;
29454 		    v = S_GET_VALUE (fixP->fx_addsy) - v;
29455 
29456 		    if (v & ~0x3fc)
29457 		      {
29458 			as_bad_where (fixP->fx_file, fixP->fx_line,
29459 				      _("symbol too far away"));
29460 		      }
29461 		    else
29462 		      {
29463 			fixP->fx_done = 1;
29464 			value = v;
29465 		      }
29466 		  }
29467 	      }
29468 
29469 	    if (subtract || value & ~0x3fc)
29470 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29471 			    _("invalid immediate for address calculation (value = 0x%08lX)"),
29472 			    (unsigned long) (subtract ? - value : value));
29473 	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
29474 	    newval |= rd << 8;
29475 	    newval |= value >> 2;
29476 	  }
29477 	else if (rs == rd)
29478 	  {
29479 	    if (value & ~0xff)
29480 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29481 			    _("immediate value out of range"));
29482 	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
29483 	    newval |= (rd << 8) | value;
29484 	  }
29485 	else
29486 	  {
29487 	    if (value & ~0x7)
29488 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29489 			    _("immediate value out of range"));
29490 	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
29491 	    newval |= rd | (rs << 3) | (value << 6);
29492 	  }
29493       }
29494       md_number_to_chars (buf, newval, THUMB_SIZE);
29495       break;
29496 
29497     case BFD_RELOC_ARM_THUMB_IMM:
29498       newval = md_chars_to_number (buf, THUMB_SIZE);
29499       if (value > 255)
29500 	as_bad_where (fixP->fx_file, fixP->fx_line,
29501 		      _("invalid immediate: %ld is out of range"),
29502 		      (long) value);
29503       newval |= value;
29504       md_number_to_chars (buf, newval, THUMB_SIZE);
29505       break;
29506 
29507     case BFD_RELOC_ARM_THUMB_SHIFT:
29508       /* 5bit shift value (0..32).  LSL cannot take 32.	 */
29509       newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
29510       temp = newval & 0xf800;
29511       if (value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
29512 	as_bad_where (fixP->fx_file, fixP->fx_line,
29513 		      _("invalid shift value: %ld"), (long) value);
29514       /* Shifts of zero must be encoded as LSL.	 */
29515       if (value == 0)
29516 	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
29517       /* Shifts of 32 are encoded as zero.  */
29518       else if (value == 32)
29519 	value = 0;
29520       newval |= value << 6;
29521       md_number_to_chars (buf, newval, THUMB_SIZE);
29522       break;
29523 
29524     case BFD_RELOC_VTABLE_INHERIT:
29525     case BFD_RELOC_VTABLE_ENTRY:
29526       fixP->fx_done = 0;
29527       return;
29528 
29529     case BFD_RELOC_ARM_MOVW:
29530     case BFD_RELOC_ARM_MOVT:
29531     case BFD_RELOC_ARM_THUMB_MOVW:
29532     case BFD_RELOC_ARM_THUMB_MOVT:
29533       if (fixP->fx_done || !seg->use_rela_p)
29534 	{
29535 	  /* REL format relocations are limited to a 16-bit addend.  */
29536 	  if (!fixP->fx_done)
29537 	    {
29538 	      if (value + 0x8000 > 0x7fff + 0x8000)
29539 		  as_bad_where (fixP->fx_file, fixP->fx_line,
29540 				_("offset out of range"));
29541 	    }
29542 	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29543 		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29544 	    {
29545 	      value >>= 16;
29546 	    }
29547 
29548 	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29549 	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29550 	    {
29551 	      newval = get_thumb32_insn (buf);
29552 	      newval &= 0xfbf08f00;
29553 	      newval |= (value & 0xf000) << 4;
29554 	      newval |= (value & 0x0800) << 15;
29555 	      newval |= (value & 0x0700) << 4;
29556 	      newval |= (value & 0x00ff);
29557 	      put_thumb32_insn (buf, newval);
29558 	    }
29559 	  else
29560 	    {
29561 	      newval = md_chars_to_number (buf, 4);
29562 	      newval &= 0xfff0f000;
29563 	      newval |= value & 0x0fff;
29564 	      newval |= (value & 0xf000) << 4;
29565 	      md_number_to_chars (buf, newval, 4);
29566 	    }
29567 	}
29568       return;
29569 
29570    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
29571    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
29572    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29573    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29574       gas_assert (!fixP->fx_done);
29575       {
29576 	bfd_vma insn;
29577 	bool is_mov;
29578 	bfd_vma encoded_addend = value;
29579 
29580 	/* Check that addend can be encoded in instruction.  */
29581 	if (!seg->use_rela_p && value > 255)
29582 	  as_bad_where (fixP->fx_file, fixP->fx_line,
29583 			_("the offset 0x%08lX is not representable"),
29584 			(unsigned long) encoded_addend);
29585 
29586 	/* Extract the instruction.  */
29587 	insn = md_chars_to_number (buf, THUMB_SIZE);
29588 	is_mov = (insn & 0xf800) == 0x2000;
29589 
29590 	/* Encode insn.  */
29591 	if (is_mov)
29592 	  {
29593 	    if (!seg->use_rela_p)
29594 	      insn |= encoded_addend;
29595 	  }
29596 	else
29597 	  {
29598 	    int rd, rs;
29599 
29600 	    /* Extract the instruction.  */
29601 	     /* Encoding is the following
29602 		0x8000  SUB
29603 		0x00F0  Rd
29604 		0x000F  Rs
29605 	     */
29606 	     /* The following conditions must be true :
29607 		- ADD
29608 		- Rd == Rs
29609 		- Rd <= 7
29610 	     */
29611 	    rd = (insn >> 4) & 0xf;
29612 	    rs = insn & 0xf;
29613 	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
29614 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29615 			_("Unable to process relocation for thumb opcode: %lx"),
29616 			(unsigned long) insn);
29617 
29618 	    /* Encode as ADD immediate8 thumb 1 code.  */
29619 	    insn = 0x3000 | (rd << 8);
29620 
29621 	    /* Place the encoded addend into the first 8 bits of the
29622 	       instruction.  */
29623 	    if (!seg->use_rela_p)
29624 	      insn |= encoded_addend;
29625 	  }
29626 
29627 	/* Update the instruction.  */
29628 	md_number_to_chars (buf, insn, THUMB_SIZE);
29629       }
29630       break;
29631 
29632    case BFD_RELOC_ARM_ALU_PC_G0_NC:
29633    case BFD_RELOC_ARM_ALU_PC_G0:
29634    case BFD_RELOC_ARM_ALU_PC_G1_NC:
29635    case BFD_RELOC_ARM_ALU_PC_G1:
29636    case BFD_RELOC_ARM_ALU_PC_G2:
29637    case BFD_RELOC_ARM_ALU_SB_G0_NC:
29638    case BFD_RELOC_ARM_ALU_SB_G0:
29639    case BFD_RELOC_ARM_ALU_SB_G1_NC:
29640    case BFD_RELOC_ARM_ALU_SB_G1:
29641    case BFD_RELOC_ARM_ALU_SB_G2:
29642      gas_assert (!fixP->fx_done);
29643      if (!seg->use_rela_p)
29644        {
29645 	 bfd_vma insn;
29646 	 bfd_vma encoded_addend;
29647 	 bfd_vma addend_abs = llabs ((offsetT) value);
29648 
29649 	 /* Check that the absolute value of the addend can be
29650 	    expressed as an 8-bit constant plus a rotation.  */
29651 	 encoded_addend = encode_arm_immediate (addend_abs);
29652 	 if (encoded_addend == (unsigned int) FAIL)
29653 	   as_bad_where (fixP->fx_file, fixP->fx_line,
29654 			 _("the offset 0x%08lX is not representable"),
29655 			 (unsigned long) addend_abs);
29656 
29657 	 /* Extract the instruction.  */
29658 	 insn = md_chars_to_number (buf, INSN_SIZE);
29659 
29660 	 /* If the addend is positive, use an ADD instruction.
29661 	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
29662 	 insn &= 0xff1fffff;
29663 	 if ((offsetT) value < 0)
29664 	   insn |= 1 << 22;
29665 	 else
29666 	   insn |= 1 << 23;
29667 
29668 	 /* Place the encoded addend into the first 12 bits of the
29669 	    instruction.  */
29670 	 insn &= 0xfffff000;
29671 	 insn |= encoded_addend;
29672 
29673 	 /* Update the instruction.  */
29674 	 md_number_to_chars (buf, insn, INSN_SIZE);
29675        }
29676      break;
29677 
29678     case BFD_RELOC_ARM_LDR_PC_G0:
29679     case BFD_RELOC_ARM_LDR_PC_G1:
29680     case BFD_RELOC_ARM_LDR_PC_G2:
29681     case BFD_RELOC_ARM_LDR_SB_G0:
29682     case BFD_RELOC_ARM_LDR_SB_G1:
29683     case BFD_RELOC_ARM_LDR_SB_G2:
29684       gas_assert (!fixP->fx_done);
29685       if (!seg->use_rela_p)
29686 	{
29687 	  bfd_vma insn;
29688 	  bfd_vma addend_abs = llabs ((offsetT) value);
29689 
29690 	  /* Check that the absolute value of the addend can be
29691 	     encoded in 12 bits.  */
29692 	  if (addend_abs >= 0x1000)
29693 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29694 			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29695 			  (unsigned long) addend_abs);
29696 
29697 	  /* Extract the instruction.  */
29698 	  insn = md_chars_to_number (buf, INSN_SIZE);
29699 
29700 	  /* If the addend is negative, clear bit 23 of the instruction.
29701 	     Otherwise set it.  */
29702 	  if ((offsetT) value < 0)
29703 	    insn &= ~(1 << 23);
29704 	  else
29705 	    insn |= 1 << 23;
29706 
29707 	  /* Place the absolute value of the addend into the first 12 bits
29708 	     of the instruction.  */
29709 	  insn &= 0xfffff000;
29710 	  insn |= addend_abs;
29711 
29712 	  /* Update the instruction.  */
29713 	  md_number_to_chars (buf, insn, INSN_SIZE);
29714 	}
29715       break;
29716 
29717     case BFD_RELOC_ARM_LDRS_PC_G0:
29718     case BFD_RELOC_ARM_LDRS_PC_G1:
29719     case BFD_RELOC_ARM_LDRS_PC_G2:
29720     case BFD_RELOC_ARM_LDRS_SB_G0:
29721     case BFD_RELOC_ARM_LDRS_SB_G1:
29722     case BFD_RELOC_ARM_LDRS_SB_G2:
29723       gas_assert (!fixP->fx_done);
29724       if (!seg->use_rela_p)
29725 	{
29726 	  bfd_vma insn;
29727 	  bfd_vma addend_abs = llabs ((offsetT) value);
29728 
29729 	  /* Check that the absolute value of the addend can be
29730 	     encoded in 8 bits.  */
29731 	  if (addend_abs >= 0x100)
29732 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29733 			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29734 			  (unsigned long) addend_abs);
29735 
29736 	  /* Extract the instruction.  */
29737 	  insn = md_chars_to_number (buf, INSN_SIZE);
29738 
29739 	  /* If the addend is negative, clear bit 23 of the instruction.
29740 	     Otherwise set it.  */
29741 	  if ((offsetT) value < 0)
29742 	    insn &= ~(1 << 23);
29743 	  else
29744 	    insn |= 1 << 23;
29745 
29746 	  /* Place the first four bits of the absolute value of the addend
29747 	     into the first 4 bits of the instruction, and the remaining
29748 	     four into bits 8 .. 11.  */
29749 	  insn &= 0xfffff0f0;
29750 	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29751 
29752 	  /* Update the instruction.  */
29753 	  md_number_to_chars (buf, insn, INSN_SIZE);
29754 	}
29755       break;
29756 
29757     case BFD_RELOC_ARM_LDC_PC_G0:
29758     case BFD_RELOC_ARM_LDC_PC_G1:
29759     case BFD_RELOC_ARM_LDC_PC_G2:
29760     case BFD_RELOC_ARM_LDC_SB_G0:
29761     case BFD_RELOC_ARM_LDC_SB_G1:
29762     case BFD_RELOC_ARM_LDC_SB_G2:
29763       gas_assert (!fixP->fx_done);
29764       if (!seg->use_rela_p)
29765 	{
29766 	  bfd_vma insn;
29767 	  bfd_vma addend_abs = llabs ((offsetT) value);
29768 
29769 	  /* Check that the absolute value of the addend is a multiple of
29770 	     four and, when divided by four, fits in 8 bits.  */
29771 	  if (addend_abs & 0x3)
29772 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29773 			  _("bad offset 0x%08lX (must be word-aligned)"),
29774 			  (unsigned long) addend_abs);
29775 
29776 	  if ((addend_abs >> 2) > 0xff)
29777 	    as_bad_where (fixP->fx_file, fixP->fx_line,
29778 			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29779 			  (unsigned long) addend_abs);
29780 
29781 	  /* Extract the instruction.  */
29782 	  insn = md_chars_to_number (buf, INSN_SIZE);
29783 
29784 	  /* If the addend is negative, clear bit 23 of the instruction.
29785 	     Otherwise set it.  */
29786 	  if ((offsetT) value < 0)
29787 	    insn &= ~(1 << 23);
29788 	  else
29789 	    insn |= 1 << 23;
29790 
29791 	  /* Place the addend (divided by four) into the first eight
29792 	     bits of the instruction.  */
29793 	  insn &= 0xfffffff0;
29794 	  insn |= addend_abs >> 2;
29795 
29796 	  /* Update the instruction.  */
29797 	  md_number_to_chars (buf, insn, INSN_SIZE);
29798 	}
29799       break;
29800 
29801     case BFD_RELOC_THUMB_PCREL_BRANCH5:
29802       if (fixP->fx_addsy
29803 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29804 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29805 	  && ARM_IS_FUNC (fixP->fx_addsy)
29806 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29807 	{
29808 	  /* Force a relocation for a branch 5 bits wide.  */
29809 	  fixP->fx_done = 0;
29810 	}
29811       if (v8_1_branch_value_check (value, 5, false) == FAIL)
29812 	as_bad_where (fixP->fx_file, fixP->fx_line,
29813 		      BAD_BRANCH_OFF);
29814 
29815       if (fixP->fx_done || !seg->use_rela_p)
29816 	{
29817 	  addressT boff = value >> 1;
29818 
29819 	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29820 	  newval |= (boff << 7);
29821 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29822 	}
29823       break;
29824 
29825     case BFD_RELOC_THUMB_PCREL_BFCSEL:
29826       if (fixP->fx_addsy
29827 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29828 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29829 	  && ARM_IS_FUNC (fixP->fx_addsy)
29830 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29831 	{
29832 	  fixP->fx_done = 0;
29833 	}
29834       if ((value & ~0x7f) && ((value & ~0x3f) != (valueT) ~0x3f))
29835 	as_bad_where (fixP->fx_file, fixP->fx_line,
29836 		      _("branch out of range"));
29837 
29838       if (fixP->fx_done || !seg->use_rela_p)
29839 	{
29840 	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29841 
29842 	  addressT boff = ((newval & 0x0780) >> 7) << 1;
29843 	  addressT diff = value - boff;
29844 
29845 	  if (diff == 4)
29846 	    {
29847 	      newval |= 1 << 1; /* T bit.  */
29848 	    }
29849 	  else if (diff != 2)
29850 	    {
29851 	      as_bad_where (fixP->fx_file, fixP->fx_line,
29852 			    _("out of range label-relative fixup value"));
29853 	    }
29854 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29855 	}
29856       break;
29857 
29858     case BFD_RELOC_ARM_THUMB_BF17:
29859       if (fixP->fx_addsy
29860 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29861 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29862 	  && ARM_IS_FUNC (fixP->fx_addsy)
29863 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29864 	{
29865 	  /* Force a relocation for a branch 17 bits wide.  */
29866 	  fixP->fx_done = 0;
29867 	}
29868 
29869       if (v8_1_branch_value_check (value, 17, true) == FAIL)
29870 	as_bad_where (fixP->fx_file, fixP->fx_line,
29871 		      BAD_BRANCH_OFF);
29872 
29873       if (fixP->fx_done || !seg->use_rela_p)
29874 	{
29875 	  offsetT newval2;
29876 	  addressT immA, immB, immC;
29877 
29878 	  immA = (value & 0x0001f000) >> 12;
29879 	  immB = (value & 0x00000ffc) >> 2;
29880 	  immC = (value & 0x00000002) >> 1;
29881 
29882 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29883 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29884 	  newval  |= immA;
29885 	  newval2 |= (immC << 11) | (immB << 1);
29886 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29887 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29888 	}
29889       break;
29890 
29891     case BFD_RELOC_ARM_THUMB_BF19:
29892       if (fixP->fx_addsy
29893 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29894 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29895 	  && ARM_IS_FUNC (fixP->fx_addsy)
29896 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29897 	{
29898 	  /* Force a relocation for a branch 19 bits wide.  */
29899 	  fixP->fx_done = 0;
29900 	}
29901 
29902       if (v8_1_branch_value_check (value, 19, true) == FAIL)
29903 	as_bad_where (fixP->fx_file, fixP->fx_line,
29904 		      BAD_BRANCH_OFF);
29905 
29906       if (fixP->fx_done || !seg->use_rela_p)
29907 	{
29908 	  offsetT newval2;
29909 	  addressT immA, immB, immC;
29910 
29911 	  immA = (value & 0x0007f000) >> 12;
29912 	  immB = (value & 0x00000ffc) >> 2;
29913 	  immC = (value & 0x00000002) >> 1;
29914 
29915 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29916 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29917 	  newval  |= immA;
29918 	  newval2 |= (immC << 11) | (immB << 1);
29919 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29920 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29921 	}
29922       break;
29923 
29924     case BFD_RELOC_ARM_THUMB_BF13:
29925       if (fixP->fx_addsy
29926 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29927 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29928 	  && ARM_IS_FUNC (fixP->fx_addsy)
29929 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29930 	{
29931 	  /* Force a relocation for a branch 13 bits wide.  */
29932 	  fixP->fx_done = 0;
29933 	}
29934 
29935       if (v8_1_branch_value_check (value, 13, true) == FAIL)
29936 	as_bad_where (fixP->fx_file, fixP->fx_line,
29937 		      BAD_BRANCH_OFF);
29938 
29939       if (fixP->fx_done || !seg->use_rela_p)
29940 	{
29941 	  offsetT newval2;
29942 	  addressT immA, immB, immC;
29943 
29944 	  immA = (value & 0x00001000) >> 12;
29945 	  immB = (value & 0x00000ffc) >> 2;
29946 	  immC = (value & 0x00000002) >> 1;
29947 
29948 	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29949 	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29950 	  newval  |= immA;
29951 	  newval2 |= (immC << 11) | (immB << 1);
29952 	  md_number_to_chars (buf, newval, THUMB_SIZE);
29953 	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29954 	}
29955       break;
29956 
29957     case BFD_RELOC_ARM_THUMB_LOOP12:
29958       if (fixP->fx_addsy
29959 	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29960 	  && !S_FORCE_RELOC (fixP->fx_addsy, true)
29961 	  && ARM_IS_FUNC (fixP->fx_addsy)
29962 	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29963 	{
29964 	  /* Force a relocation for a branch 12 bits wide.  */
29965 	  fixP->fx_done = 0;
29966 	}
29967 
29968       bfd_vma insn = get_thumb32_insn (buf);
29969       /* le lr, <label>, le <label> or letp lr, <label> */
29970       if (((insn & 0xffffffff) == 0xf00fc001)
29971 	  || ((insn & 0xffffffff) == 0xf02fc001)
29972 	  || ((insn & 0xffffffff) == 0xf01fc001))
29973 	value = -value;
29974 
29975       if (v8_1_branch_value_check (value, 12, false) == FAIL)
29976 	as_bad_where (fixP->fx_file, fixP->fx_line,
29977 		      BAD_BRANCH_OFF);
29978       if (fixP->fx_done || !seg->use_rela_p)
29979 	{
29980 	  addressT imml, immh;
29981 
29982 	  immh = (value & 0x00000ffc) >> 2;
29983 	  imml = (value & 0x00000002) >> 1;
29984 
29985 	  newval  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29986 	  newval |= (imml << 11) | (immh << 1);
29987 	  md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29988 	}
29989       break;
29990 
29991     case BFD_RELOC_ARM_V4BX:
29992       /* This will need to go in the object file.  */
29993       fixP->fx_done = 0;
29994       break;
29995 
29996     case BFD_RELOC_UNUSED:
29997     default:
29998       as_bad_where (fixP->fx_file, fixP->fx_line,
29999 		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
30000     }
30001 }
30002 
30003 /* Translate internal representation of relocation info to BFD target
30004    format.  */
30005 
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  /* BFD-level relocation record; freed by the caller/BFD, not here.  */
  reloc = XNEW (arelent);

  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend depends on whether the target
     uses RELA (explicit addend) or REL (addend stored in the section).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Map the internal fixup type onto the BFD relocation code, turning
     the data relocs into their PC-relative forms where needed.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types pass through unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later do not emit BLX relocations; a plain branch
	 relocation is used and the linker inserts the mode change.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These branch forms have no object-file relocation; they must
	 have been resolved at assembly time.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Internal-only fixup types: name them in the diagnostic.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reloc against _GLOBAL_OFFSET_TABLE_ becomes GOTPC.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
30289 
30290 /* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
30291 
30292 void
cons_fix_new_arm(fragS * frag,int where,int size,expressionS * exp,bfd_reloc_code_real_type reloc)30293 cons_fix_new_arm (fragS *	frag,
30294 		  int		where,
30295 		  int		size,
30296 		  expressionS * exp,
30297 		  bfd_reloc_code_real_type reloc)
30298 {
30299   int pcrel = 0;
30300 
30301   /* Pick a reloc.
30302      FIXME: @@ Should look at CPU word size.  */
30303   switch (size)
30304     {
30305     case 1:
30306       reloc = BFD_RELOC_8;
30307       break;
30308     case 2:
30309       reloc = BFD_RELOC_16;
30310       break;
30311     case 4:
30312     default:
30313       reloc = BFD_RELOC_32;
30314       break;
30315     case 8:
30316       reloc = BFD_RELOC_64;
30317       break;
30318     }
30319 
30320 #ifdef TE_PE
30321   if (exp->X_op == O_secrel)
30322   {
30323     exp->X_op = O_symbol;
30324     reloc = BFD_RELOC_32_SECREL;
30325   }
30326 #endif
30327 
30328   fix_new_exp (frag, where, size, exp, pcrel, reloc);
30329 }
30330 
#if defined (OBJ_COFF)
void
arm_validate_fix (fixS * fixP)
{
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (fixP->fx_r_type != BFD_RELOC_THUMB_PCREL_BRANCH23
      || fixP->fx_addsy == NULL
      || ! S_IS_DEFINED (fixP->fx_addsy)
      || THUMB_IS_FUNC (fixP->fx_addsy))
    return;

  fixP->fx_addsy = find_real_start (fixP->fx_addsy);
}
#endif
30348 
30349 
/* Decide whether the fixup FIXP must be kept as a relocation for the
   linker rather than resolved at assembly time.  Returns non-zero to
   force the relocation.  */

int
arm_force_relocation (struct fix * fixp)
{
#if defined (OBJ_COFF) && defined (TE_PE)
  /* PE base relocations must always reach the object file.  */
  if (fixp->fx_r_type == BFD_RELOC_RVA)
    return 1;
#endif

  /* In case we have a call or a branch to a function in ARM ISA mode from
     a thumb function or vice-versa force the relocation. These relocations
     are cleared off for some cores that might have blx and simple transformations
     are possible.  */

#ifdef OBJ_ELF
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* ARM-mode branch targeting a Thumb function: interworking needed.  */
      if (THUMB_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Thumb-mode branch targeting an ARM function: interworking needed.  */
      if (ARM_IS_FUNC (fixp->fx_addsy))
	return 1;
      break;

    default:
      break;
    }
#endif

  /* Resolve these relocations even if the symbol is extern or weak.
     Technically this is probably wrong due to symbol preemption.
     In practice these relocations do not have enough range to be useful
     at dynamic link time, and some code (e.g. in the Linux kernel)
     expects these references to be resolved.  */
  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
      || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
    return 0;

  /* Always leave these relocations for the linker.
     The range test relies on the BFD group-relocation codes being
     contiguous from ALU_PC_G0_NC to LDC_SB_G2.  */
  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
    return 1;

  /* Always generate relocations against function symbols.  */
  if (fixp->fx_r_type == BFD_RELOC_32
      && fixp->fx_addsy
      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
    return 1;

  return generic_force_reloc (fixp);
}
30422 
30423 #if defined (OBJ_ELF) || defined (OBJ_COFF)
30424 /* Relocations against function names must be left unadjusted,
30425    so that the linker can use this information to generate interworking
30426    stubs.  The MIPS version of this function
30427    also prevents relocations that are mips-16 specific, but I do not
30428    know why it does this.
30429 
30430    FIXME:
30431    There is one other problem that ought to be addressed here, but
30432    which currently is not:  Taking the address of a label (rather
30433    than a function) and then later jumping to that address.  Such
30434    addresses also ought to have their bottom bit set (assuming that
30435    they reside in Thumb code), but at the moment they will not.	 */
30436 
30437 bool
arm_fix_adjustable(fixS * fixP)30438 arm_fix_adjustable (fixS * fixP)
30439 {
30440   if (fixP->fx_addsy == NULL)
30441     return 1;
30442 
30443   /* Preserve relocations against symbols with function type.  */
30444   if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
30445     return false;
30446 
30447   if (THUMB_IS_FUNC (fixP->fx_addsy)
30448       && fixP->fx_subsy == NULL)
30449     return false;
30450 
30451   /* We need the symbol name for the VTABLE entries.  */
30452   if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
30453       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
30454     return false;
30455 
30456   /* Don't allow symbols to be discarded on GOT related relocs.	 */
30457   if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
30458       || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
30459       || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
30460       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
30461       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
30462       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
30463       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
30464       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
30465       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
30466       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
30467       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
30468       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
30469       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
30470       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
30471       || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
30472       || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
30473       || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
30474     return false;
30475 
30476   /* Similarly for group relocations.  */
30477   if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30478        && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30479       || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30480     return false;
30481 
30482   /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
30483   if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
30484       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
30485       || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
30486       || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
30487       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
30488       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
30489       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
30490       || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
30491     return false;
30492 
30493   /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
30494      offsets, so keep these symbols.  */
30495   if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
30496       && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
30497     return false;
30498 
30499   return true;
30500 }
30501 #endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
30502 
30503 #ifdef OBJ_ELF
30504 const char *
elf32_arm_target_format(void)30505 elf32_arm_target_format (void)
30506 {
30507 #if defined (TE_VXWORKS)
30508   return (target_big_endian
30509 	  ? "elf32-bigarm-vxworks"
30510 	  : "elf32-littlearm-vxworks");
30511 #elif defined (TE_NACL)
30512   return (target_big_endian
30513 	  ? "elf32-bigarm-nacl"
30514 	  : "elf32-littlearm-nacl");
30515 #else
30516   if (arm_fdpic)
30517     {
30518       if (target_big_endian)
30519 	return "elf32-bigarm-fdpic";
30520       else
30521 	return "elf32-littlearm-fdpic";
30522     }
30523   else
30524     {
30525       if (target_big_endian)
30526 	return "elf32-bigarm";
30527       else
30528 	return "elf32-littlearm";
30529     }
30530 #endif
30531 }
30532 
/* Target hook for symbol frobbing; simply defers to the generic ELF
   handler (no ARM-specific processing is needed here).  */

void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
30539 #endif
30540 
30541 /* MD interface: Finalization.	*/
30542 
30543 void
arm_cleanup(void)30544 arm_cleanup (void)
30545 {
30546   literal_pool * pool;
30547 
30548   /* Ensure that all the predication blocks are properly closed.  */
30549   check_pred_blocks_finished ();
30550 
30551   for (pool = list_of_pools; pool; pool = pool->next)
30552     {
30553       /* Put it at the end of the relevant section.  */
30554       subseg_set (pool->section, pool->sub_section);
30555 #ifdef OBJ_ELF
30556       arm_elf_change_section ();
30557 #endif
30558       s_ltorg (0);
30559     }
30560 }
30561 
30562 #ifdef OBJ_ELF
30563 /* Remove any excess mapping symbols generated for alignment frags in
30564    SEC.  We may have created a mapping symbol before a zero byte
30565    alignment; remove it if there's a mapping symbol after the
30566    alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections with no frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  /* Walk every frag; SYM is the last mapping symbol created in the
     frag, which is redundant if it sits exactly at the frag's end and
     a later mapping symbol (or the section end) supersedes it.  */
  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* Scan forward through empty frags to decide SYM's fate.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30627 #endif
30628 
30629 /* Adjust the symbol table.  This marks Thumb symbols as distinct from
30630    ARM ones.  */
30631 
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: encode the Thumb-ness of each symbol in its storage class.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols for the COFF linker.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols (except the $a/$t/$d mapping symbols).  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30710 
30711 /* MD interface: Initialization.  */
30712 
30713 static void
set_constant_flonums(void)30714 set_constant_flonums (void)
30715 {
30716   int i;
30717 
30718   for (i = 0; i < NUM_FLOAT_VALS; i++)
30719     if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30720       abort ();
30721 }
30722 
30723 /* Auto-select Thumb mode if it's the only available instruction set for the
30724    given architecture.  */
30725 
30726 static void
autoselect_thumb_from_cpu_variant(void)30727 autoselect_thumb_from_cpu_variant (void)
30728 {
30729   if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
30730     opcode_select (16);
30731 }
30732 
/* MD interface: one-time assembler initialization.  Builds the opcode,
   condition, PSR, register, relocation and barrier-option hash tables,
   resolves the command-line CPU/architecture/FPU selection into
   cpu_variant, records object-file flags, and tells BFD the machine
   type.  Called once by GAS before any input is processed.  */

void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Create the lookup tables used by the parser.  */
  arm_ops_hsh = str_htab_create ();
  arm_cond_hsh = str_htab_create ();
  arm_vcond_hsh = str_htab_create ();
  arm_shift_hsh = str_htab_create ();
  arm_psr_hsh = str_htab_create ();
  arm_v7m_psr_hsh = str_htab_create ();
  arm_reg_hsh = str_htab_create ();
  arm_reloc_hsh = str_htab_create ();
  arm_barrier_opt_hsh = str_htab_create ();

  /* Populate the tables.  For insns, only the first entry with a given
     template name is inserted; later duplicates are ignored.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    if (str_hash_find (arm_ops_hsh, insns[i].template_name) == NULL)
      str_hash_insert (arm_ops_hsh, insns[i].template_name, insns + i, 0);
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_cond_hsh, conds[i].template_name, conds + i, 0);
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_vcond_hsh, vconds[i].template_name, vconds + i, 0);
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    str_hash_insert (arm_shift_hsh, shift_names[i].name, shift_names + i, 0);
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_psr_hsh, psrs[i].template_name, psrs + i, 0);
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		     v7m_psrs + i, 0);
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    str_hash_insert (arm_reg_hsh, reg_names[i].name, reg_names + i, 0);
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    str_hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		     barrier_opt_names + i, 0);
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      str_hash_insert (arm_reloc_hsh, entry->name, entry, 0);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options with new options are faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* No FPU determined above: fall back on a default, unless no CPU at
     all was selected, in which case assume the classic FPA.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /*  Autodetection of feature mode: allow all features in cpu_variant but
      leave selected_cpu unset.  It will be set in
      aeabi_set_public_attributes () after all instructions have been
      processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

  /* Compute the object-file private flags from the ABI-related
     command-line options and the selected FPU.  */
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).	*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
	    bfd_set_section_size (sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  The feature tests run from the most
     specific extension down to the oldest base architecture, so the
     first match is the best description of the target.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
30971 
30972 /* Command line processing.  */
30973 
30974 /* md_parse_option
30975       Invocation line includes a switch not recognized by the base assembler.
30976       See if it's a processor-specific option.
30977 
30978       This routine is somewhat complicated by the need for backwards
30979       compatibility (since older releases of gcc can't be changed).
30980       The new options try to make the interface as compatible as
30981       possible with GCC.
30982 
30983       New options (supported) are:
30984 
30985 	      -mcpu=<cpu name>		 Assemble for selected processor
30986 	      -march=<architecture name> Assemble for selected architecture
30987 	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
30988 	      -EB/-mbig-endian		 Big-endian
30989 	      -EL/-mlittle-endian	 Little-endian
30990 	      -k			 Generate PIC code
30991 	      -mthumb			 Start in Thumb mode
30992 	      -mthumb-interwork		 Code supports ARM/Thumb interworking
30993 
30994 	      -m[no-]warn-deprecated     Warn about deprecated features
30995 	      -m[no-]warn-syms		 Warn when symbols match instructions
30996 
30997       For now we will also provide support for:
30998 
30999 	      -mapcs-32			 32-bit Program counter
31000 	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
31002 	      -mapcs-reentrant		 Reentrant code
31003 	      -matpcs
      (sometimes these will probably be replaced with -mapcs=<list of options>
31005       and -matpcs=<list of options>)
31006 
      The remaining options are only supported for backwards compatibility.
31008       Cpu variants, the arm part is optional:
31009 	      -m[arm]1		      Currently not supported.
31010 	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
31011 	      -m[arm]3		      Arm 3 processor
31012 	      -m[arm]6[xx],	      Arm 6 processors
31013 	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
31014 	      -m[arm]8[10]	      Arm 8 processors
31015 	      -m[arm]9[20][tdmi]      Arm 9 processors
31016 	      -mstrongarm[110[0]]     StrongARM processors
31017 	      -mxscale		      XScale processors
31018 	      -m[arm]v[2345[t[e]]]    Arm architectures
31019 	      -mall		      All (except the ARM1)
31020       FP variants:
31021 	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
31022 	      -mfpe-old		      (No float load/store multiples)
31023 	      -mvfpxd		      VFP Single precision
31024 	      -mvfp		      All VFP
31025 	      -mno-fpu		      Disable all floating point instructions
31026 
31027       The following CPU names are recognized:
31028 	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
31029 	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
	      arm700i, arm710, arm710t, arm720, arm720t, arm740t, arm710c,
31031 	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
31032 	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
31033 	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
31034 	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
31035 
31036       */
31037 
/* Short options recognized by md_parse_option: -m<arg> and -k.  */
const char * md_shortopts = "m:k";

/* Long-option codes.  -EB/-EL are only available when the target (or a
   bi-endian build) supports the corresponding byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
31052 
/* Long options handled by md_parse_option; terminated by a NULL entry.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
31069 
/* Describes a simple boolean-style option: when OPTION is seen on the
   command line, *VAR is set to VALUE.  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.	*/
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
31078 
31079 struct arm_option_table arm_opts[] =
31080 {
31081   {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
31082   {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
31083   {"mthumb-interwork", N_("support ARM/Thumb interworking"),
31084    &support_interwork, 1, NULL},
31085   {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
31086   {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
31087   {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
31088    1, NULL},
31089   {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
31090   {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
31091   {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
31092   {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
31093    NULL},
31094 
31095   /* These are recognized by the assembler, but have no affect on code.	 */
31096   {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
31097   {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
31098 
31099   {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
31100   {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
31101    &warn_on_deprecated, 0, NULL},
31102 
31103   {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
31104    " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
31105   {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
31106 
31107   {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), true, NULL},
31108   {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), false, NULL},
31109   {NULL, NULL, NULL, 0, NULL}
31110 };
31111 
/* Describes a deprecated option that selects a legacy CPU/FPU feature
   set: when OPTION is seen, *VAR is pointed at VALUE and DEPRECATED
   (if set) is printed as a migration hint.  */
struct arm_legacy_option_table
{
  const char *              option;		/* Option name to match.  */
  const arm_feature_set	**  var;		/* Variable to change.	*/
  const arm_feature_set	    value;		/* What to change it to.  */
  const char *              deprecated;		/* If non-null, print this message.  */
};
31119 
/* Legacy -m<cpu>/-m<arch>/-m<fpu> options kept only for backwards
   compatibility; each entry points the user at the modern -mcpu=,
   -march= or -mfpu= replacement.  Terminated by an all-NULL entry.  */
const struct arm_legacy_option_table arm_legacy_opts[] =
{
  /* DON'T add any new processors to this list -- we want the whole list
     to go away...  Add them to the processors table instead.  */
  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm110")},
  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1100")},
  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
   N_("use -mcpu=strongarm1110")},
  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},

  /* Architecture variants -- don't add any more to this list either.  */
  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},

  /* Floating point variants -- don't add any more to this list either.	 */
  {"mfpe-old",   &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
  {"mfpa10",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
  {"mfpa11",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
  {"mno-fpu",    &legacy_fpu, ARM_ARCH_NONE,
   N_("use either -mfpu=softfpa or -mfpu=softvfp")},

  {NULL, NULL, ARM_ARCH_NONE, NULL}
};
31232 
/* Describes one CPU accepted by -mcpu=.  */
struct arm_cpu_option_table
{
  const char *           name;		/* Name matched on the command line.  */
  size_t                 name_len;	/* Length of NAME, precomputed.  */
  const arm_feature_set	 value;		/* Base architecture feature set.  */
  const arm_feature_set	 ext;		/* Additional extension features.  */
  /* For some CPUs we assume an FPU unless the user explicitly sets
     -mfpu=...	*/
  const arm_feature_set	 default_fpu;
  /* The canonical name of the CPU, or NULL to use NAME converted to upper
     case.  */
  const char *           canonical_name;
};
31246 
/* This list should, at a minimum, contain all the cpu names
   recognized by GCC.  */
/* Build one arm_cpu_option_table entry: N = option name, CN = canonical
   name (or NULL), V = architecture features, E = extension features,
   DF = default FPU.  name_len is computed from the string literal.  */
#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
31250 
31251 static const struct arm_cpu_option_table arm_cpus[] =
31252 {
31253   ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
31254 	       ARM_ARCH_NONE,
31255 	       FPU_ARCH_FPA),
31256   ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
31257 	       ARM_ARCH_NONE,
31258 	       FPU_ARCH_FPA),
31259   ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
31260 	       ARM_ARCH_NONE,
31261 	       FPU_ARCH_FPA),
31262   ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
31263 	       ARM_ARCH_NONE,
31264 	       FPU_ARCH_FPA),
31265   ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
31266 	       ARM_ARCH_NONE,
31267 	       FPU_ARCH_FPA),
31268   ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
31269 	       ARM_ARCH_NONE,
31270 	       FPU_ARCH_FPA),
31271   ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
31272 	       ARM_ARCH_NONE,
31273 	       FPU_ARCH_FPA),
31274   ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
31275 	       ARM_ARCH_NONE,
31276 	       FPU_ARCH_FPA),
31277   ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
31278 	       ARM_ARCH_NONE,
31279 	       FPU_ARCH_FPA),
31280   ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
31281 	       ARM_ARCH_NONE,
31282 	       FPU_ARCH_FPA),
31283   ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
31284 	       ARM_ARCH_NONE,
31285 	       FPU_ARCH_FPA),
31286   ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
31287 	       ARM_ARCH_NONE,
31288 	       FPU_ARCH_FPA),
31289   ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
31290 	       ARM_ARCH_NONE,
31291 	       FPU_ARCH_FPA),
31292   ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
31293 	       ARM_ARCH_NONE,
31294 	       FPU_ARCH_FPA),
31295   ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
31296 	       ARM_ARCH_NONE,
31297 	       FPU_ARCH_FPA),
31298   ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
31299 	       ARM_ARCH_NONE,
31300 	       FPU_ARCH_FPA),
31301   ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
31302 	       ARM_ARCH_NONE,
31303 	       FPU_ARCH_FPA),
31304   ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
31305 	       ARM_ARCH_NONE,
31306 	       FPU_ARCH_FPA),
31307   ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
31308 	       ARM_ARCH_NONE,
31309 	       FPU_ARCH_FPA),
31310   ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
31311 	       ARM_ARCH_NONE,
31312 	       FPU_ARCH_FPA),
31313   ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
31314 	       ARM_ARCH_NONE,
31315 	       FPU_ARCH_FPA),
31316   ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
31317 	       ARM_ARCH_NONE,
31318 	       FPU_ARCH_FPA),
31319   ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
31320 	       ARM_ARCH_NONE,
31321 	       FPU_ARCH_FPA),
31322   ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
31323 	       ARM_ARCH_NONE,
31324 	       FPU_ARCH_FPA),
31325   ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
31326 	       ARM_ARCH_NONE,
31327 	       FPU_ARCH_FPA),
31328   ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
31329 	       ARM_ARCH_NONE,
31330 	       FPU_ARCH_FPA),
31331   ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
31332 	       ARM_ARCH_NONE,
31333 	       FPU_ARCH_FPA),
31334   ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
31335 	       ARM_ARCH_NONE,
31336 	       FPU_ARCH_FPA),
31337   ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
31338 	       ARM_ARCH_NONE,
31339 	       FPU_ARCH_FPA),
31340   ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
31341 	       ARM_ARCH_NONE,
31342 	       FPU_ARCH_FPA),
31343   ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
31344 	       ARM_ARCH_NONE,
31345 	       FPU_ARCH_FPA),
31346   ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
31347 	       ARM_ARCH_NONE,
31348 	       FPU_ARCH_FPA),
31349   ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
31350 	       ARM_ARCH_NONE,
31351 	       FPU_ARCH_FPA),
31352   ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
31353 	       ARM_ARCH_NONE,
31354 	       FPU_ARCH_FPA),
31355   ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
31356 	       ARM_ARCH_NONE,
31357 	       FPU_ARCH_FPA),
31358   ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
31359 	       ARM_ARCH_NONE,
31360 	       FPU_ARCH_FPA),
31361   ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
31362 	       ARM_ARCH_NONE,
31363 	       FPU_ARCH_FPA),
31364   ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
31365 	       ARM_ARCH_NONE,
31366 	       FPU_ARCH_FPA),
31367   ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
31368 	       ARM_ARCH_NONE,
31369 	       FPU_ARCH_FPA),
31370   ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
31371 	       ARM_ARCH_NONE,
31372 	       FPU_ARCH_FPA),
31373   ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
31374 	       ARM_ARCH_NONE,
31375 	       FPU_ARCH_FPA),
31376   ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
31377 	       ARM_ARCH_NONE,
31378 	       FPU_ARCH_FPA),
31379   ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
31380 	       ARM_ARCH_NONE,
31381 	       FPU_ARCH_FPA),
31382   ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
31383 	       ARM_ARCH_NONE,
31384 	       FPU_ARCH_FPA),
31385   ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
31386 	       ARM_ARCH_NONE,
31387 	       FPU_ARCH_FPA),
31388   ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
31389 	       ARM_ARCH_NONE,
31390 	       FPU_ARCH_FPA),
31391 
31392   /* For V5 or later processors we default to using VFP; but the user
31393      should really set the FPU type explicitly.	 */
31394   ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31395 	       ARM_ARCH_NONE,
31396 	       FPU_ARCH_VFP_V2),
31397   ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
31398 	       ARM_ARCH_NONE,
31399 	       FPU_ARCH_VFP_V2),
31400   ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
31401 	       ARM_ARCH_NONE,
31402 	       FPU_ARCH_VFP_V2),
31403   ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
31404 	       ARM_ARCH_NONE,
31405 	       FPU_ARCH_VFP_V2),
31406   ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
31407 	       ARM_ARCH_NONE,
31408 	       FPU_ARCH_VFP_V2),
31409   ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31410 	       ARM_ARCH_NONE,
31411 	       FPU_ARCH_VFP_V2),
31412   ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
31413 	       ARM_ARCH_NONE,
31414 	       FPU_ARCH_VFP_V2),
31415   ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
31416 	       ARM_ARCH_NONE,
31417 	       FPU_ARCH_VFP_V2),
31418   ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31419 	       ARM_ARCH_NONE,
31420 	       FPU_ARCH_VFP_V2),
31421   ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
31422 	       ARM_ARCH_NONE,
31423 	       FPU_ARCH_VFP_V2),
31424   ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
31425 	       ARM_ARCH_NONE,
31426 	       FPU_ARCH_VFP_V2),
31427   ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
31428 	       ARM_ARCH_NONE,
31429 	       FPU_ARCH_VFP_V2),
31430   ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
31431 	       ARM_ARCH_NONE,
31432 	       FPU_ARCH_VFP_V1),
31433   ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
31434 	       ARM_ARCH_NONE,
31435 	       FPU_ARCH_VFP_V1),
31436   ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
31437 	       ARM_ARCH_NONE,
31438 	       FPU_ARCH_VFP_V2),
31439   ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
31440 	       ARM_ARCH_NONE,
31441 	       FPU_ARCH_VFP_V2),
31442   ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
31443 	       ARM_ARCH_NONE,
31444 	       FPU_ARCH_VFP_V1),
31445   ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
31446 	       ARM_ARCH_NONE,
31447 	       FPU_ARCH_VFP_V2),
31448   ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
31449 	       ARM_ARCH_NONE,
31450 	       FPU_ARCH_VFP_V2),
31451   ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
31452 	       ARM_ARCH_NONE,
31453 	       FPU_ARCH_VFP_V2),
31454   ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
31455 	       ARM_ARCH_NONE,
31456 	       FPU_ARCH_VFP_V2),
31457   ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
31458 	       ARM_ARCH_NONE,
31459 	       FPU_ARCH_VFP_V2),
31460   ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
31461 	       ARM_ARCH_NONE,
31462 	       FPU_ARCH_VFP_V2),
31463   ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
31464 	       ARM_ARCH_NONE,
31465 	       FPU_ARCH_VFP_V2),
31466   ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
31467 	       ARM_ARCH_NONE,
31468 	       FPU_ARCH_VFP_V2),
31469   ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
31470 	       ARM_ARCH_NONE,
31471 	       FPU_ARCH_VFP_V2),
31472   ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
31473 	       ARM_ARCH_NONE,
31474 	       FPU_NONE),
31475   ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
31476 	       ARM_ARCH_NONE,
31477 	       FPU_NONE),
31478   ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
31479 	       ARM_ARCH_NONE,
31480 	       FPU_ARCH_VFP_V2),
31481   ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
31482 	       ARM_ARCH_NONE,
31483 	       FPU_ARCH_VFP_V2),
31484   ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
31485 	       ARM_ARCH_NONE,
31486 	       FPU_ARCH_VFP_V2),
31487   ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
31488 	       ARM_ARCH_NONE,
31489 	       FPU_NONE),
31490   ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
31491 	       ARM_ARCH_NONE,
31492 	       FPU_NONE),
31493   ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
31494 	       ARM_ARCH_NONE,
31495 	       FPU_ARCH_VFP_V2),
31496   ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
31497 	       ARM_ARCH_NONE,
31498 	       FPU_NONE),
31499   ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
31500 	       ARM_ARCH_NONE,
31501 	       FPU_ARCH_VFP_V2),
31502   ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
31503 	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31504 	       FPU_NONE),
31505   ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
31506 	       ARM_ARCH_NONE,
31507 	       FPU_ARCH_NEON_VFP_V4),
31508   ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
31509 	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
31510 	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
31511   ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
31512 	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31513 	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
31514   ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
31515 	       ARM_ARCH_NONE,
31516 	       FPU_ARCH_NEON_VFP_V4),
31517   ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
31518 	       ARM_ARCH_NONE,
31519 	       FPU_ARCH_NEON_VFP_V4),
31520   ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
31521 	       ARM_ARCH_NONE,
31522 	       FPU_ARCH_NEON_VFP_V4),
31523   ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
31524 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31525 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31526   ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
31527 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31528 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31529   ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
31530 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31531 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31532   ARM_CPU_OPT ("cortex-a55",    "Cortex-A55",	       ARM_ARCH_V8_2A,
31533 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31534 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31535   ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
31536 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31537 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31538   ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
31539 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31540 	      FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31541   ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
31542 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31543 	      FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31544   ARM_CPU_OPT ("cortex-a75",    "Cortex-A75",	       ARM_ARCH_V8_2A,
31545 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31546 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31547   ARM_CPU_OPT ("cortex-a76",    "Cortex-A76",	       ARM_ARCH_V8_2A,
31548 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31549 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31550   ARM_CPU_OPT ("cortex-a76ae",    "Cortex-A76AE",      ARM_ARCH_V8_2A,
31551 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31552 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31553   ARM_CPU_OPT ("cortex-a77",    "Cortex-A77",	       ARM_ARCH_V8_2A,
31554 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31555 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31556   ARM_CPU_OPT ("cortex-a78",   "Cortex-A78",	       ARM_ARCH_V8_2A,
31557 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
31558 	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31559   ARM_CPU_OPT ("cortex-a78ae",   "Cortex-A78AE",	   ARM_ARCH_V8_2A,
31560 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
31561 	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31562   ARM_CPU_OPT ("cortex-a78c",   "Cortex-A78C",	   ARM_ARCH_V8_2A,
31563 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
31564 	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31565   ARM_CPU_OPT ("ares",    "Ares",	       ARM_ARCH_V8_2A,
31566 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31567 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31568   ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
31569 	       ARM_ARCH_NONE,
31570 	       FPU_NONE),
31571   ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
31572 	       ARM_ARCH_NONE,
31573 	       FPU_ARCH_VFP_V3D16),
31574   ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
31575 	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31576 	       FPU_NONE),
31577   ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
31578 	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31579 	       FPU_ARCH_VFP_V3D16),
31580   ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
31581 	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31582 	       FPU_ARCH_VFP_V3D16),
31583   ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
31584 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31585 	      FPU_ARCH_NEON_VFP_ARMV8),
31586   ARM_CPU_OPT ("cortex-m35p",	  "Cortex-M35P",       ARM_ARCH_V8M_MAIN,
31587 	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31588 	       FPU_NONE),
31589   ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
31590 	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31591 	       FPU_NONE),
31592   ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
31593 	       ARM_ARCH_NONE,
31594 	       FPU_NONE),
31595   ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
31596 	       ARM_ARCH_NONE,
31597 	       FPU_NONE),
31598   ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
31599 	       ARM_ARCH_NONE,
31600 	       FPU_NONE),
31601   ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
31602 	       ARM_ARCH_NONE,
31603 	       FPU_NONE),
31604   ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
31605 	       ARM_ARCH_NONE,
31606 	       FPU_NONE),
31607   ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
31608 	       ARM_ARCH_NONE,
31609 	       FPU_NONE),
31610   ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
31611 	       ARM_ARCH_NONE,
31612 	       FPU_NONE),
31613   ARM_CPU_OPT ("cortex-x1",   "Cortex-X1",	       ARM_ARCH_V8_2A,
31614 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST | ARM_EXT2_SB),
31615 	       FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31616   ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
31617 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31618 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31619   ARM_CPU_OPT ("neoverse-n1",    "Neoverse N1",	       ARM_ARCH_V8_2A,
31620 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31621 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31622   ARM_CPU_OPT ("neoverse-n2",	 "Neoverse N2",	       ARM_ARCH_V8_5A,
31623 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31624 				    | ARM_EXT2_BF16
31625 				    | ARM_EXT2_I8MM),
31626 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
31627   ARM_CPU_OPT ("neoverse-v1", "Neoverse V1", ARM_ARCH_V8_4A,
31628 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
31629 				    | ARM_EXT2_BF16
31630 				    | ARM_EXT2_I8MM),
31631 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4),
31632   /* ??? XSCALE is really an architecture.  */
31633   ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
31634 	       ARM_ARCH_NONE,
31635 	       FPU_ARCH_VFP_V2),
31636 
31637   /* ??? iwmmxt is not a processor.  */
31638   ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
31639 	       ARM_ARCH_NONE,
31640 	       FPU_ARCH_VFP_V2),
31641   ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
31642 	       ARM_ARCH_NONE,
31643 	       FPU_ARCH_VFP_V2),
31644   ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
31645 	       ARM_ARCH_NONE,
31646 	       FPU_ARCH_VFP_V2),
31647 
31648   /* Maverick.  */
31649   ARM_CPU_OPT ("ep9312",	  "ARM920T",
31650 	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
31651 	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
31652 
31653   /* Marvell processors.  */
31654   ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
31655 	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31656 	       FPU_ARCH_VFP_V3D16),
31657   ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
31658 	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31659 	       FPU_ARCH_NEON_VFP_V4),
31660 
31661   /* APM X-Gene family.  */
31662   ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
31663 	       ARM_ARCH_NONE,
31664 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31665   ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
31666 	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31667 	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31668 
31669   { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
31670 };
31671 #undef ARM_CPU_OPT
31672 
/* One "+NAME"/"+noNAME" architecture-extension entry.  "+NAME" ORs
   MERGE into the selected feature set; "+noNAME" clears CLEAR from it.
   NAME_LEN caches strlen (NAME) (the ARM_EXT/ARM_ADD/ARM_REMOVE
   builder macros below fill it with sizeof (NAME) - 1) so option
   parsing can length-compare without recomputing.  */
struct arm_ext_table
{
  const char *		  name;
  size_t		  name_len;
  const arm_feature_set	  merge;
  const arm_feature_set	  clear;
};
31680 
/* One -march=NAME entry: the architecture's feature set VALUE, the
   FPU assumed when none is given explicitly (DEFAULT_FPU), and the
   table of "+ext" extensions valid for this architecture (EXT_TABLE,
   NULL when the architecture has no context-sensitive extensions).
   NAME_LEN caches strlen (NAME).  */
struct arm_arch_option_table
{
  const char *			name;
  size_t			name_len;
  const arm_feature_set		value;
  const arm_feature_set		default_fpu;
  const struct arm_ext_table *	ext_table;
};
31689 
/* Builder macros for struct arm_ext_table entries.  All three fill
   name_len with sizeof (E) - 1, i.e. strlen of the literal E.
   Spacing of the sizeof operands is normalized to the GNU style
   (sizeof (E) - 1) used elsewhere in this file.  */

/* Used to add support for +E and +noE extension.  */
#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
/* Used to add support for a +E extension.  */
#define ARM_ADD(E, M) { E, sizeof (E) - 1, M, ARM_ARCH_NONE }
/* Used to add support for a +noE extension.  */
#define ARM_REMOVE(E, C) { E, sizeof (E) - 1, ARM_ARCH_NONE, C }

/* Clear mask matching every floating-point feature: the FP16 core
   feature bits plus all coprocessor bits except the FPU endianness
   selection (FPU_ENDIAN_PURE), so "+nofp" strips FP support without
   disturbing the endianness choice.  */
#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
			    ~0 & ~FPU_ENDIAN_PURE)
31699 
/* Extensions accepted after -march=armv5te (also reused by the v5TE
   derivatives and the ARMv6 entries in arm_archs below).  */
static const struct arm_ext_table armv5te_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31705 
/* Extensions accepted after -march=armv7.  */
static const struct arm_ext_table armv7_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31711 
/* Extensions accepted after -march=armv7ve.  Entries tagged as
   aliases select the same FPU feature set as the canonical name.  */
static const struct arm_ext_table armv7ve_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),  /* Alias for +fp.  */
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),

  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31734 
/* Extensions accepted after -march=armv7-a / armv7a.  Unlike armv7ve,
   the default "+fp" here is only VFPv3-D16, and the MP/Security
   extensions are optional additions.  */
static const struct arm_ext_table armv7a_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),

  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),

  /* Aliases for +simd.  */
  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),

  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),

  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31759 
/* Extensions accepted after -march=armv7-r / armv7r.  Offers a
   single-precision-only FP option (+fp.sp) in addition to +fp, plus
   the hardware divide extension (+idiv).  */
static const struct arm_ext_table armv7r_ext_table[] =
{
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31772 
/* Extensions accepted after -march=armv7e-m.  */
static const struct arm_ext_table armv7em_ext_table[] =
{
  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31783 
/* Extensions accepted after -march=armv8-a.  Note there is no "+fp"
   adder here, only a "+nofp" remover -- see the comment below.  */
static const struct arm_ext_table armv8a_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31798 
31799 
/* Extensions accepted after -march=armv8.1-a.  Like armv8a_ext_table
   but the SIMD/crypto feature sets are the v8.1 (RDMA) variants.  */
static const struct arm_ext_table armv81a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31813 
/* Extensions accepted after -march=armv8.2-a (also used for the
   armv8.3-a entry in arm_archs below).  Adds the v8.2 optional
   features: FP16, FP16-FML, BF16, I8MM and dot-product.  */
static const struct arm_ext_table armv82a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31832 
/* Extensions accepted after -march=armv8.4-a.  Dot-product is part of
   the base "+simd" here; crypto uses the v8.4 (SHA3/SM4) set.  */
static const struct arm_ext_table armv84a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31849 
/* Extensions accepted after -march=armv8.5-a.  No "+sb"/"+predres"
   entries here, unlike the earlier v8 tables.  */
static const struct arm_ext_table armv85a_ext_table[] =
{
  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),

  /* Armv8-a does not allow an FP implementation without SIMD, so the user
     should use the +simd option to turn on FP.  */
  ARM_REMOVE ("fp", ALL_FP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31864 
/* Extensions accepted after -march=armv8.6-a.  */
static const struct arm_ext_table armv86a_ext_table[] =
{
  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31870 
/* Custom Datapath Extension entries shared by the v8-M tables below:
   "+cdecpN" (N = 0..7) enables the generic CDE feature together with
   the bit for coprocessor N.  #undef'd after its last use.  */
#define CDE_EXTENSIONS \
  ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
  ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
  ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
  ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
  ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
  ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
  ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
  ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
31880 
/* Extensions accepted after -march=armv8-m.main: DSP, single- or
   double-precision FP, and the CDE coprocessor options.  */
static const struct arm_ext_table armv8m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
  CDE_EXTENSIONS,
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31890 
31891 
/* Extensions accepted after -march=armv8.1-m.main.  The FP options
   here also pull in the FP16 scalar instructions; "+mve" adds the
   integer M-profile Vector Extension (and "+nomve" clears both MVE
   and MVE-FP), while "+mve.fp" adds the floating-point MVE variant
   together with its scalar FP prerequisites.  */
static const struct arm_ext_table armv8_1m_main_ext_table[] =
{
  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  ARM_EXT ("fp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
	   ALL_FP),
  ARM_ADD ("fp.dp",
	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP, ARM_EXT2_MVE, 0),
	   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP)),
  ARM_ADD ("mve.fp",
	   ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
			ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  CDE_EXTENSIONS,
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31912 
31913 #undef CDE_EXTENSIONS
31914 
/* Extensions accepted after -march=armv8-r.  Like armv8a_ext_table
   but with an extra single-precision-only FP option (+fp.sp).  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
};
31925 
31926 /* This list should, at a minimum, contain all the architecture names
31927    recognized by GCC.  */
/* Builder macros for struct arm_arch_option_table entries.  The plain
   variant has no extension table; the OPT2 variant attaches the
   <ext>_ext_table defined above.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }
31931 
/* -march= names, their feature sets, default FPUs, and (via
   ARM_ARCH_OPT2) the extension table valid for each.  Terminated by a
   NULL-name sentinel.  */
static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		  ARM_ANY,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	  ARM_ARCH_V1,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	  ARM_ARCH_V2,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	  ARM_ARCH_V3,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	  ARM_ARCH_V3M,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	  ARM_ARCH_V4,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	  ARM_ARCH_V4xM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	  ARM_ARCH_V4T,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	  ARM_ARCH_V4TxM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	  ARM_ARCH_V5,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	  ARM_ARCH_V5T,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	  ARM_ARCH_V5TxM,	FPU_ARCH_VFP),
  /* The ARMv5TE-through-ARMv6 entries all share armv5te_ext_table,
     which only offers "+fp"/"+nofp".  */
  ARM_ARCH_OPT2 ("armv5te",	  ARM_ARCH_V5TE,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv5texp",	  ARM_ARCH_V5TExP,	FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej",	  ARM_ARCH_V5TEJ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6j",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6k",	  ARM_ARCH_V6K,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6z",	  ARM_ARCH_V6Z,		FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zk",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6t2",	  ARM_ARCH_V6T2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6kt2",	  ARM_ARCH_V6KT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zt2",	  ARM_ARCH_V6ZT2,	FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT ("armv6-m",	  ARM_ARCH_V6M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	  ARM_ARCH_V6SM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7",	  ARM_ARCH_V7,		FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve",	  ARM_ARCH_V7VE,	FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m",	  ARM_ARCH_V7EM,	FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base",	  ARM_ARCH_V8M_BASE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main",  ARM_ARCH_V8M_MAIN,	FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN,	FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a",	  ARM_ARCH_V8A,		FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a",	  ARM_ARCH_V8_1A,	FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a",	  ARM_ARCH_V8_2A,	FPU_ARCH_VFP, armv82a),
  /* armv8.3-a reuses the v8.2 extension table.  */
  ARM_ARCH_OPT2 ("armv8.3-a",	  ARM_ARCH_V8_3A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r",	  ARM_ARCH_V8R,		FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a",	  ARM_ARCH_V8_4A,	FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a",	  ARM_ARCH_V8_5A,	FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT2 ("armv8.6-a",	  ARM_ARCH_V8_6A,	FPU_ARCH_VFP, armv86a),
  ARM_ARCH_OPT ("xscale",	  ARM_ARCH_XSCALE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	  ARM_ARCH_IWMMXT,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	  ARM_ARCH_IWMMXT2,	FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
};
31997 #undef ARM_ARCH_OPT
31998 
31999 /* ISA extensions in the co-processor and main instruction set space.  */
32000 
/* One legacy "+ext" entry (see the DEPRECATED note on arm_extensions
   below): MERGE_VALUE is ORed in for "+NAME", CLEAR_VALUE is removed
   for "+noNAME".  NAME_LEN caches strlen (NAME).  */
struct arm_option_extension_value_table
{
  const char *           name;
  size_t                 name_len;
  const arm_feature_set  merge_value;
  const arm_feature_set  clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set  allowed_archs[2];
};
32012 
32013 /* The following table must be in alphabetical order with a NULL last entry.  */
32014 
/* Builder macros for arm_extensions entries.  The single-architecture
   variant fills the unused second allowed_archs slot with ARM_ANY
   (the empty-entry marker); the OPT2 variant lists two
   architectures.  */
#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }
32017 
32018 /* DEPRECATED: Refrain from using this table to add any new extensions, instead
32019    use the context sensitive approach using arm_ext_table's.  */
/* Legacy architecture-independent "+ext" table.  Must stay in
   alphabetical order (see the comment above ARM_EXT_OPT) and end with
   the NULL-name sentinel.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
32098 #undef ARM_EXT_OPT
32099 
32100 /* ISA floating-point and Advanced SIMD extensions.  */
/* One -mfpu=NAME entry: the feature set VALUE selected by NAME.  */
struct arm_option_fpu_value_table
{
  const char *           name;
  const arm_feature_set  value;
};
32106 
32107 /* This list should, at a minimum, contain all the fpu names
32108    recognized by GCC.  */
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Includes several historical / undocumented
   spellings kept for compatibility; terminated by a NULL-name
   sentinel.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}
};
32157 
/* Generic name -> integer value mapping, used for the float-ABI and
   EABI-version option tables below.  */
struct arm_option_value_table
{
  const char *name;   /* Option argument text.  */
  long value;         /* Corresponding value to record.  */
};
32163 
/* Arguments accepted by -mfloat-abi= and the ABI each one selects.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}
};
32171 
32172 #ifdef OBJ_ELF
32173 /* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},   /* GNU (pre-EABI) format.  */
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}
};
32181 #endif
32182 
/* Description of a multi-character option (e.g. "-mcpu=...") that is
   dispatched to a dedicated parsing callback.  */
struct arm_long_option_table
{
  const char *option;			/* Substring to match.	*/
  const char *help;			/* Help information.  */
  bool (*func) (const char *subopt);	/* Function to decode sub-option.  */
  const char *deprecated;		/* If non-null, print this message.  */
};
32190 
/* Parse STR, a sequence of "+ext" / "+noext" architectural extension
   items, against the base feature set *OPT_SET.  Features added or
   removed are accumulated into *EXT_SET.  EXT_TABLE, when non-NULL, is
   an architecture-specific extension table that is consulted before the
   global arm_extensions table.  Returns true on success; on error a
   diagnostic is issued via as_bad and false is returned.  */

static bool
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      /* Every extension item must be introduced by '+'.  */
      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return false;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN is the length of the current extension name; EXT points at
	 the next item (or is NULL for the last one).  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix requests removal of the extension.  */
      if (len >= 2 && startswith (str, "no"))
	{
	  if (adding_value != 0)
	    {
	      /* Switch to removal mode and restart the alphabetical
		 scan of the global table.  */
	      adding_value = 0;
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return false;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return false;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Architecture-specific extensions take precedence over the
	 generic table when an EXT_TABLE was supplied.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bool found = false;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = true;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match. */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return false;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return false;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      /* Advance to the next "+..." item.  */
      str = ext;
    };

  return true;
}
32358 
32359 static bool
arm_parse_fp16_opt(const char * str)32360 arm_parse_fp16_opt (const char *str)
32361 {
32362   if (strcasecmp (str, "ieee") == 0)
32363     fp16_format = ARM_FP16_FORMAT_IEEE;
32364   else if (strcasecmp (str, "alternative") == 0)
32365     fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
32366   else
32367     {
32368       as_bad (_("unrecognised float16 format \"%s\""), str);
32369       return false;
32370     }
32371 
32372   return true;
32373 }
32374 
32375 static bool
arm_parse_cpu(const char * str)32376 arm_parse_cpu (const char *str)
32377 {
32378   const struct arm_cpu_option_table *opt;
32379   const char *ext = strchr (str, '+');
32380   size_t len;
32381 
32382   if (ext != NULL)
32383     len = ext - str;
32384   else
32385     len = strlen (str);
32386 
32387   if (len == 0)
32388     {
32389       as_bad (_("missing cpu name `%s'"), str);
32390       return false;
32391     }
32392 
32393   for (opt = arm_cpus; opt->name != NULL; opt++)
32394     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
32395       {
32396 	mcpu_cpu_opt = &opt->value;
32397 	if (mcpu_ext_opt == NULL)
32398 	  mcpu_ext_opt = XNEW (arm_feature_set);
32399 	*mcpu_ext_opt = opt->ext;
32400 	mcpu_fpu_opt = &opt->default_fpu;
32401 	if (opt->canonical_name)
32402 	  {
32403 	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
32404 	    strcpy (selected_cpu_name, opt->canonical_name);
32405 	  }
32406 	else
32407 	  {
32408 	    size_t i;
32409 
32410 	    if (len >= sizeof selected_cpu_name)
32411 	      len = (sizeof selected_cpu_name) - 1;
32412 
32413 	    for (i = 0; i < len; i++)
32414 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
32415 	    selected_cpu_name[i] = 0;
32416 	  }
32417 
32418 	if (ext != NULL)
32419 	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);
32420 
32421 	return true;
32422       }
32423 
32424   as_bad (_("unknown cpu `%s'"), str);
32425   return false;
32426 }
32427 
32428 static bool
arm_parse_arch(const char * str)32429 arm_parse_arch (const char *str)
32430 {
32431   const struct arm_arch_option_table *opt;
32432   const char *ext = strchr (str, '+');
32433   size_t len;
32434 
32435   if (ext != NULL)
32436     len = ext - str;
32437   else
32438     len = strlen (str);
32439 
32440   if (len == 0)
32441     {
32442       as_bad (_("missing architecture name `%s'"), str);
32443       return false;
32444     }
32445 
32446   for (opt = arm_archs; opt->name != NULL; opt++)
32447     if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
32448       {
32449 	march_cpu_opt = &opt->value;
32450 	if (march_ext_opt == NULL)
32451 	  march_ext_opt = XNEW (arm_feature_set);
32452 	*march_ext_opt = arm_arch_none;
32453 	march_fpu_opt = &opt->default_fpu;
32454 	selected_ctx_ext_table = opt->ext_table;
32455 	strcpy (selected_cpu_name, opt->name);
32456 
32457 	if (ext != NULL)
32458 	  return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
32459 				      opt->ext_table);
32460 
32461 	return true;
32462       }
32463 
32464   as_bad (_("unknown architecture `%s'\n"), str);
32465   return false;
32466 }
32467 
32468 static bool
arm_parse_fpu(const char * str)32469 arm_parse_fpu (const char * str)
32470 {
32471   const struct arm_option_fpu_value_table * opt;
32472 
32473   for (opt = arm_fpus; opt->name != NULL; opt++)
32474     if (streq (opt->name, str))
32475       {
32476 	mfpu_opt = &opt->value;
32477 	return true;
32478       }
32479 
32480   as_bad (_("unknown floating point format `%s'\n"), str);
32481   return false;
32482 }
32483 
32484 static bool
arm_parse_float_abi(const char * str)32485 arm_parse_float_abi (const char * str)
32486 {
32487   const struct arm_option_value_table * opt;
32488 
32489   for (opt = arm_float_abis; opt->name != NULL; opt++)
32490     if (streq (opt->name, str))
32491       {
32492 	mfloat_abi_opt = opt->value;
32493 	return true;
32494       }
32495 
32496   as_bad (_("unknown floating point abi `%s'\n"), str);
32497   return false;
32498 }
32499 
32500 #ifdef OBJ_ELF
32501 static bool
arm_parse_eabi(const char * str)32502 arm_parse_eabi (const char * str)
32503 {
32504   const struct arm_option_value_table *opt;
32505 
32506   for (opt = arm_eabis; opt->name != NULL; opt++)
32507     if (streq (opt->name, str))
32508       {
32509 	meabi_flags = opt->value;
32510 	return true;
32511       }
32512   as_bad (_("unknown EABI `%s'\n"), str);
32513   return false;
32514 }
32515 #endif
32516 
32517 static bool
arm_parse_it_mode(const char * str)32518 arm_parse_it_mode (const char * str)
32519 {
32520   bool ret = true;
32521 
32522   if (streq ("arm", str))
32523     implicit_it_mode = IMPLICIT_IT_MODE_ARM;
32524   else if (streq ("thumb", str))
32525     implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
32526   else if (streq ("always", str))
32527     implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
32528   else if (streq ("never", str))
32529     implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
32530   else
32531     {
32532       as_bad (_("unknown implicit IT mode `%s', should be "\
32533 		"arm, thumb, always, or never."), str);
32534       ret = false;
32535     }
32536 
32537   return ret;
32538 }
32539 
/* Enable TI CodeComposer Studio syntax compatibility mode (-mccs).
   Always succeeds.  */
static bool
arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
{
  codecomposer_syntax = true;
  arm_comment_chars[0] = ';';	/* CCS uses ';' to start comments.  */
  arm_line_separator_chars[0] = 0;	/* Disable the line separator.  */
  return true;
}
32548 
/* Multi-character options and the callbacks that parse their
   sub-arguments; matched by prefix in md_parse_option.  */
struct arm_long_option_table arm_long_opts[] =
{
  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
   arm_parse_cpu, NULL},
  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
   arm_parse_arch, NULL},
  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
   arm_parse_fpu, NULL},
  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
   arm_parse_float_abi, NULL},
#ifdef OBJ_ELF
  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
   arm_parse_eabi, NULL},
#endif
  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
   arm_parse_it_mode, NULL},
  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
   arm_ccs_mode, NULL},
  {"mfp16-format=",
   N_("[ieee|alternative]\n\
                          set the encoding for half precision floating point "
			  "numbers to IEEE\n\
                          or Arm alternative format."),
   arm_parse_fp16_opt, NULL },
  {NULL, NULL, 0, NULL}
};
32575 
32576 int
md_parse_option(int c,const char * arg)32577 md_parse_option (int c, const char * arg)
32578 {
32579   struct arm_option_table *opt;
32580   const struct arm_legacy_option_table *fopt;
32581   struct arm_long_option_table *lopt;
32582 
32583   switch (c)
32584     {
32585 #ifdef OPTION_EB
32586     case OPTION_EB:
32587       target_big_endian = 1;
32588       break;
32589 #endif
32590 
32591 #ifdef OPTION_EL
32592     case OPTION_EL:
32593       target_big_endian = 0;
32594       break;
32595 #endif
32596 
32597     case OPTION_FIX_V4BX:
32598       fix_v4bx = true;
32599       break;
32600 
32601 #ifdef OBJ_ELF
32602     case OPTION_FDPIC:
32603       arm_fdpic = true;
32604       break;
32605 #endif /* OBJ_ELF */
32606 
32607     case 'a':
32608       /* Listing option.  Just ignore these, we don't support additional
32609 	 ones.	*/
32610       return 0;
32611 
32612     default:
32613       for (opt = arm_opts; opt->option != NULL; opt++)
32614 	{
32615 	  if (c == opt->option[0]
32616 	      && ((arg == NULL && opt->option[1] == 0)
32617 		  || streq (arg, opt->option + 1)))
32618 	    {
32619 	      /* If the option is deprecated, tell the user.  */
32620 	      if (warn_on_deprecated && opt->deprecated != NULL)
32621 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
32622 			   arg ? arg : "", _(opt->deprecated));
32623 
32624 	      if (opt->var != NULL)
32625 		*opt->var = opt->value;
32626 
32627 	      return 1;
32628 	    }
32629 	}
32630 
32631       for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
32632 	{
32633 	  if (c == fopt->option[0]
32634 	      && ((arg == NULL && fopt->option[1] == 0)
32635 		  || streq (arg, fopt->option + 1)))
32636 	    {
32637 	      /* If the option is deprecated, tell the user.  */
32638 	      if (warn_on_deprecated && fopt->deprecated != NULL)
32639 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
32640 			   arg ? arg : "", _(fopt->deprecated));
32641 
32642 	      if (fopt->var != NULL)
32643 		*fopt->var = &fopt->value;
32644 
32645 	      return 1;
32646 	    }
32647 	}
32648 
32649       for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
32650 	{
32651 	  /* These options are expected to have an argument.  */
32652 	  if (c == lopt->option[0]
32653 	      && arg != NULL
32654 	      && strncmp (arg, lopt->option + 1,
32655 			  strlen (lopt->option + 1)) == 0)
32656 	    {
32657 	      /* If the option is deprecated, tell the user.  */
32658 	      if (warn_on_deprecated && lopt->deprecated != NULL)
32659 		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
32660 			   _(lopt->deprecated));
32661 
32662 	      /* Call the sup-option parser.  */
32663 	      return lopt->func (arg + strlen (lopt->option) - 1);
32664 	    }
32665 	}
32666 
32667       return 0;
32668     }
32669 
32670   return 1;
32671 }
32672 
32673 void
md_show_usage(FILE * fp)32674 md_show_usage (FILE * fp)
32675 {
32676   struct arm_option_table *opt;
32677   struct arm_long_option_table *lopt;
32678 
32679   fprintf (fp, _(" ARM-specific assembler options:\n"));
32680 
32681   for (opt = arm_opts; opt->option != NULL; opt++)
32682     if (opt->help != NULL)
32683       fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
32684 
32685   for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
32686     if (lopt->help != NULL)
32687       fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
32688 
32689 #ifdef OPTION_EB
32690   fprintf (fp, _("\
32691   -EB                     assemble code for a big-endian cpu\n"));
32692 #endif
32693 
32694 #ifdef OPTION_EL
32695   fprintf (fp, _("\
32696   -EL                     assemble code for a little-endian cpu\n"));
32697 #endif
32698 
32699   fprintf (fp, _("\
32700   --fix-v4bx              Allow BX in ARMv4 code\n"));
32701 
32702 #ifdef OBJ_ELF
32703   fprintf (fp, _("\
32704   --fdpic                 generate an FDPIC object file\n"));
32705 #endif /* OBJ_ELF */
32706 }
32707 
32708 #ifdef OBJ_ELF
32709 
/* Pairs an EABI Tag_CPU_arch value with the architecture feature set it
   corresponds to; used by the cpu_arch_ver table below.  */
typedef struct
{
  int val;                  /* TAG_CPU_ARCH_* value (-1 terminates).  */
  arm_feature_set flags;    /* Features of that architecture.  */
} cpu_arch_ver_table;
32715 
32716 /* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
32717    chronologically for architectures, with an exception for ARMv6-M and
32718    ARMv6S-M due to legacy reasons.  No new architecture should have a
32719    special case.  This allows for build attribute selection results to be
32720    stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_6A},
    /* Sentinel: val == -1 terminates the table.  */
    {-1,		    ARM_ARCH_NONE}
};
32777 
32778 /* Set an attribute if it has not already been set by the user.  */
32779 
32780 static void
aeabi_set_attribute_int(int tag,int value)32781 aeabi_set_attribute_int (int tag, int value)
32782 {
32783   if (tag < 1
32784       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32785       || !attributes_set_explicitly[tag])
32786     bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32787 }
32788 
32789 static void
aeabi_set_attribute_string(int tag,const char * value)32790 aeabi_set_attribute_string (int tag, const char *value)
32791 {
32792   if (tag < 1
32793       || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32794       || !attributes_set_explicitly[tag])
32795     bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32796 }
32797 
32798 /* Return whether features in the *NEEDED feature set are available via
32799    extensions for the architecture whose feature set is *ARCH_FSET.  */
32800 
static bool
have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
			    const arm_feature_set *needed)
{
  int i, nb_allowed_archs;
  arm_feature_set ext_fset;
  const struct arm_option_extension_value_table *opt;

  /* Accumulate in EXT_FSET every feature that some extension applicable
     to *ARCH_FSET could provide.  */
  ext_fset = arm_arch_none;
  for (opt = arm_extensions; opt->name != NULL; opt++)
    {
      /* Extension does not provide any feature we need.  */
      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
	continue;

      nb_allowed_archs =
	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
      for (i = 0; i < nb_allowed_archs; i++)
	{
	  /* Empty entry.  */
	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
	    break;

	  /* Extension is available, add it.  */
	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
	}
    }

  /* Can we enable all features in *needed?  */
  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
}
32833 
32834 /* Select value for Tag_CPU_arch and Tag_CPU_arch_profile build attributes for
32835    a given architecture feature set *ARCH_EXT_FSET including extension feature
32836    set *EXT_FSET.  Selection logic used depend on EXACT_MATCH:
32837    - if true, check for an exact match of the architecture modulo extensions;
32838    - otherwise, select build attribute value of the first superset
32839      architecture released so that results remains stable when new architectures
32840      are added.
32841    For -march/-mcpu=all the build attribute value of the most featureful
32842    architecture is returned.  Tag_CPU_arch_profile result is returned in
32843    PROFILE.  */
32844 
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* ARCH_FSET is the architecture feature set without the extensions.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Compare CPU features only; FPU bits are irrelevant here.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

 found:
  /* Tag_CPU_arch_profile.  */
  if (!ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r)
      && (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
          || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
          || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
              && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only))))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32947 
32948 /* Set the public EABI object attributes.  */
32949 
32950 static void
aeabi_set_public_attributes(void)32951 aeabi_set_public_attributes (void)
32952 {
32953   char profile = '\0';
32954   int arch = -1;
32955   int virt_sec = 0;
32956   int fp16_optional = 0;
32957   int skip_exact_match = 0;
32958   arm_feature_set flags, flags_arch, flags_ext;
32959 
32960   /* Autodetection mode, choose the architecture based the instructions
32961      actually used.  */
32962   if (no_cpu_selected ())
32963     {
32964       ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);
32965 
32966       if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
32967 	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);
32968 
32969       if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
32970 	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);
32971 
32972       /* Code run during relaxation relies on selected_cpu being set.  */
32973       ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
32974       flags_ext = arm_arch_none;
32975       ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
32976       selected_ext = flags_ext;
32977       selected_cpu = flags;
32978     }
32979   /* Otherwise, choose the architecture based on the capabilities of the
32980      requested cpu.  */
32981   else
32982     {
32983       ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
32984       ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
32985       flags_ext = selected_ext;
32986       flags = selected_cpu;
32987     }
32988   ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);
32989 
32990   /* Allow the user to override the reported architecture.  */
32991   if (!ARM_FEATURE_ZERO (selected_object_arch))
32992     {
32993       ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
32994       flags_ext = arm_arch_none;
32995     }
32996   else
32997     skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);
32998 
32999   /* When this function is run again after relaxation has happened there is no
33000      way to determine whether an architecture or CPU was specified by the user:
33001      - selected_cpu is set above for relaxation to work;
33002      - march_cpu_opt is not set if only -mcpu or .cpu is used;
33003      - mcpu_cpu_opt is set to arm_arch_any for autodetection.
33004      Therefore, if not in -march=all case we first try an exact match and fall
33005      back to autodetection.  */
33006   if (!skip_exact_match)
33007     arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
33008   if (arch == -1)
33009     arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
33010   if (arch == -1)
33011     as_bad (_("no architecture contains all the instructions used\n"));
33012 
33013   /* Tag_CPU_name.  */
33014   if (selected_cpu_name[0])
33015     {
33016       char *q;
33017 
33018       q = selected_cpu_name;
33019       if (startswith (q, "armv"))
33020 	{
33021 	  int i;
33022 
33023 	  q += 4;
33024 	  for (i = 0; q[i]; i++)
33025 	    q[i] = TOUPPER (q[i]);
33026 	}
33027       aeabi_set_attribute_string (Tag_CPU_name, q);
33028     }
33029 
33030   /* Tag_CPU_arch.  */
33031   aeabi_set_attribute_int (Tag_CPU_arch, arch);
33032 
33033   /* Tag_CPU_arch_profile.  */
33034   if (profile != '\0')
33035     aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);
33036 
33037   /* Tag_DSP_extension.  */
33038   if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
33039     aeabi_set_attribute_int (Tag_DSP_extension, 1);
33040 
33041   ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
33042   /* Tag_ARM_ISA_use.  */
33043   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
33044       || ARM_FEATURE_ZERO (flags_arch))
33045     aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);
33046 
33047   /* Tag_THUMB_ISA_use.  */
33048   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
33049       || ARM_FEATURE_ZERO (flags_arch))
33050     {
33051       int thumb_isa_use;
33052 
33053       if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
33054 	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
33055 	thumb_isa_use = 3;
33056       else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
33057 	thumb_isa_use = 2;
33058       else
33059 	thumb_isa_use = 1;
33060       aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
33061     }
33062 
33063   /* Tag_VFP_arch.  */
33064   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
33065     aeabi_set_attribute_int (Tag_VFP_arch,
33066 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
33067 			     ? 7 : 8);
33068   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
33069     aeabi_set_attribute_int (Tag_VFP_arch,
33070 			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
33071 			     ? 5 : 6);
33072   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
33073     {
33074       fp16_optional = 1;
33075       aeabi_set_attribute_int (Tag_VFP_arch, 3);
33076     }
33077   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
33078     {
33079       aeabi_set_attribute_int (Tag_VFP_arch, 4);
33080       fp16_optional = 1;
33081     }
33082   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
33083     aeabi_set_attribute_int (Tag_VFP_arch, 2);
33084   else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
33085 	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
33086     aeabi_set_attribute_int (Tag_VFP_arch, 1);
33087 
33088   /* Tag_ABI_HardFP_use.  */
33089   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
33090       && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
33091     aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);
33092 
33093   /* Tag_WMMX_arch.  */
33094   if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
33095     aeabi_set_attribute_int (Tag_WMMX_arch, 2);
33096   else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
33097     aeabi_set_attribute_int (Tag_WMMX_arch, 1);
33098 
33099   /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
33100   if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
33101     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
33102   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
33103     aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
33104   else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
33105     {
33106       if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
33107 	{
33108 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
33109 	}
33110       else
33111 	{
33112 	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
33113 	  fp16_optional = 1;
33114 	}
33115     }
33116 
33117   if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
33118     aeabi_set_attribute_int (Tag_MVE_arch, 2);
33119   else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
33120     aeabi_set_attribute_int (Tag_MVE_arch, 1);
33121 
33122   /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
33123   if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
33124     aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);
33125 
33126   /* Tag_DIV_use.
33127 
33128      We set Tag_DIV_use to two when integer divide instructions have been used
33129      in ARM state, or when Thumb integer divide instructions have been used,
33130      but we have no architecture profile set, nor have we any ARM instructions.
33131 
33132      For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
33133      by the base architecture.
33134 
33135      For new architectures we will have to check these tests.  */
33136   gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
33137   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
33138       || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
33139     aeabi_set_attribute_int (Tag_DIV_use, 0);
33140   else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
33141 	   || (profile == '\0'
33142 	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
33143 	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
33144     aeabi_set_attribute_int (Tag_DIV_use, 2);
33145 
33146   /* Tag_MP_extension_use.  */
33147   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
33148     aeabi_set_attribute_int (Tag_MPextension_use, 1);
33149 
33150   /* Tag Virtualization_use.  */
33151   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
33152     virt_sec |= 1;
33153   if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
33154     virt_sec |= 2;
33155   if (virt_sec != 0)
33156     aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);
33157 
33158   if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
33159     aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
33160 }
33161 
33162 /* Post relaxation hook.  Recompute ARM attributes now that relaxation is
33163    finished and free extension feature bits which will not be used anymore.  */
33164 
33165 void
arm_md_post_relax(void)33166 arm_md_post_relax (void)
33167 {
33168   aeabi_set_public_attributes ();
33169   XDELETE (mcpu_ext_opt);
33170   mcpu_ext_opt = NULL;
33171   XDELETE (march_ext_opt);
33172   march_ext_opt = NULL;
33173 }
33174 
33175 /* Add the default contents for the .ARM.attributes section.  */
33176 
33177 void
arm_md_end(void)33178 arm_md_end (void)
33179 {
33180   if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
33181     return;
33182 
33183   aeabi_set_public_attributes ();
33184 }
33185 #endif /* OBJ_ELF */
33186 
33187 /* Parse a .cpu directive.  */
33188 
33189 static void
s_arm_cpu(int ignored ATTRIBUTE_UNUSED)33190 s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
33191 {
33192   const struct arm_cpu_option_table *opt;
33193   char *name;
33194   char saved_char;
33195 
33196   name = input_line_pointer;
33197   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33198     input_line_pointer++;
33199   saved_char = *input_line_pointer;
33200   *input_line_pointer = 0;
33201 
33202   /* Skip the first "all" entry.  */
33203   for (opt = arm_cpus + 1; opt->name != NULL; opt++)
33204     if (streq (opt->name, name))
33205       {
33206 	selected_arch = opt->value;
33207 	selected_ext = opt->ext;
33208 	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33209 	if (opt->canonical_name)
33210 	  strcpy (selected_cpu_name, opt->canonical_name);
33211 	else
33212 	  {
33213 	    int i;
33214 	    for (i = 0; opt->name[i]; i++)
33215 	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
33216 
33217 	    selected_cpu_name[i] = 0;
33218 	  }
33219 	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33220 
33221 	*input_line_pointer = saved_char;
33222 	demand_empty_rest_of_line ();
33223 	return;
33224       }
33225   as_bad (_("unknown cpu `%s'"), name);
33226   *input_line_pointer = saved_char;
33227   ignore_rest_of_line ();
33228 }
33229 
33230 /* Parse a .arch directive.  */
33231 
33232 static void
s_arm_arch(int ignored ATTRIBUTE_UNUSED)33233 s_arm_arch (int ignored ATTRIBUTE_UNUSED)
33234 {
33235   const struct arm_arch_option_table *opt;
33236   char saved_char;
33237   char *name;
33238 
33239   name = input_line_pointer;
33240   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33241     input_line_pointer++;
33242   saved_char = *input_line_pointer;
33243   *input_line_pointer = 0;
33244 
33245   /* Skip the first "all" entry.  */
33246   for (opt = arm_archs + 1; opt->name != NULL; opt++)
33247     if (streq (opt->name, name))
33248       {
33249 	selected_arch = opt->value;
33250 	selected_ctx_ext_table = opt->ext_table;
33251 	selected_ext = arm_arch_none;
33252 	selected_cpu = selected_arch;
33253 	strcpy (selected_cpu_name, opt->name);
33254 	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33255 	*input_line_pointer = saved_char;
33256 	demand_empty_rest_of_line ();
33257 	return;
33258       }
33259 
33260   as_bad (_("unknown architecture `%s'\n"), name);
33261   *input_line_pointer = saved_char;
33262   ignore_rest_of_line ();
33263 }
33264 
33265 /* Parse a .object_arch directive.  */
33266 
33267 static void
s_arm_object_arch(int ignored ATTRIBUTE_UNUSED)33268 s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
33269 {
33270   const struct arm_arch_option_table *opt;
33271   char saved_char;
33272   char *name;
33273 
33274   name = input_line_pointer;
33275   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33276     input_line_pointer++;
33277   saved_char = *input_line_pointer;
33278   *input_line_pointer = 0;
33279 
33280   /* Skip the first "all" entry.  */
33281   for (opt = arm_archs + 1; opt->name != NULL; opt++)
33282     if (streq (opt->name, name))
33283       {
33284 	selected_object_arch = opt->value;
33285 	*input_line_pointer = saved_char;
33286 	demand_empty_rest_of_line ();
33287 	return;
33288       }
33289 
33290   as_bad (_("unknown architecture `%s'\n"), name);
33291   *input_line_pointer = saved_char;
33292   ignore_rest_of_line ();
33293 }
33294 
33295 /* Parse a .arch_extension directive.  */
33296 
33297 static void
s_arm_arch_extension(int ignored ATTRIBUTE_UNUSED)33298 s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
33299 {
33300   const struct arm_option_extension_value_table *opt;
33301   char saved_char;
33302   char *name;
33303   int adding_value = 1;
33304 
33305   name = input_line_pointer;
33306   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33307     input_line_pointer++;
33308   saved_char = *input_line_pointer;
33309   *input_line_pointer = 0;
33310 
33311   if (strlen (name) >= 2
33312       && startswith (name, "no"))
33313     {
33314       adding_value = 0;
33315       name += 2;
33316     }
33317 
33318   /* Check the context specific extension table */
33319   if (selected_ctx_ext_table)
33320     {
33321       const struct arm_ext_table * ext_opt;
33322       for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
33323         {
33324           if (streq (ext_opt->name, name))
33325 	    {
33326 	      if (adding_value)
33327 		{
33328 		  if (ARM_FEATURE_ZERO (ext_opt->merge))
33329 		    /* TODO: Option not supported.  When we remove the
33330 		    legacy table this case should error out.  */
33331 		    continue;
33332 		  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33333 					  ext_opt->merge);
33334 		}
33335 	      else
33336 		ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
33337 
33338 	      ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33339 	      ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33340 	      *input_line_pointer = saved_char;
33341 	      demand_empty_rest_of_line ();
33342 	      return;
33343 	    }
33344 	}
33345     }
33346 
33347   for (opt = arm_extensions; opt->name != NULL; opt++)
33348     if (streq (opt->name, name))
33349       {
33350 	int i, nb_allowed_archs =
33351 	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
33352 	for (i = 0; i < nb_allowed_archs; i++)
33353 	  {
33354 	    /* Empty entry.  */
33355 	    if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
33356 	      continue;
33357 	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
33358 	      break;
33359 	  }
33360 
33361 	if (i == nb_allowed_archs)
33362 	  {
33363 	    as_bad (_("architectural extension `%s' is not allowed for the "
33364 		      "current base architecture"), name);
33365 	    break;
33366 	  }
33367 
33368 	if (adding_value)
33369 	  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33370 				  opt->merge_value);
33371 	else
33372 	  ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
33373 
33374 	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33375 	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33376 	*input_line_pointer = saved_char;
33377 	demand_empty_rest_of_line ();
33378 	/* Allowing Thumb division instructions for ARMv7 in autodetection rely
33379 	   on this return so that duplicate extensions (extensions with the
33380 	   same name as a previous extension in the list) are not considered
33381 	   for command-line parsing.  */
33382 	return;
33383       }
33384 
33385   if (opt->name == NULL)
33386     as_bad (_("unknown architecture extension `%s'\n"), name);
33387 
33388   *input_line_pointer = saved_char;
33389   ignore_rest_of_line ();
33390 }
33391 
33392 /* Parse a .fpu directive.  */
33393 
33394 static void
s_arm_fpu(int ignored ATTRIBUTE_UNUSED)33395 s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
33396 {
33397   const struct arm_option_fpu_value_table *opt;
33398   char saved_char;
33399   char *name;
33400 
33401   name = input_line_pointer;
33402   while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33403     input_line_pointer++;
33404   saved_char = *input_line_pointer;
33405   *input_line_pointer = 0;
33406 
33407   for (opt = arm_fpus; opt->name != NULL; opt++)
33408     if (streq (opt->name, name))
33409       {
33410 	selected_fpu = opt->value;
33411 	ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
33412 #ifndef CPU_DEFAULT
33413 	if (no_cpu_selected ())
33414 	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
33415 	else
33416 #endif
33417 	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33418 	*input_line_pointer = saved_char;
33419 	demand_empty_rest_of_line ();
33420 	return;
33421       }
33422 
33423   as_bad (_("unknown floating point format `%s'\n"), name);
33424   *input_line_pointer = saved_char;
33425   ignore_rest_of_line ();
33426 }
33427 
33428 /* Copy symbol information.  */
33429 
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Mirror SRC's ARM-specific symbol flag word onto DEST so that DEST
     carries the same per-symbol state (presumably the ARM/Thumb marking
     set elsewhere via ARM_GET_FLAG's setters -- confirm against the macro
     definition).  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
33435 
33436 #ifdef OBJ_ELF
33437 /* Given a symbolic attribute NAME, return the proper integer value.
33438    Returns -1 if the attribute is not known.  */
33439 
33440 int
arm_convert_symbolic_attribute(const char * name)33441 arm_convert_symbolic_attribute (const char *name)
33442 {
33443   static const struct
33444   {
33445     const char * name;
33446     const int    tag;
33447   }
33448   attribute_table[] =
33449     {
33450       /* When you modify this table you should
33451 	 also modify the list in doc/c-arm.texi.  */
33452 #define T(tag) {#tag, tag}
33453       T (Tag_CPU_raw_name),
33454       T (Tag_CPU_name),
33455       T (Tag_CPU_arch),
33456       T (Tag_CPU_arch_profile),
33457       T (Tag_ARM_ISA_use),
33458       T (Tag_THUMB_ISA_use),
33459       T (Tag_FP_arch),
33460       T (Tag_VFP_arch),
33461       T (Tag_WMMX_arch),
33462       T (Tag_Advanced_SIMD_arch),
33463       T (Tag_PCS_config),
33464       T (Tag_ABI_PCS_R9_use),
33465       T (Tag_ABI_PCS_RW_data),
33466       T (Tag_ABI_PCS_RO_data),
33467       T (Tag_ABI_PCS_GOT_use),
33468       T (Tag_ABI_PCS_wchar_t),
33469       T (Tag_ABI_FP_rounding),
33470       T (Tag_ABI_FP_denormal),
33471       T (Tag_ABI_FP_exceptions),
33472       T (Tag_ABI_FP_user_exceptions),
33473       T (Tag_ABI_FP_number_model),
33474       T (Tag_ABI_align_needed),
33475       T (Tag_ABI_align8_needed),
33476       T (Tag_ABI_align_preserved),
33477       T (Tag_ABI_align8_preserved),
33478       T (Tag_ABI_enum_size),
33479       T (Tag_ABI_HardFP_use),
33480       T (Tag_ABI_VFP_args),
33481       T (Tag_ABI_WMMX_args),
33482       T (Tag_ABI_optimization_goals),
33483       T (Tag_ABI_FP_optimization_goals),
33484       T (Tag_compatibility),
33485       T (Tag_CPU_unaligned_access),
33486       T (Tag_FP_HP_extension),
33487       T (Tag_VFP_HP_extension),
33488       T (Tag_ABI_FP_16bit_format),
33489       T (Tag_MPextension_use),
33490       T (Tag_DIV_use),
33491       T (Tag_nodefaults),
33492       T (Tag_also_compatible_with),
33493       T (Tag_conformance),
33494       T (Tag_T2EE_use),
33495       T (Tag_Virtualization_use),
33496       T (Tag_DSP_extension),
33497       T (Tag_MVE_arch),
33498       /* We deliberately do not include Tag_MPextension_use_legacy.  */
33499 #undef T
33500     };
33501   unsigned int i;
33502 
33503   if (name == NULL)
33504     return -1;
33505 
33506   for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
33507     if (streq (name, attribute_table[i].name))
33508       return attribute_table[i].tag;
33509 
33510   return -1;
33511 }
33512 
33513 /* Apply sym value for relocations only in the case that they are for
33514    local symbols in the same segment as the fixup and you have the
33515    respective architectural feature for blx and simple switches.  */
33516 
33517 int
arm_apply_sym_value(struct fix * fixP,segT this_seg)33518 arm_apply_sym_value (struct fix * fixP, segT this_seg)
33519 {
33520   if (fixP->fx_addsy
33521       && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
33522       /* PR 17444: If the local symbol is in a different section then a reloc
33523 	 will always be generated for it, so applying the symbol value now
33524 	 will result in a double offset being stored in the relocation.  */
33525       && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
33526       && !S_FORCE_RELOC (fixP->fx_addsy, true))
33527     {
33528       switch (fixP->fx_r_type)
33529 	{
33530 	case BFD_RELOC_ARM_PCREL_BLX:
33531 	case BFD_RELOC_THUMB_PCREL_BRANCH23:
33532 	  if (ARM_IS_FUNC (fixP->fx_addsy))
33533 	    return 1;
33534 	  break;
33535 
33536 	case BFD_RELOC_ARM_PCREL_CALL:
33537 	case BFD_RELOC_THUMB_PCREL_BLX:
33538 	  if (THUMB_IS_FUNC (fixP->fx_addsy))
33539 	    return 1;
33540 	  break;
33541 
33542 	default:
33543 	  break;
33544 	}
33545 
33546     }
33547   return 0;
33548 }
33549 #endif /* OBJ_ELF */
33550