1 /* tc-i386.c -- Assemble code for the Intel 80386
2    Copyright (C) 1989-2016 Free Software Foundation, Inc.
3 
4    This file is part of GAS, the GNU Assembler.
5 
6    GAS is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3, or (at your option)
9    any later version.
10 
11    GAS is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with GAS; see the file COPYING.  If not, write to the Free
18    Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
19    02110-1301, USA.  */
20 
21 /* Intel 80386 machine specific gas.
22    Written by Eliot Dresselhaus (eliot@mgm.mit.edu).
23    x86_64 support by Jan Hubicka (jh@suse.cz)
24    VIA PadLock support by Michal Ludvig (mludvig@suse.cz)
25    Bugs & suggestions are completely welcome.  This is free software.
26    Please help us make it better.  */
27 
28 #include "as.h"
29 #include "safe-ctype.h"
30 #include "subsegs.h"
31 #include "dwarf2dbg.h"
32 #include "dw2gencfi.h"
33 #include "elf/x86-64.h"
34 #include "opcodes/i386-init.h"
35 
36 #ifndef REGISTER_WARNINGS
37 #define REGISTER_WARNINGS 1
38 #endif
39 
40 #ifndef INFER_ADDR_PREFIX
41 #define INFER_ADDR_PREFIX 1
42 #endif
43 
44 #ifndef DEFAULT_ARCH
45 #define DEFAULT_ARCH "i386"
46 #endif
47 
48 #ifndef INLINE
49 #if __GNUC__ >= 2
50 #define INLINE __inline__
51 #else
52 #define INLINE
53 #endif
54 #endif
55 
56 /* Prefixes will be emitted in the order defined below.
57    WAIT_PREFIX must be the first prefix since FWAIT really is an
58    instruction, and so must come before any prefixes.
59    The preferred prefix order is SEG_PREFIX, ADDR_PREFIX, DATA_PREFIX,
60    REP_PREFIX/HLE_PREFIX, LOCK_PREFIX.  */
61 #define WAIT_PREFIX	0
62 #define SEG_PREFIX	1
63 #define ADDR_PREFIX	2
64 #define DATA_PREFIX	3
65 #define REP_PREFIX	4
66 #define HLE_PREFIX	REP_PREFIX
67 #define BND_PREFIX	REP_PREFIX
68 #define LOCK_PREFIX	5
69 #define REX_PREFIX	6       /* must come last.  */
70 #define MAX_PREFIXES	7	/* max prefixes per opcode */
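
/* Illustrative sketch only (not the assembler's actual prefix logic):
   a "lock addw" assembled in 32-bit code would end up with roughly
     i.prefix[DATA_PREFIX] = 0x66;
     i.prefix[LOCK_PREFIX] = 0xf0;
   so that walking the prefix array in index order emits the prefixes in
   the preferred order documented above.  */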
71 
72 /* we define the syntax here (modulo base,index,scale syntax) */
73 #define REGISTER_PREFIX '%'
74 #define IMMEDIATE_PREFIX '$'
75 #define ABSOLUTE_PREFIX '*'
76 
77 /* these are the instruction mnemonic suffixes in AT&T syntax or
78    memory operand size in Intel syntax.  */
79 #define WORD_MNEM_SUFFIX  'w'
80 #define BYTE_MNEM_SUFFIX  'b'
81 #define SHORT_MNEM_SUFFIX 's'
82 #define LONG_MNEM_SUFFIX  'l'
83 #define QWORD_MNEM_SUFFIX  'q'
84 #define XMMWORD_MNEM_SUFFIX  'x'
85 #define YMMWORD_MNEM_SUFFIX 'y'
86 #define ZMMWORD_MNEM_SUFFIX 'z'
87 /* Intel Syntax.  Use a non-ASCII letter since it never appears
88    in instructions.  */
89 #define LONG_DOUBLE_MNEM_SUFFIX '\1'
90 
91 #define END_OF_INSN '\0'
92 
93 /*
94   'templates' is for grouping together 'template' structures for opcodes
95   of the same name.  This is only used for storing the insns in the grand
96   ole hash table of insns.
97   The templates themselves start at START and range up to (but not including)
98   END.
99   */
100 typedef struct
101 {
102   const insn_template *start;
103   const insn_template *end;
104 }
105 templates;
106 
107 /* 386 operand encoding bytes:  see 386 book for details of this.  */
108 typedef struct
109 {
110   unsigned int regmem;	/* codes register or memory operand */
111   unsigned int reg;	/* codes register operand (or extended opcode) */
112   unsigned int mode;	/* how to interpret regmem & reg */
113 }
114 modrm_byte;
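
/* The three fields above pack into a single byte as
   (mode << 6) | (reg << 3) | regmem; e.g. mode 3, reg 0, regmem 1
   encodes as 0xc1, a register-direct form.  */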
115 
116 /* x86-64 extension prefix.  */
117 typedef int rex_byte;
118 
119 /* 386 opcode byte to code indirect addressing.  */
120 typedef struct
121 {
122   unsigned base;
123   unsigned index;
124   unsigned scale;
125 }
126 sib_byte;
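
/* Likewise the SIB byte packs as (scale << 6) | (index << 3) | base;
   e.g. scale 2 (factor 4), index 1 (%ecx) and base 0 (%eax) give 0x88,
   i.e. the 32-bit memory operand (%eax,%ecx,4).  */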
127 
128 /* x86 arch names, types and features */
129 typedef struct
130 {
131   const char *name;		/* arch name */
132   unsigned int len;		/* arch string length */
133   enum processor_type type;	/* arch type */
134   i386_cpu_flags flags;		/* cpu feature flags */
135   unsigned int skip;		/* show_arch should skip this. */
136 }
137 arch_entry;
138 
139 /* Used to turn off indicated flags.  */
140 typedef struct
141 {
142   const char *name;		/* arch name */
143   unsigned int len;		/* arch string length */
144   i386_cpu_flags flags;		/* cpu feature flags */
145 }
146 noarch_entry;
147 
148 static void update_code_flag (int, int);
149 static void set_code_flag (int);
150 static void set_16bit_gcc_code_flag (int);
151 static void set_intel_syntax (int);
152 static void set_intel_mnemonic (int);
153 static void set_allow_index_reg (int);
154 static void set_check (int);
155 static void set_cpu_arch (int);
156 #ifdef TE_PE
157 static void pe_directive_secrel (int);
158 #endif
159 static void signed_cons (int);
160 static char *output_invalid (int c);
161 static int i386_finalize_immediate (segT, expressionS *, i386_operand_type,
162 				    const char *);
163 static int i386_finalize_displacement (segT, expressionS *, i386_operand_type,
164 				       const char *);
165 static int i386_att_operand (char *);
166 static int i386_intel_operand (char *, int);
167 static int i386_intel_simplify (expressionS *);
168 static int i386_intel_parse_name (const char *, expressionS *);
169 static const reg_entry *parse_register (char *, char **);
170 static char *parse_insn (char *, char *);
171 static char *parse_operands (char *, const char *);
172 static void swap_operands (void);
173 static void swap_2_operands (int, int);
174 static void optimize_imm (void);
175 static void optimize_disp (void);
176 static const insn_template *match_template (char);
177 static int check_string (void);
178 static int process_suffix (void);
179 static int check_byte_reg (void);
180 static int check_long_reg (void);
181 static int check_qword_reg (void);
182 static int check_word_reg (void);
183 static int finalize_imm (void);
184 static int process_operands (void);
185 static const seg_entry *build_modrm_byte (void);
186 static void output_insn (void);
187 static void output_imm (fragS *, offsetT);
188 static void output_disp (fragS *, offsetT);
189 #ifndef I386COFF
190 static void s_bss (int);
191 #endif
192 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
193 static void handle_large_common (int small ATTRIBUTE_UNUSED);
194 #endif
195 
196 static const char *default_arch = DEFAULT_ARCH;
197 
198 /* This struct describes rounding control and SAE in the instruction.  */
199 struct RC_Operation
200 {
201   enum rc_type
202     {
203       rne = 0,
204       rd,
205       ru,
206       rz,
207       saeonly
208     } type;
209   int operand;
210 };
211 
212 static struct RC_Operation rc_op;
213 
214 /* The struct describes masking, applied to OPERAND in the instruction.
215    MASK is a pointer to the corresponding mask register.  ZEROING tells
216    whether merging or zeroing mask is used.  */
217 struct Mask_Operation
218 {
219   const reg_entry *mask;
220   unsigned int zeroing;
221   /* The operand where this operation is associated.  */
222   int operand;
223 };
224 
225 static struct Mask_Operation mask_op;
226 
227 /* The struct describes broadcasting, applied to OPERAND.  TYPE gives
228    the broadcast factor ({1to8}, {1to16}, ...).  */
229 struct Broadcast_Operation
230 {
231   /* Type of broadcast: no broadcast, {1to8}, or {1to16}.  */
232   int type;
233 
234   /* Index of broadcasted operand.  */
235   int operand;
236 };
237 
238 static struct Broadcast_Operation broadcast_op;
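
/* In AT&T syntax these AVX512 decorations look like, e.g.,
     vaddps (%rax){1to16}, %zmm1, %zmm2{%k1}{z}
   where {1to16} is recorded in broadcast_op, {%k1} in mask_op.mask and
   {z} in mask_op.zeroing.  */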
239 
240 /* VEX prefix.  */
241 typedef struct
242 {
243   /* The VEX prefix is either 2 or 3 bytes; EVEX is 4 bytes.  */
244   unsigned char bytes[4];
245   unsigned int length;
246   /* Destination or source register specifier.  */
247   const reg_entry *register_specifier;
248 } vex_prefix;
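
/* The first byte identifies the encoding: 0xc5 introduces the 2-byte VEX
   form, 0xc4 the 3-byte VEX form and 0x62 the 4-byte EVEX form.  */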
249 
250 /* 'md_assemble ()' gathers together information and puts it into a
251    i386_insn.  */
252 
253 union i386_op
254   {
255     expressionS *disps;
256     expressionS *imms;
257     const reg_entry *regs;
258   };
259 
260 enum i386_error
261   {
262     operand_size_mismatch,
263     operand_type_mismatch,
264     register_type_mismatch,
265     number_of_operands_mismatch,
266     invalid_instruction_suffix,
267     bad_imm4,
268     old_gcc_only,
269     unsupported_with_intel_mnemonic,
270     unsupported_syntax,
271     unsupported,
272     invalid_vsib_address,
273     invalid_vector_register_set,
274     unsupported_vector_index_register,
275     unsupported_broadcast,
276     broadcast_not_on_src_operand,
277     broadcast_needed,
278     unsupported_masking,
279     mask_not_on_destination,
280     no_default_mask,
281     unsupported_rc_sae,
282     rc_sae_operand_not_last_imm,
283     invalid_register_operand,
284     try_vector_disp8
285   };
286 
287 struct _i386_insn
288   {
289     /* TM holds the template for the insn we're currently assembling.  */
290     insn_template tm;
291 
292     /* SUFFIX holds the instruction size suffix for byte, word, dword
293        or qword, if given.  */
294     char suffix;
295 
296     /* OPERANDS gives the number of given operands.  */
297     unsigned int operands;
298 
299     /* REG_OPERANDS, DISP_OPERANDS, MEM_OPERANDS, IMM_OPERANDS give the number
300        of given register, displacement, memory operands and immediate
301        operands.  */
302     unsigned int reg_operands, disp_operands, mem_operands, imm_operands;
303 
304     /* TYPES [i] is the type (see above #defines) which tells us how to
305        use OP[i] for the corresponding operand.  */
306     i386_operand_type types[MAX_OPERANDS];
307 
308     /* Displacement expression, immediate expression, or register for each
309        operand.  */
310     union i386_op op[MAX_OPERANDS];
311 
312     /* Flags for operands.  */
313     unsigned int flags[MAX_OPERANDS];
314 #define Operand_PCrel 1
315 
316     /* Relocation type for operand */
317     enum bfd_reloc_code_real reloc[MAX_OPERANDS];
318 
319     /* BASE_REG, INDEX_REG, and LOG2_SCALE_FACTOR are used to encode
320        the base index byte below.  */
321     const reg_entry *base_reg;
322     const reg_entry *index_reg;
323     unsigned int log2_scale_factor;
324 
325     /* SEG gives the seg_entries of this insn.  They are zero unless
326        explicit segment overrides are given.  */
327     const seg_entry *seg[2];
328 
329     /* Copied first memory operand string, for re-checking.  */
330     char *memop1_string;
331 
332     /* PREFIX holds all the given prefix opcodes (usually null).
333        PREFIXES is the number of prefix opcodes.  */
334     unsigned int prefixes;
335     unsigned char prefix[MAX_PREFIXES];
336 
337     /* RM and SIB are the modrm byte and the sib byte where the
338        addressing modes of this insn are encoded.  */
339     modrm_byte rm;
340     rex_byte rex;
341     rex_byte vrex;
342     sib_byte sib;
343     vex_prefix vex;
344 
345     /* Masking attributes.  */
346     struct Mask_Operation *mask;
347 
348     /* Rounding control and SAE attributes.  */
349     struct RC_Operation *rounding;
350 
351     /* Broadcasting attributes.  */
352     struct Broadcast_Operation *broadcast;
353 
354     /* Compressed disp8*N attribute.  */
355     unsigned int memshift;
356 
357     /* Swap operand in encoding.  */
358     unsigned int swap_operand;
359 
360     /* Prefer 8bit or 32bit displacement in encoding.  */
361     enum
362       {
363 	disp_encoding_default = 0,
364 	disp_encoding_8bit,
365 	disp_encoding_32bit
366       } disp_encoding;
367 
368     /* REP prefix.  */
369     const char *rep_prefix;
370 
371     /* HLE prefix.  */
372     const char *hle_prefix;
373 
374     /* Have BND prefix.  */
375     const char *bnd_prefix;
376 
377     /* Need VREX to support upper 16 registers.  */
378     int need_vrex;
379 
380     /* Error message.  */
381     enum i386_error error;
382   };
383 
384 typedef struct _i386_insn i386_insn;
385 
386 /* Link each RC type with the corresponding string that is looked for
387    in the assembly source.  */
388 struct RC_name
389 {
390   enum rc_type type;
391   const char *name;
392   unsigned int len;
393 };
394 
395 static const struct RC_name RC_NamesTable[] =
396 {
397   {  rne, STRING_COMMA_LEN ("rn-sae") },
398   {  rd,  STRING_COMMA_LEN ("rd-sae") },
399   {  ru,  STRING_COMMA_LEN ("ru-sae") },
400   {  rz,  STRING_COMMA_LEN ("rz-sae") },
401   {  saeonly,  STRING_COMMA_LEN ("sae") },
402 };
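
/* These strings are written in braces next to the operands, e.g. (in
   AT&T operand order)
     vaddps {rz-sae}, %zmm1, %zmm2, %zmm3
   which requests round-toward-zero with exceptions suppressed.  */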
403 
404 /* List of chars besides those in app.c:symbol_chars that can start an
405    operand.  Used to prevent the scrubber eating vital white-space.  */
406 const char extra_symbol_chars[] = "*%-([{"
407 #ifdef LEX_AT
408 	"@"
409 #endif
410 #ifdef LEX_QM
411 	"?"
412 #endif
413 	;
414 
415 #if (defined (TE_I386AIX)				\
416      || ((defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF))	\
417 	 && !defined (TE_GNU)				\
418 	 && !defined (TE_LINUX)				\
419 	 && !defined (TE_NACL)				\
420 	 && !defined (TE_NETWARE)			\
421 	 && !defined (TE_FreeBSD)			\
422 	 && !defined (TE_DragonFly)			\
423 	 && !defined (TE_NetBSD)))
424 /* This array holds the chars that always start a comment.  If the
425    pre-processor is disabled, these aren't very useful.  The option
426    --divide will remove '/' from this list.  */
427 const char *i386_comment_chars = "#/";
428 #define SVR4_COMMENT_CHARS 1
429 #define PREFIX_SEPARATOR '\\'
430 
431 #else
432 const char *i386_comment_chars = "#";
433 #define PREFIX_SEPARATOR '/'
434 #endif
435 
436 /* This array holds the chars that only start a comment at the beginning of
437    a line.  If the line seems to have the form '# 123 filename'
438    .line and .file directives will appear in the pre-processed output.
439    Note that input_file.c hand checks for '#' at the beginning of the
440    first line of the input file.  This is because the compiler outputs
441    #NO_APP at the beginning of its output.
442    Also note that comments started like this one will always work if
443    '/' isn't otherwise defined.  */
444 const char line_comment_chars[] = "#/";
445 
446 const char line_separator_chars[] = ";";
447 
448 /* Chars that can be used to separate mant from exp in floating point
449    nums.  */
450 const char EXP_CHARS[] = "eE";
451 
452 /* Chars that mean this number is a floating point constant
453    As in 0f12.456
454    or    0d1.2345e12.  */
455 const char FLT_CHARS[] = "fFdDxX";
456 
457 /* Tables for lexical analysis.  */
458 static char mnemonic_chars[256];
459 static char register_chars[256];
460 static char operand_chars[256];
461 static char identifier_chars[256];
462 static char digit_chars[256];
463 
464 /* Lexical macros.  */
465 #define is_mnemonic_char(x) (mnemonic_chars[(unsigned char) x])
466 #define is_operand_char(x) (operand_chars[(unsigned char) x])
467 #define is_register_char(x) (register_chars[(unsigned char) x])
468 #define is_space_char(x) ((x) == ' ')
469 #define is_identifier_char(x) (identifier_chars[(unsigned char) x])
470 #define is_digit_char(x) (digit_chars[(unsigned char) x])
471 
472 /* All non-digit non-letter characters that may occur in an operand.  */
473 static char operand_special_chars[] = "%$-+(,)*._~/<>|&^!:[@]";
474 
475 /* md_assemble() always leaves the strings it's passed unaltered.  To
476    effect this we maintain a stack of saved characters that we've smashed
477    with '\0's (indicating end of strings for various sub-fields of the
478    assembler instruction).  */
479 static char save_stack[32];
480 static char *save_stack_p;
481 #define END_STRING_AND_SAVE(s) \
482 	do { *save_stack_p++ = *(s); *(s) = '\0'; } while (0)
483 #define RESTORE_END_STRING(s) \
484 	do { *(s) = *--save_stack_p; } while (0)
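
/* Typical usage pattern (a sketch only): temporarily NUL-terminate a
   sub-string of the insn text, use it, then put the original character
   back:

     char *end = ...;               end of the mnemonic, say
     END_STRING_AND_SAVE (end);
     ... look the mnemonic up ...
     RESTORE_END_STRING (end);  */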
485 
486 /* The instruction we're assembling.  */
487 static i386_insn i;
488 
489 /* Possible templates for current insn.  */
490 static const templates *current_templates;
491 
492 /* Per instruction expressionS buffers: max displacements & immediates.  */
493 static expressionS disp_expressions[MAX_MEMORY_OPERANDS];
494 static expressionS im_expressions[MAX_IMMEDIATE_OPERANDS];
495 
496 /* Current operand we are working on.  */
497 static int this_operand = -1;
498 
499 /* We support 16-bit, 32-bit and 64-bit code generation.  The FLAG_CODE
500    variable is used to distinguish these modes.  */
501 
502 enum flag_code {
503 	CODE_32BIT,
504 	CODE_16BIT,
505 	CODE_64BIT };
506 
507 static enum flag_code flag_code;
508 static unsigned int object_64bit;
509 static unsigned int disallow_64bit_reloc;
510 static int use_rela_relocations = 0;
511 
512 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
513      || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
514      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
515 
516 /* The ELF ABI to use.  */
517 enum x86_elf_abi
518 {
519   I386_ABI,
520   X86_64_ABI,
521   X86_64_X32_ABI
522 };
523 
524 static enum x86_elf_abi x86_elf_abi = I386_ABI;
525 #endif
526 
527 #if defined (TE_PE) || defined (TE_PEP)
528 /* Use big object file format.  */
529 static int use_big_obj = 0;
530 #endif
531 
532 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
533 /* 1 if generating code for a shared library.  */
534 static int shared = 0;
535 #endif
536 
537 /* 1 for intel syntax,
538    0 if att syntax.  */
539 static int intel_syntax = 0;
540 
541 /* 1 for Intel64 ISA,
542    0 if AMD64 ISA.  */
543 static int intel64;
544 
545 /* 1 for intel mnemonic,
546    0 if att mnemonic.  */
547 static int intel_mnemonic = !SYSV386_COMPAT;
548 
549 /* 1 if support old (<= 2.8.1) versions of gcc.  */
550 static int old_gcc = OLDGCC_COMPAT;
551 
552 /* 1 if pseudo registers are permitted.  */
553 static int allow_pseudo_reg = 0;
554 
555 /* 1 if register prefix % not required.  */
556 static int allow_naked_reg = 0;
557 
558 /* 1 if the assembler should add BND prefix for all control-transferring
559    instructions supporting it, even if this prefix wasn't specified
560    explicitly.  */
561 static int add_bnd_prefix = 0;
562 
563 /* 1 if the pseudo index registers, eiz/riz, are allowed.  */
564 static int allow_index_reg = 0;
565 
566 /* 1 if the assembler should ignore LOCK prefix, even if it was
567    specified explicitly.  */
568 static int omit_lock_prefix = 0;
569 
570 /* 1 if the assembler should encode lfence, mfence, and sfence as
571    "lock addl $0, (%{re}sp)".  */
572 static int avoid_fence = 0;
573 
574 /* 1 if the assembler should generate relax relocations.  */
575 
576 static int generate_relax_relocations
577   = DEFAULT_GENERATE_X86_RELAX_RELOCATIONS;
578 
579 static enum check_kind
580   {
581     check_none = 0,
582     check_warning,
583     check_error
584   }
585 sse_check, operand_check = check_warning;
586 
587 /* Register prefix used for error message.  */
588 static const char *register_prefix = "%";
589 
590 /* Used in 16 bit gcc mode to add an l suffix to call, ret, enter,
591    leave, push, and pop instructions so that gcc has the same stack
592    frame as in 32 bit mode.  */
593 static char stackop_size = '\0';
594 
595 /* Non-zero to optimize code alignment.  */
596 int optimize_align_code = 1;
597 
598 /* Non-zero to quieten some warnings.  */
599 static int quiet_warnings = 0;
600 
601 /* CPU name.  */
602 static const char *cpu_arch_name = NULL;
603 static char *cpu_sub_arch_name = NULL;
604 
605 /* CPU feature flags.  */
606 static i386_cpu_flags cpu_arch_flags = CPU_UNKNOWN_FLAGS;
607 
608 /* Non-zero if a cpu to tune code generation for has been selected.  */
609 static int cpu_arch_tune_set = 0;
610 
611 /* Cpu we are generating instructions for.  */
612 enum processor_type cpu_arch_tune = PROCESSOR_UNKNOWN;
613 
614 /* CPU feature flags of cpu we are generating instructions for.  */
615 static i386_cpu_flags cpu_arch_tune_flags;
616 
617 /* CPU instruction set architecture used.  */
618 enum processor_type cpu_arch_isa = PROCESSOR_UNKNOWN;
619 
620 /* CPU feature flags of instruction set architecture used.  */
621 i386_cpu_flags cpu_arch_isa_flags;
622 
623 /* If set, conditional jumps are not automatically promoted to handle
624    larger than a byte offset.  */
625 static unsigned int no_cond_jump_promotion = 0;
626 
627 /* Encode SSE instructions with VEX prefix.  */
628 static unsigned int sse2avx;
629 
630 /* Encode scalar AVX instructions with specific vector length.  */
631 static enum
632   {
633     vex128 = 0,
634     vex256
635   } avxscalar;
636 
637 /* Encode scalar EVEX LIG instructions with specific vector length.  */
638 static enum
639   {
640     evexl128 = 0,
641     evexl256,
642     evexl512
643   } evexlig;
644 
645 /* Encode EVEX WIG instructions with specific evex.w.  */
646 static enum
647   {
648     evexw0 = 0,
649     evexw1
650   } evexwig;
651 
652 /* Value to encode in EVEX RC bits, for SAE-only instructions.  */
653 static enum rc_type evexrcig = rne;
654 
655 /* Pre-defined "_GLOBAL_OFFSET_TABLE_".  */
656 static symbolS *GOT_symbol;
657 
658 /* The dwarf2 return column, adjusted for 32 or 64 bit.  */
659 unsigned int x86_dwarf2_return_column;
660 
661 /* The dwarf2 data alignment, adjusted for 32 or 64 bit.  */
662 int x86_cie_data_alignment;
663 
664 /* Interface to relax_segment.
665    There are 3 major relax states for 386 jump insns because the
666    different types of jumps add different sizes to frags when we're
667    figuring out what sort of jump to choose to reach a given label.  */
668 
669 /* Types.  */
670 #define UNCOND_JUMP 0
671 #define COND_JUMP 1
672 #define COND_JUMP86 2
673 
674 /* Sizes.  */
675 #define CODE16	1
676 #define SMALL	0
677 #define SMALL16 (SMALL | CODE16)
678 #define BIG	2
679 #define BIG16	(BIG | CODE16)
680 
681 #ifndef INLINE
682 #ifdef __GNUC__
683 #define INLINE __inline__
684 #else
685 #define INLINE
686 #endif
687 #endif
688 
689 #define ENCODE_RELAX_STATE(type, size) \
690   ((relax_substateT) (((type) << 2) | (size)))
691 #define TYPE_FROM_RELAX_STATE(s) \
692   ((s) >> 2)
693 #define DISP_SIZE_FROM_RELAX_STATE(s) \
694     ((((s) & 3) == BIG ? 4 : (((s) & 3) == BIG16 ? 2 : 1)))
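
/* For example, ENCODE_RELAX_STATE (COND_JUMP, BIG) is (1 << 2) | 2 == 6;
   TYPE_FROM_RELAX_STATE (6) recovers COND_JUMP, and
   DISP_SIZE_FROM_RELAX_STATE (6) gives the 4 byte displacement used by a
   32-bit conditional jump.  */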
695 
696 /* This table is used by relax_frag to promote short jumps to long
697    ones where necessary.  SMALL (short) jumps may be promoted to BIG
698    (32 bit long) ones, and SMALL16 jumps to BIG16 (16 bit long).  We
699    don't allow a short jump in a 32 bit code segment to be promoted to
700    a 16 bit offset jump because it's slower (requires data size
701    prefix), and doesn't work unless the destination is in the bottom
702    64k of the code segment (the top 16 bits of eip are zeroed).  */
703 
704 const relax_typeS md_relax_table[] =
705 {
706   /* The fields are:
707      1) most positive reach of this state,
708      2) most negative reach of this state,
709      3) how many bytes this mode will have in the variable part of the frag
710      4) which index into the table to try if we can't fit into this one.  */
711 
712   /* UNCOND_JUMP states.  */
713   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG)},
714   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16)},
715   /* dword jmp adds 4 bytes to frag:
716      0 extra opcode bytes, 4 displacement bytes.  */
717   {0, 0, 4, 0},
718   /* word jmp adds 2 bytes to frag:
719      0 extra opcode bytes, 2 displacement bytes.  */
720   {0, 0, 2, 0},
721 
722   /* COND_JUMP states.  */
723   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG)},
724   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP, BIG16)},
725   /* dword conditionals add 5 bytes to frag:
726      1 extra opcode byte, 4 displacement bytes.  */
727   {0, 0, 5, 0},
728   /* word conditionals add 3 bytes to frag:
729      1 extra opcode byte, 2 displacement bytes.  */
730   {0, 0, 3, 0},
731 
732   /* COND_JUMP86 states.  */
733   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG)},
734   {127 + 1, -128 + 1, 1, ENCODE_RELAX_STATE (COND_JUMP86, BIG16)},
735   /* dword conditionals add 5 bytes to frag:
736      1 extra opcode byte, 4 displacement bytes.  */
737   {0, 0, 5, 0},
738   /* word conditionals add 4 bytes to frag:
739      1 displacement byte and a 3 byte long branch insn.  */
740   {0, 0, 4, 0}
741 };
742 
743 static const arch_entry cpu_arch[] =
744 {
745   /* Do not replace the first two entries - i386_target_format()
746      relies on them being there in this order.  */
747   { STRING_COMMA_LEN ("generic32"), PROCESSOR_GENERIC32,
748     CPU_GENERIC32_FLAGS, 0 },
749   { STRING_COMMA_LEN ("generic64"), PROCESSOR_GENERIC64,
750     CPU_GENERIC64_FLAGS, 0 },
751   { STRING_COMMA_LEN ("i8086"), PROCESSOR_UNKNOWN,
752     CPU_NONE_FLAGS, 0 },
753   { STRING_COMMA_LEN ("i186"), PROCESSOR_UNKNOWN,
754     CPU_I186_FLAGS, 0 },
755   { STRING_COMMA_LEN ("i286"), PROCESSOR_UNKNOWN,
756     CPU_I286_FLAGS, 0 },
757   { STRING_COMMA_LEN ("i386"), PROCESSOR_I386,
758     CPU_I386_FLAGS, 0 },
759   { STRING_COMMA_LEN ("i486"), PROCESSOR_I486,
760     CPU_I486_FLAGS, 0 },
761   { STRING_COMMA_LEN ("i586"), PROCESSOR_PENTIUM,
762     CPU_I586_FLAGS, 0 },
763   { STRING_COMMA_LEN ("i686"), PROCESSOR_PENTIUMPRO,
764     CPU_I686_FLAGS, 0 },
765   { STRING_COMMA_LEN ("pentium"), PROCESSOR_PENTIUM,
766     CPU_I586_FLAGS, 0 },
767   { STRING_COMMA_LEN ("pentiumpro"), PROCESSOR_PENTIUMPRO,
768     CPU_PENTIUMPRO_FLAGS, 0 },
769   { STRING_COMMA_LEN ("pentiumii"), PROCESSOR_PENTIUMPRO,
770     CPU_P2_FLAGS, 0 },
771   { STRING_COMMA_LEN ("pentiumiii"),PROCESSOR_PENTIUMPRO,
772     CPU_P3_FLAGS, 0 },
773   { STRING_COMMA_LEN ("pentium4"), PROCESSOR_PENTIUM4,
774     CPU_P4_FLAGS, 0 },
775   { STRING_COMMA_LEN ("prescott"), PROCESSOR_NOCONA,
776     CPU_CORE_FLAGS, 0 },
777   { STRING_COMMA_LEN ("nocona"), PROCESSOR_NOCONA,
778     CPU_NOCONA_FLAGS, 0 },
779   { STRING_COMMA_LEN ("yonah"), PROCESSOR_CORE,
780     CPU_CORE_FLAGS, 1 },
781   { STRING_COMMA_LEN ("core"), PROCESSOR_CORE,
782     CPU_CORE_FLAGS, 0 },
783   { STRING_COMMA_LEN ("merom"), PROCESSOR_CORE2,
784     CPU_CORE2_FLAGS, 1 },
785   { STRING_COMMA_LEN ("core2"), PROCESSOR_CORE2,
786     CPU_CORE2_FLAGS, 0 },
787   { STRING_COMMA_LEN ("corei7"), PROCESSOR_COREI7,
788     CPU_COREI7_FLAGS, 0 },
789   { STRING_COMMA_LEN ("l1om"), PROCESSOR_L1OM,
790     CPU_L1OM_FLAGS, 0 },
791   { STRING_COMMA_LEN ("k1om"), PROCESSOR_K1OM,
792     CPU_K1OM_FLAGS, 0 },
793   { STRING_COMMA_LEN ("iamcu"), PROCESSOR_IAMCU,
794     CPU_IAMCU_FLAGS, 0 },
795   { STRING_COMMA_LEN ("k6"), PROCESSOR_K6,
796     CPU_K6_FLAGS, 0 },
797   { STRING_COMMA_LEN ("k6_2"), PROCESSOR_K6,
798     CPU_K6_2_FLAGS, 0 },
799   { STRING_COMMA_LEN ("athlon"), PROCESSOR_ATHLON,
800     CPU_ATHLON_FLAGS, 0 },
801   { STRING_COMMA_LEN ("sledgehammer"), PROCESSOR_K8,
802     CPU_K8_FLAGS, 1 },
803   { STRING_COMMA_LEN ("opteron"), PROCESSOR_K8,
804     CPU_K8_FLAGS, 0 },
805   { STRING_COMMA_LEN ("k8"), PROCESSOR_K8,
806     CPU_K8_FLAGS, 0 },
807   { STRING_COMMA_LEN ("amdfam10"), PROCESSOR_AMDFAM10,
808     CPU_AMDFAM10_FLAGS, 0 },
809   { STRING_COMMA_LEN ("bdver1"), PROCESSOR_BD,
810     CPU_BDVER1_FLAGS, 0 },
811   { STRING_COMMA_LEN ("bdver2"), PROCESSOR_BD,
812     CPU_BDVER2_FLAGS, 0 },
813   { STRING_COMMA_LEN ("bdver3"), PROCESSOR_BD,
814     CPU_BDVER3_FLAGS, 0 },
815   { STRING_COMMA_LEN ("bdver4"), PROCESSOR_BD,
816     CPU_BDVER4_FLAGS, 0 },
817   { STRING_COMMA_LEN ("znver1"), PROCESSOR_ZNVER,
818     CPU_ZNVER1_FLAGS, 0 },
819   { STRING_COMMA_LEN ("btver1"), PROCESSOR_BT,
820     CPU_BTVER1_FLAGS, 0 },
821   { STRING_COMMA_LEN ("btver2"), PROCESSOR_BT,
822     CPU_BTVER2_FLAGS, 0 },
823   { STRING_COMMA_LEN (".8087"), PROCESSOR_UNKNOWN,
824     CPU_8087_FLAGS, 0 },
825   { STRING_COMMA_LEN (".287"), PROCESSOR_UNKNOWN,
826     CPU_287_FLAGS, 0 },
827   { STRING_COMMA_LEN (".387"), PROCESSOR_UNKNOWN,
828     CPU_387_FLAGS, 0 },
829   { STRING_COMMA_LEN (".687"), PROCESSOR_UNKNOWN,
830     CPU_687_FLAGS, 0 },
831   { STRING_COMMA_LEN (".mmx"), PROCESSOR_UNKNOWN,
832     CPU_MMX_FLAGS, 0 },
833   { STRING_COMMA_LEN (".sse"), PROCESSOR_UNKNOWN,
834     CPU_SSE_FLAGS, 0 },
835   { STRING_COMMA_LEN (".sse2"), PROCESSOR_UNKNOWN,
836     CPU_SSE2_FLAGS, 0 },
837   { STRING_COMMA_LEN (".sse3"), PROCESSOR_UNKNOWN,
838     CPU_SSE3_FLAGS, 0 },
839   { STRING_COMMA_LEN (".ssse3"), PROCESSOR_UNKNOWN,
840     CPU_SSSE3_FLAGS, 0 },
841   { STRING_COMMA_LEN (".sse4.1"), PROCESSOR_UNKNOWN,
842     CPU_SSE4_1_FLAGS, 0 },
843   { STRING_COMMA_LEN (".sse4.2"), PROCESSOR_UNKNOWN,
844     CPU_SSE4_2_FLAGS, 0 },
845   { STRING_COMMA_LEN (".sse4"), PROCESSOR_UNKNOWN,
846     CPU_SSE4_2_FLAGS, 0 },
847   { STRING_COMMA_LEN (".avx"), PROCESSOR_UNKNOWN,
848     CPU_AVX_FLAGS, 0 },
849   { STRING_COMMA_LEN (".avx2"), PROCESSOR_UNKNOWN,
850     CPU_AVX2_FLAGS, 0 },
851   { STRING_COMMA_LEN (".avx512f"), PROCESSOR_UNKNOWN,
852     CPU_AVX512F_FLAGS, 0 },
853   { STRING_COMMA_LEN (".avx512cd"), PROCESSOR_UNKNOWN,
854     CPU_AVX512CD_FLAGS, 0 },
855   { STRING_COMMA_LEN (".avx512er"), PROCESSOR_UNKNOWN,
856     CPU_AVX512ER_FLAGS, 0 },
857   { STRING_COMMA_LEN (".avx512pf"), PROCESSOR_UNKNOWN,
858     CPU_AVX512PF_FLAGS, 0 },
859   { STRING_COMMA_LEN (".avx512dq"), PROCESSOR_UNKNOWN,
860     CPU_AVX512DQ_FLAGS, 0 },
861   { STRING_COMMA_LEN (".avx512bw"), PROCESSOR_UNKNOWN,
862     CPU_AVX512BW_FLAGS, 0 },
863   { STRING_COMMA_LEN (".avx512vl"), PROCESSOR_UNKNOWN,
864     CPU_AVX512VL_FLAGS, 0 },
865   { STRING_COMMA_LEN (".vmx"), PROCESSOR_UNKNOWN,
866     CPU_VMX_FLAGS, 0 },
867   { STRING_COMMA_LEN (".vmfunc"), PROCESSOR_UNKNOWN,
868     CPU_VMFUNC_FLAGS, 0 },
869   { STRING_COMMA_LEN (".smx"), PROCESSOR_UNKNOWN,
870     CPU_SMX_FLAGS, 0 },
871   { STRING_COMMA_LEN (".xsave"), PROCESSOR_UNKNOWN,
872     CPU_XSAVE_FLAGS, 0 },
873   { STRING_COMMA_LEN (".xsaveopt"), PROCESSOR_UNKNOWN,
874     CPU_XSAVEOPT_FLAGS, 0 },
875   { STRING_COMMA_LEN (".xsavec"), PROCESSOR_UNKNOWN,
876     CPU_XSAVEC_FLAGS, 0 },
877   { STRING_COMMA_LEN (".xsaves"), PROCESSOR_UNKNOWN,
878     CPU_XSAVES_FLAGS, 0 },
879   { STRING_COMMA_LEN (".aes"), PROCESSOR_UNKNOWN,
880     CPU_AES_FLAGS, 0 },
881   { STRING_COMMA_LEN (".pclmul"), PROCESSOR_UNKNOWN,
882     CPU_PCLMUL_FLAGS, 0 },
883   { STRING_COMMA_LEN (".clmul"), PROCESSOR_UNKNOWN,
884     CPU_PCLMUL_FLAGS, 1 },
885   { STRING_COMMA_LEN (".fsgsbase"), PROCESSOR_UNKNOWN,
886     CPU_FSGSBASE_FLAGS, 0 },
887   { STRING_COMMA_LEN (".rdrnd"), PROCESSOR_UNKNOWN,
888     CPU_RDRND_FLAGS, 0 },
889   { STRING_COMMA_LEN (".f16c"), PROCESSOR_UNKNOWN,
890     CPU_F16C_FLAGS, 0 },
891   { STRING_COMMA_LEN (".bmi2"), PROCESSOR_UNKNOWN,
892     CPU_BMI2_FLAGS, 0 },
893   { STRING_COMMA_LEN (".fma"), PROCESSOR_UNKNOWN,
894     CPU_FMA_FLAGS, 0 },
895   { STRING_COMMA_LEN (".fma4"), PROCESSOR_UNKNOWN,
896     CPU_FMA4_FLAGS, 0 },
897   { STRING_COMMA_LEN (".xop"), PROCESSOR_UNKNOWN,
898     CPU_XOP_FLAGS, 0 },
899   { STRING_COMMA_LEN (".lwp"), PROCESSOR_UNKNOWN,
900     CPU_LWP_FLAGS, 0 },
901   { STRING_COMMA_LEN (".movbe"), PROCESSOR_UNKNOWN,
902     CPU_MOVBE_FLAGS, 0 },
903   { STRING_COMMA_LEN (".cx16"), PROCESSOR_UNKNOWN,
904     CPU_CX16_FLAGS, 0 },
905   { STRING_COMMA_LEN (".ept"), PROCESSOR_UNKNOWN,
906     CPU_EPT_FLAGS, 0 },
907   { STRING_COMMA_LEN (".lzcnt"), PROCESSOR_UNKNOWN,
908     CPU_LZCNT_FLAGS, 0 },
909   { STRING_COMMA_LEN (".hle"), PROCESSOR_UNKNOWN,
910     CPU_HLE_FLAGS, 0 },
911   { STRING_COMMA_LEN (".rtm"), PROCESSOR_UNKNOWN,
912     CPU_RTM_FLAGS, 0 },
913   { STRING_COMMA_LEN (".invpcid"), PROCESSOR_UNKNOWN,
914     CPU_INVPCID_FLAGS, 0 },
915   { STRING_COMMA_LEN (".clflush"), PROCESSOR_UNKNOWN,
916     CPU_CLFLUSH_FLAGS, 0 },
917   { STRING_COMMA_LEN (".nop"), PROCESSOR_UNKNOWN,
918     CPU_NOP_FLAGS, 0 },
919   { STRING_COMMA_LEN (".syscall"), PROCESSOR_UNKNOWN,
920     CPU_SYSCALL_FLAGS, 0 },
921   { STRING_COMMA_LEN (".rdtscp"), PROCESSOR_UNKNOWN,
922     CPU_RDTSCP_FLAGS, 0 },
923   { STRING_COMMA_LEN (".3dnow"), PROCESSOR_UNKNOWN,
924     CPU_3DNOW_FLAGS, 0 },
925   { STRING_COMMA_LEN (".3dnowa"), PROCESSOR_UNKNOWN,
926     CPU_3DNOWA_FLAGS, 0 },
927   { STRING_COMMA_LEN (".padlock"), PROCESSOR_UNKNOWN,
928     CPU_PADLOCK_FLAGS, 0 },
929   { STRING_COMMA_LEN (".pacifica"), PROCESSOR_UNKNOWN,
930     CPU_SVME_FLAGS, 1 },
931   { STRING_COMMA_LEN (".svme"), PROCESSOR_UNKNOWN,
932     CPU_SVME_FLAGS, 0 },
933   { STRING_COMMA_LEN (".sse4a"), PROCESSOR_UNKNOWN,
934     CPU_SSE4A_FLAGS, 0 },
935   { STRING_COMMA_LEN (".abm"), PROCESSOR_UNKNOWN,
936     CPU_ABM_FLAGS, 0 },
937   { STRING_COMMA_LEN (".bmi"), PROCESSOR_UNKNOWN,
938     CPU_BMI_FLAGS, 0 },
939   { STRING_COMMA_LEN (".tbm"), PROCESSOR_UNKNOWN,
940     CPU_TBM_FLAGS, 0 },
941   { STRING_COMMA_LEN (".adx"), PROCESSOR_UNKNOWN,
942     CPU_ADX_FLAGS, 0 },
943   { STRING_COMMA_LEN (".rdseed"), PROCESSOR_UNKNOWN,
944     CPU_RDSEED_FLAGS, 0 },
945   { STRING_COMMA_LEN (".prfchw"), PROCESSOR_UNKNOWN,
946     CPU_PRFCHW_FLAGS, 0 },
947   { STRING_COMMA_LEN (".smap"), PROCESSOR_UNKNOWN,
948     CPU_SMAP_FLAGS, 0 },
949   { STRING_COMMA_LEN (".mpx"), PROCESSOR_UNKNOWN,
950     CPU_MPX_FLAGS, 0 },
951   { STRING_COMMA_LEN (".sha"), PROCESSOR_UNKNOWN,
952     CPU_SHA_FLAGS, 0 },
953   { STRING_COMMA_LEN (".clflushopt"), PROCESSOR_UNKNOWN,
954     CPU_CLFLUSHOPT_FLAGS, 0 },
955   { STRING_COMMA_LEN (".prefetchwt1"), PROCESSOR_UNKNOWN,
956     CPU_PREFETCHWT1_FLAGS, 0 },
957   { STRING_COMMA_LEN (".se1"), PROCESSOR_UNKNOWN,
958     CPU_SE1_FLAGS, 0 },
959   { STRING_COMMA_LEN (".clwb"), PROCESSOR_UNKNOWN,
960     CPU_CLWB_FLAGS, 0 },
961   { STRING_COMMA_LEN (".pcommit"), PROCESSOR_UNKNOWN,
962     CPU_PCOMMIT_FLAGS, 0 },
963   { STRING_COMMA_LEN (".avx512ifma"), PROCESSOR_UNKNOWN,
964     CPU_AVX512IFMA_FLAGS, 0 },
965   { STRING_COMMA_LEN (".avx512vbmi"), PROCESSOR_UNKNOWN,
966     CPU_AVX512VBMI_FLAGS, 0 },
967   { STRING_COMMA_LEN (".clzero"), PROCESSOR_UNKNOWN,
968     CPU_CLZERO_FLAGS, 0 },
969   { STRING_COMMA_LEN (".mwaitx"), PROCESSOR_UNKNOWN,
970     CPU_MWAITX_FLAGS, 0 },
971   { STRING_COMMA_LEN (".ospke"), PROCESSOR_UNKNOWN,
972     CPU_OSPKE_FLAGS, 0 },
973   { STRING_COMMA_LEN (".rdpid"), PROCESSOR_UNKNOWN,
974     CPU_RDPID_FLAGS, 0 },
975 };
976 
977 static const noarch_entry cpu_noarch[] =
978 {
979   { STRING_COMMA_LEN ("no87"),  CPU_ANY_X87_FLAGS },
980   { STRING_COMMA_LEN ("no287"),  CPU_ANY_287_FLAGS },
981   { STRING_COMMA_LEN ("no387"),  CPU_ANY_387_FLAGS },
982   { STRING_COMMA_LEN ("no687"),  CPU_ANY_687_FLAGS },
983   { STRING_COMMA_LEN ("nommx"),  CPU_ANY_MMX_FLAGS },
984   { STRING_COMMA_LEN ("nosse"),  CPU_ANY_SSE_FLAGS },
985   { STRING_COMMA_LEN ("nosse2"),  CPU_ANY_SSE2_FLAGS },
986   { STRING_COMMA_LEN ("nosse3"),  CPU_ANY_SSE3_FLAGS },
987   { STRING_COMMA_LEN ("nossse3"),  CPU_ANY_SSSE3_FLAGS },
988   { STRING_COMMA_LEN ("nosse4.1"),  CPU_ANY_SSE4_1_FLAGS },
989   { STRING_COMMA_LEN ("nosse4.2"),  CPU_ANY_SSE4_2_FLAGS },
990   { STRING_COMMA_LEN ("nosse4"),  CPU_ANY_SSE4_1_FLAGS },
991   { STRING_COMMA_LEN ("noavx"),  CPU_ANY_AVX_FLAGS },
992   { STRING_COMMA_LEN ("noavx2"),  CPU_ANY_AVX2_FLAGS },
993   { STRING_COMMA_LEN ("noavx512f"), CPU_ANY_AVX512F_FLAGS },
994   { STRING_COMMA_LEN ("noavx512cd"), CPU_ANY_AVX512CD_FLAGS },
995   { STRING_COMMA_LEN ("noavx512er"), CPU_ANY_AVX512ER_FLAGS },
996   { STRING_COMMA_LEN ("noavx512pf"), CPU_ANY_AVX512PF_FLAGS },
997   { STRING_COMMA_LEN ("noavx512dq"), CPU_ANY_AVX512DQ_FLAGS },
998   { STRING_COMMA_LEN ("noavx512bw"), CPU_ANY_AVX512BW_FLAGS },
999   { STRING_COMMA_LEN ("noavx512vl"), CPU_ANY_AVX512VL_FLAGS },
1000   { STRING_COMMA_LEN ("noavx512ifma"), CPU_ANY_AVX512IFMA_FLAGS },
1001   { STRING_COMMA_LEN ("noavx512vbmi"), CPU_ANY_AVX512VBMI_FLAGS },
1002 };
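
/* These entries switch individual features back off.  They are written
   with a leading dot in the .arch directive (e.g. ".arch .nommx") and
   can also be appended to -march= on the command line, as in
   "-march=corei7+nosse4.2"; see set_cpu_arch and md_parse_option for the
   exact handling.  */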
1003 
1004 #ifdef I386COFF
1005 /* Like s_lcomm_internal in gas/read.c but the alignment string
1006    is allowed to be optional.  */
1007 
1008 static symbolS *
1009 pe_lcomm_internal (int needs_align, symbolS *symbolP, addressT size)
1010 {
1011   addressT align = 0;
1012 
1013   SKIP_WHITESPACE ();
1014 
1015   if (needs_align
1016       && *input_line_pointer == ',')
1017     {
1018       align = parse_align (needs_align - 1);
1019 
1020       if (align == (addressT) -1)
1021 	return NULL;
1022     }
1023   else
1024     {
1025       if (size >= 8)
1026 	align = 3;
1027       else if (size >= 4)
1028 	align = 2;
1029       else if (size >= 2)
1030 	align = 1;
1031       else
1032 	align = 0;
1033     }
1034 
1035   bss_alloc (symbolP, size, align);
1036   return symbolP;
1037 }
1038 
1039 static void
1040 pe_lcomm (int needs_align)
1041 {
1042   s_comm_internal (needs_align * 2, pe_lcomm_internal);
1043 }
1044 #endif
1045 
1046 const pseudo_typeS md_pseudo_table[] =
1047 {
1048 #if !defined(OBJ_AOUT) && !defined(USE_ALIGN_PTWO)
1049   {"align", s_align_bytes, 0},
1050 #else
1051   {"align", s_align_ptwo, 0},
1052 #endif
1053   {"arch", set_cpu_arch, 0},
1054 #ifndef I386COFF
1055   {"bss", s_bss, 0},
1056 #else
1057   {"lcomm", pe_lcomm, 1},
1058 #endif
1059   {"ffloat", float_cons, 'f'},
1060   {"dfloat", float_cons, 'd'},
1061   {"tfloat", float_cons, 'x'},
1062   {"value", cons, 2},
1063   {"slong", signed_cons, 4},
1064   {"noopt", s_ignore, 0},
1065   {"optim", s_ignore, 0},
1066   {"code16gcc", set_16bit_gcc_code_flag, CODE_16BIT},
1067   {"code16", set_code_flag, CODE_16BIT},
1068   {"code32", set_code_flag, CODE_32BIT},
1069   {"code64", set_code_flag, CODE_64BIT},
1070   {"intel_syntax", set_intel_syntax, 1},
1071   {"att_syntax", set_intel_syntax, 0},
1072   {"intel_mnemonic", set_intel_mnemonic, 1},
1073   {"att_mnemonic", set_intel_mnemonic, 0},
1074   {"allow_index_reg", set_allow_index_reg, 1},
1075   {"disallow_index_reg", set_allow_index_reg, 0},
1076   {"sse_check", set_check, 0},
1077   {"operand_check", set_check, 1},
1078 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
1079   {"largecomm", handle_large_common, 0},
1080 #else
1081   {"file", (void (*) (int)) dwarf2_directive_file, 0},
1082   {"loc", dwarf2_directive_loc, 0},
1083   {"loc_mark_labels", dwarf2_directive_loc_mark_labels, 0},
1084 #endif
1085 #ifdef TE_PE
1086   {"secrel32", pe_directive_secrel, 0},
1087 #endif
1088   {0, 0, 0}
1089 };
1090 
1091 /* For interface with expression ().  */
1092 extern char *input_line_pointer;
1093 
1094 /* Hash table for instruction mnemonic lookup.  */
1095 static struct hash_control *op_hash;
1096 
1097 /* Hash table for register lookup.  */
1098 static struct hash_control *reg_hash;
1099 
1100 void
1101 i386_align_code (fragS *fragP, int count)
1102 {
1103   /* Various efficient no-op patterns for aligning code labels.
1104      Note: Don't try to assemble the instructions in the comments.
1105      0L and 0w are not legal.  */
1106   static const unsigned char f32_1[] =
1107     {0x90};					/* nop			*/
1108   static const unsigned char f32_2[] =
1109     {0x66,0x90};				/* xchg %ax,%ax */
1110   static const unsigned char f32_3[] =
1111     {0x8d,0x76,0x00};				/* leal 0(%esi),%esi	*/
1112   static const unsigned char f32_4[] =
1113     {0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
1114   static const unsigned char f32_5[] =
1115     {0x90,					/* nop			*/
1116      0x8d,0x74,0x26,0x00};			/* leal 0(%esi,1),%esi	*/
1117   static const unsigned char f32_6[] =
1118     {0x8d,0xb6,0x00,0x00,0x00,0x00};		/* leal 0L(%esi),%esi	*/
1119   static const unsigned char f32_7[] =
1120     {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
1121   static const unsigned char f32_8[] =
1122     {0x90,					/* nop			*/
1123      0x8d,0xb4,0x26,0x00,0x00,0x00,0x00};	/* leal 0L(%esi,1),%esi */
1124   static const unsigned char f32_9[] =
1125     {0x89,0xf6,					/* movl %esi,%esi	*/
1126      0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
1127   static const unsigned char f32_10[] =
1128     {0x8d,0x76,0x00,				/* leal 0(%esi),%esi	*/
1129      0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
1130   static const unsigned char f32_11[] =
1131     {0x8d,0x74,0x26,0x00,			/* leal 0(%esi,1),%esi	*/
1132      0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
1133   static const unsigned char f32_12[] =
1134     {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
1135      0x8d,0xbf,0x00,0x00,0x00,0x00};		/* leal 0L(%edi),%edi	*/
1136   static const unsigned char f32_13[] =
1137     {0x8d,0xb6,0x00,0x00,0x00,0x00,		/* leal 0L(%esi),%esi	*/
1138      0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
1139   static const unsigned char f32_14[] =
1140     {0x8d,0xb4,0x26,0x00,0x00,0x00,0x00,	/* leal 0L(%esi,1),%esi */
1141      0x8d,0xbc,0x27,0x00,0x00,0x00,0x00};	/* leal 0L(%edi,1),%edi */
1142   static const unsigned char f16_3[] =
1143     {0x8d,0x74,0x00};				/* lea 0(%esi),%esi	*/
1144   static const unsigned char f16_4[] =
1145     {0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
1146   static const unsigned char f16_5[] =
1147     {0x90,					/* nop			*/
1148      0x8d,0xb4,0x00,0x00};			/* lea 0w(%si),%si	*/
1149   static const unsigned char f16_6[] =
1150     {0x89,0xf6,					/* mov %si,%si		*/
1151      0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
1152   static const unsigned char f16_7[] =
1153     {0x8d,0x74,0x00,				/* lea 0(%si),%si	*/
1154      0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
1155   static const unsigned char f16_8[] =
1156     {0x8d,0xb4,0x00,0x00,			/* lea 0w(%si),%si	*/
1157      0x8d,0xbd,0x00,0x00};			/* lea 0w(%di),%di	*/
1158   static const unsigned char jump_31[] =
1159     {0xeb,0x1d,0x90,0x90,0x90,0x90,0x90,	/* jmp .+31; lotsa nops	*/
1160      0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1161      0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90,
1162      0x90,0x90,0x90,0x90,0x90,0x90,0x90,0x90};
1163   static const unsigned char *const f32_patt[] = {
1164     f32_1, f32_2, f32_3, f32_4, f32_5, f32_6, f32_7, f32_8,
1165     f32_9, f32_10, f32_11, f32_12, f32_13, f32_14
1166   };
1167   static const unsigned char *const f16_patt[] = {
1168     f32_1, f32_2, f16_3, f16_4, f16_5, f16_6, f16_7, f16_8
1169   };
1170   /* nopl (%[re]ax) */
1171   static const unsigned char alt_3[] =
1172     {0x0f,0x1f,0x00};
1173   /* nopl 0(%[re]ax) */
1174   static const unsigned char alt_4[] =
1175     {0x0f,0x1f,0x40,0x00};
1176   /* nopl 0(%[re]ax,%[re]ax,1) */
1177   static const unsigned char alt_5[] =
1178     {0x0f,0x1f,0x44,0x00,0x00};
1179   /* nopw 0(%[re]ax,%[re]ax,1) */
1180   static const unsigned char alt_6[] =
1181     {0x66,0x0f,0x1f,0x44,0x00,0x00};
1182   /* nopl 0L(%[re]ax) */
1183   static const unsigned char alt_7[] =
1184     {0x0f,0x1f,0x80,0x00,0x00,0x00,0x00};
1185   /* nopl 0L(%[re]ax,%[re]ax,1) */
1186   static const unsigned char alt_8[] =
1187     {0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1188   /* nopw 0L(%[re]ax,%[re]ax,1) */
1189   static const unsigned char alt_9[] =
1190     {0x66,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1191   /* nopw %cs:0L(%[re]ax,%[re]ax,1) */
1192   static const unsigned char alt_10[] =
1193     {0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00};
1194   static const unsigned char *const alt_patt[] = {
1195     f32_1, f32_2, alt_3, alt_4, alt_5, alt_6, alt_7, alt_8,
1196     alt_9, alt_10
1197   };
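
/* As a concrete example, a 3-byte padding request in 32-bit code emits
   either f32_3 (leal 0(%esi),%esi) or alt_3 (nopl (%eax)), depending on
   which pattern table the tuning logic below selects.  */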
1198 
1199   /* Only align when COUNT is positive and within the supported maximum. */
1200   if (count <= 0 || count > MAX_MEM_FOR_RS_ALIGN_CODE)
1201     return;
1202 
1203   /* We need to decide which NOP sequence to use for 32bit and
1204      64bit. When -mtune= is used:
1205 
1206      1. For PROCESSOR_I386, PROCESSOR_I486, PROCESSOR_PENTIUM and
1207      PROCESSOR_GENERIC32, f32_patt will be used.
1208      2. For the rest, alt_patt will be used.
1209 
1210      When -mtune= isn't used, alt_patt will be used if
1211      cpu_arch_isa_flags has CpuNop.  Otherwise, f32_patt will
1212      be used.
1213 
1214      When -march= or .arch is used, we can't use anything beyond
1215      cpu_arch_isa_flags.   */
1216 
1217   if (flag_code == CODE_16BIT)
1218     {
1219       if (count > 8)
1220 	{
1221 	  memcpy (fragP->fr_literal + fragP->fr_fix,
1222 		  jump_31, count);
1223 	  /* Adjust jump offset.  */
1224 	  fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1225 	}
1226       else
1227 	memcpy (fragP->fr_literal + fragP->fr_fix,
1228 		f16_patt[count - 1], count);
1229     }
1230   else
1231     {
1232       const unsigned char *const *patt = NULL;
1233 
1234       if (fragP->tc_frag_data.isa == PROCESSOR_UNKNOWN)
1235 	{
1236 	  /* PROCESSOR_UNKNOWN means that all ISAs may be used.  */
1237 	  switch (cpu_arch_tune)
1238 	    {
1239 	    case PROCESSOR_UNKNOWN:
1240 	      /* We use cpu_arch_isa_flags to check if we SHOULD
1241 		 optimize with nops.  */
1242 	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1243 		patt = alt_patt;
1244 	      else
1245 		patt = f32_patt;
1246 	      break;
1247 	    case PROCESSOR_PENTIUM4:
1248 	    case PROCESSOR_NOCONA:
1249 	    case PROCESSOR_CORE:
1250 	    case PROCESSOR_CORE2:
1251 	    case PROCESSOR_COREI7:
1252 	    case PROCESSOR_L1OM:
1253 	    case PROCESSOR_K1OM:
1254 	    case PROCESSOR_GENERIC64:
1255 	    case PROCESSOR_K6:
1256 	    case PROCESSOR_ATHLON:
1257 	    case PROCESSOR_K8:
1258 	    case PROCESSOR_AMDFAM10:
1259 	    case PROCESSOR_BD:
1260 	    case PROCESSOR_ZNVER:
1261 	    case PROCESSOR_BT:
1262 	      patt = alt_patt;
1263 	      break;
1264 	    case PROCESSOR_I386:
1265 	    case PROCESSOR_I486:
1266 	    case PROCESSOR_PENTIUM:
1267 	    case PROCESSOR_PENTIUMPRO:
1268 	    case PROCESSOR_IAMCU:
1269 	    case PROCESSOR_GENERIC32:
1270 	      patt = f32_patt;
1271 	      break;
1272 	    }
1273 	}
1274       else
1275 	{
1276 	  switch (fragP->tc_frag_data.tune)
1277 	    {
1278 	    case PROCESSOR_UNKNOWN:
1279 	      /* When cpu_arch_isa is set, cpu_arch_tune shouldn't be
1280 		 PROCESSOR_UNKNOWN.  */
1281 	      abort ();
1282 	      break;
1283 
1284 	    case PROCESSOR_I386:
1285 	    case PROCESSOR_I486:
1286 	    case PROCESSOR_PENTIUM:
1287 	    case PROCESSOR_IAMCU:
1288 	    case PROCESSOR_K6:
1289 	    case PROCESSOR_ATHLON:
1290 	    case PROCESSOR_K8:
1291 	    case PROCESSOR_AMDFAM10:
1292 	    case PROCESSOR_BD:
1293 	    case PROCESSOR_ZNVER:
1294 	    case PROCESSOR_BT:
1295 	    case PROCESSOR_GENERIC32:
1296 	      /* We use cpu_arch_isa_flags to check if we CAN optimize
1297 		 with nops.  */
1298 	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1299 		patt = alt_patt;
1300 	      else
1301 		patt = f32_patt;
1302 	      break;
1303 	    case PROCESSOR_PENTIUMPRO:
1304 	    case PROCESSOR_PENTIUM4:
1305 	    case PROCESSOR_NOCONA:
1306 	    case PROCESSOR_CORE:
1307 	    case PROCESSOR_CORE2:
1308 	    case PROCESSOR_COREI7:
1309 	    case PROCESSOR_L1OM:
1310 	    case PROCESSOR_K1OM:
1311 	      if (fragP->tc_frag_data.isa_flags.bitfield.cpunop)
1312 		patt = alt_patt;
1313 	      else
1314 		patt = f32_patt;
1315 	      break;
1316 	    case PROCESSOR_GENERIC64:
1317 	      patt = alt_patt;
1318 	      break;
1319 	    }
1320 	}
1321 
1322       if (patt == f32_patt)
1323 	{
1324 	  /* If the padding is less than 15 bytes, we use the normal
1325 	     ones.  Otherwise, we use a jump instruction and adjust
1326 	     its offset.   */
1327 	  int limit;
1328 
1329 	  /* For 64bit, the limit is 3 bytes.  */
1330 	  if (flag_code == CODE_64BIT
1331 	      && fragP->tc_frag_data.isa_flags.bitfield.cpulm)
1332 	    limit = 3;
1333 	  else
1334 	    limit = 15;
1335 	  if (count < limit)
1336 	    memcpy (fragP->fr_literal + fragP->fr_fix,
1337 		    patt[count - 1], count);
1338 	  else
1339 	    {
1340 	      memcpy (fragP->fr_literal + fragP->fr_fix,
1341 		      jump_31, count);
1342 	      /* Adjust jump offset.  */
1343 	      fragP->fr_literal[fragP->fr_fix + 1] = count - 2;
1344 	    }
1345 	}
1346       else
1347 	{
1348 	  /* Maximum length of an instruction is 10 bytes.  If the
1349 	     padding is greater than 10 bytes and we don't use a jump,
1350 	     we have to break it into smaller pieces.  */
1351 	  int padding = count;
1352 	  while (padding > 10)
1353 	    {
1354 	      padding -= 10;
1355 	      memcpy (fragP->fr_literal + fragP->fr_fix + padding,
1356 		      patt [9], 10);
1357 	    }
1358 
1359 	  if (padding)
1360 	    memcpy (fragP->fr_literal + fragP->fr_fix,
1361 		    patt [padding - 1], padding);
1362 	}
1363     }
1364   fragP->fr_var = count;
1365 }
1366 
1367 static INLINE int
1368 operand_type_all_zero (const union i386_operand_type *x)
1369 {
1370   switch (ARRAY_SIZE(x->array))
1371     {
1372     case 3:
1373       if (x->array[2])
1374 	return 0;
1375     case 2:
1376       if (x->array[1])
1377 	return 0;
1378     case 1:
1379       return !x->array[0];
1380     default:
1381       abort ();
1382     }
1383 }
1384 
1385 static INLINE void
1386 operand_type_set (union i386_operand_type *x, unsigned int v)
1387 {
1388   switch (ARRAY_SIZE(x->array))
1389     {
1390     case 3:
1391       x->array[2] = v;
1392     case 2:
1393       x->array[1] = v;
1394     case 1:
1395       x->array[0] = v;
1396       break;
1397     default:
1398       abort ();
1399     }
1400 }
1401 
1402 static INLINE int
1403 operand_type_equal (const union i386_operand_type *x,
1404 		    const union i386_operand_type *y)
1405 {
1406   switch (ARRAY_SIZE(x->array))
1407     {
1408     case 3:
1409       if (x->array[2] != y->array[2])
1410 	return 0;
1411     case 2:
1412       if (x->array[1] != y->array[1])
1413 	return 0;
1414     case 1:
1415       return x->array[0] == y->array[0];
1416       break;
1417     default:
1418       abort ();
1419     }
1420 }
1421 
1422 static INLINE int
1423 cpu_flags_all_zero (const union i386_cpu_flags *x)
1424 {
1425   switch (ARRAY_SIZE(x->array))
1426     {
1427     case 3:
1428       if (x->array[2])
1429 	return 0;
1430     case 2:
1431       if (x->array[1])
1432 	return 0;
1433     case 1:
1434       return !x->array[0];
1435     default:
1436       abort ();
1437     }
1438 }
1439 
1440 static INLINE int
1441 cpu_flags_equal (const union i386_cpu_flags *x,
1442 		 const union i386_cpu_flags *y)
1443 {
1444   switch (ARRAY_SIZE(x->array))
1445     {
1446     case 3:
1447       if (x->array[2] != y->array[2])
1448 	return 0;
1449     case 2:
1450       if (x->array[1] != y->array[1])
1451 	return 0;
1452     case 1:
1453       return x->array[0] == y->array[0];
1454       break;
1455     default:
1456       abort ();
1457     }
1458 }
1459 
1460 static INLINE int
1461 cpu_flags_check_cpu64 (i386_cpu_flags f)
1462 {
1463   return !((flag_code == CODE_64BIT && f.bitfield.cpuno64)
1464 	   || (flag_code != CODE_64BIT && f.bitfield.cpu64));
1465 }
1466 
1467 static INLINE i386_cpu_flags
1468 cpu_flags_and (i386_cpu_flags x, i386_cpu_flags y)
1469 {
1470   switch (ARRAY_SIZE (x.array))
1471     {
1472     case 3:
1473       x.array [2] &= y.array [2];
1474     case 2:
1475       x.array [1] &= y.array [1];
1476     case 1:
1477       x.array [0] &= y.array [0];
1478       break;
1479     default:
1480       abort ();
1481     }
1482   return x;
1483 }
1484 
1485 static INLINE i386_cpu_flags
1486 cpu_flags_or (i386_cpu_flags x, i386_cpu_flags y)
1487 {
1488   switch (ARRAY_SIZE (x.array))
1489     {
1490     case 3:
1491       x.array [2] |= y.array [2];
1492     case 2:
1493       x.array [1] |= y.array [1];
1494     case 1:
1495       x.array [0] |= y.array [0];
1496       break;
1497     default:
1498       abort ();
1499     }
1500   return x;
1501 }
1502 
1503 static INLINE i386_cpu_flags
1504 cpu_flags_and_not (i386_cpu_flags x, i386_cpu_flags y)
1505 {
1506   switch (ARRAY_SIZE (x.array))
1507     {
1508     case 3:
1509       x.array [2] &= ~y.array [2];
1510     case 2:
1511       x.array [1] &= ~y.array [1];
1512     case 1:
1513       x.array [0] &= ~y.array [0];
1514       break;
1515     default:
1516       abort ();
1517     }
1518   return x;
1519 }
1520 
1521 static int
1522 valid_iamcu_cpu_flags (const i386_cpu_flags *flags)
1523 {
1524   if (cpu_arch_isa == PROCESSOR_IAMCU)
1525     {
1526       static const i386_cpu_flags iamcu_flags = CPU_IAMCU_COMPAT_FLAGS;
1527       i386_cpu_flags compat_flags;
1528       compat_flags = cpu_flags_and_not (*flags, iamcu_flags);
1529       return cpu_flags_all_zero (&compat_flags);
1530     }
1531   else
1532     return 1;
1533 }
1534 
1535 #define CPU_FLAGS_ARCH_MATCH		0x1
1536 #define CPU_FLAGS_64BIT_MATCH		0x2
1537 #define CPU_FLAGS_AES_MATCH		0x4
1538 #define CPU_FLAGS_PCLMUL_MATCH		0x8
1539 #define CPU_FLAGS_AVX_MATCH	       0x10
1540 
1541 #define CPU_FLAGS_32BIT_MATCH \
1542   (CPU_FLAGS_ARCH_MATCH | CPU_FLAGS_AES_MATCH \
1543    | CPU_FLAGS_PCLMUL_MATCH | CPU_FLAGS_AVX_MATCH)
1544 #define CPU_FLAGS_PERFECT_MATCH \
1545   (CPU_FLAGS_32BIT_MATCH | CPU_FLAGS_64BIT_MATCH)
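
/* For example, an AVX template assembled while only SSE2 is enabled can
   still report CPU_FLAGS_64BIT_MATCH without CPU_FLAGS_ARCH_MATCH, which
   lets the caller distinguish an ISA mismatch from a 64-bit-mode
   mismatch.  */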
1546 
1547 /* Return CPU flags match bits. */
1548 
1549 static int
1550 cpu_flags_match (const insn_template *t)
1551 {
1552   i386_cpu_flags x = t->cpu_flags;
1553   int match = cpu_flags_check_cpu64 (x) ? CPU_FLAGS_64BIT_MATCH : 0;
1554 
1555   x.bitfield.cpu64 = 0;
1556   x.bitfield.cpuno64 = 0;
1557 
1558   if (cpu_flags_all_zero (&x))
1559     {
1560       /* This instruction is available on all archs.  */
1561       match |= CPU_FLAGS_32BIT_MATCH;
1562     }
1563   else
1564     {
1565       /* This instruction is available only on some archs.  */
1566       i386_cpu_flags cpu = cpu_arch_flags;
1567 
1568       cpu = cpu_flags_and (x, cpu);
1569       if (!cpu_flags_all_zero (&cpu))
1570 	{
1571 	  if (x.bitfield.cpuavx)
1572 	    {
1573 	      /* We only need to check AES/PCLMUL/SSE2AVX with AVX.  */
1574 	      if (cpu.bitfield.cpuavx)
1575 		{
1576 		  /* Check SSE2AVX.  */
1577 		  if (!t->opcode_modifier.sse2avx || sse2avx)
1578 		    {
1579 		      match |= (CPU_FLAGS_ARCH_MATCH
1580 				| CPU_FLAGS_AVX_MATCH);
1581 		      /* Check AES.  */
1582 		      if (!x.bitfield.cpuaes || cpu.bitfield.cpuaes)
1583 			match |= CPU_FLAGS_AES_MATCH;
1584 		      /* Check PCLMUL.  */
1585 		      if (!x.bitfield.cpupclmul
1586 			  || cpu.bitfield.cpupclmul)
1587 			match |= CPU_FLAGS_PCLMUL_MATCH;
1588 		    }
1589 		}
1590 	      else
1591 		match |= CPU_FLAGS_ARCH_MATCH;
1592 	    }
1593 	  else if (x.bitfield.cpuavx512vl)
1594 	    {
1595 	      /* Match AVX512VL.  */
1596 	      if (cpu.bitfield.cpuavx512vl)
1597 		{
1598 		  /* Need another match.  */
1599 		  cpu.bitfield.cpuavx512vl = 0;
1600 		  if (!cpu_flags_all_zero (&cpu))
1601 		    match |= CPU_FLAGS_32BIT_MATCH;
1602 		  else
1603 		    match |= CPU_FLAGS_ARCH_MATCH;
1604 		}
1605 	      else
1606 		match |= CPU_FLAGS_ARCH_MATCH;
1607 	    }
1608 	  else
1609 	    match |= CPU_FLAGS_32BIT_MATCH;
1610 	}
1611     }
1612   return match;
1613 }
1614 
1615 static INLINE i386_operand_type
1616 operand_type_and (i386_operand_type x, i386_operand_type y)
1617 {
1618   switch (ARRAY_SIZE (x.array))
1619     {
1620     case 3:
1621       x.array [2] &= y.array [2];
1622     case 2:
1623       x.array [1] &= y.array [1];
1624     case 1:
1625       x.array [0] &= y.array [0];
1626       break;
1627     default:
1628       abort ();
1629     }
1630   return x;
1631 }
1632 
1633 static INLINE i386_operand_type
1634 operand_type_or (i386_operand_type x, i386_operand_type y)
1635 {
1636   switch (ARRAY_SIZE (x.array))
1637     {
1638     case 3:
1639       x.array [2] |= y.array [2];
1640     case 2:
1641       x.array [1] |= y.array [1];
1642     case 1:
1643       x.array [0] |= y.array [0];
1644       break;
1645     default:
1646       abort ();
1647     }
1648   return x;
1649 }
1650 
1651 static INLINE i386_operand_type
1652 operand_type_xor (i386_operand_type x, i386_operand_type y)
1653 {
1654   switch (ARRAY_SIZE (x.array))
1655     {
1656     case 3:
1657       x.array [2] ^= y.array [2];
1658     case 2:
1659       x.array [1] ^= y.array [1];
1660     case 1:
1661       x.array [0] ^= y.array [0];
1662       break;
1663     default:
1664       abort ();
1665     }
1666   return x;
1667 }
1668 
1669 static const i386_operand_type acc32 = OPERAND_TYPE_ACC32;
1670 static const i386_operand_type acc64 = OPERAND_TYPE_ACC64;
1671 static const i386_operand_type control = OPERAND_TYPE_CONTROL;
1672 static const i386_operand_type inoutportreg
1673   = OPERAND_TYPE_INOUTPORTREG;
1674 static const i386_operand_type reg16_inoutportreg
1675   = OPERAND_TYPE_REG16_INOUTPORTREG;
1676 static const i386_operand_type disp16 = OPERAND_TYPE_DISP16;
1677 static const i386_operand_type disp32 = OPERAND_TYPE_DISP32;
1678 static const i386_operand_type disp32s = OPERAND_TYPE_DISP32S;
1679 static const i386_operand_type disp16_32 = OPERAND_TYPE_DISP16_32;
1680 static const i386_operand_type anydisp
1681   = OPERAND_TYPE_ANYDISP;
1682 static const i386_operand_type regxmm = OPERAND_TYPE_REGXMM;
1683 static const i386_operand_type regymm = OPERAND_TYPE_REGYMM;
1684 static const i386_operand_type regzmm = OPERAND_TYPE_REGZMM;
1685 static const i386_operand_type regmask = OPERAND_TYPE_REGMASK;
1686 static const i386_operand_type imm8 = OPERAND_TYPE_IMM8;
1687 static const i386_operand_type imm8s = OPERAND_TYPE_IMM8S;
1688 static const i386_operand_type imm16 = OPERAND_TYPE_IMM16;
1689 static const i386_operand_type imm32 = OPERAND_TYPE_IMM32;
1690 static const i386_operand_type imm32s = OPERAND_TYPE_IMM32S;
1691 static const i386_operand_type imm64 = OPERAND_TYPE_IMM64;
1692 static const i386_operand_type imm16_32 = OPERAND_TYPE_IMM16_32;
1693 static const i386_operand_type imm16_32s = OPERAND_TYPE_IMM16_32S;
1694 static const i386_operand_type imm16_32_32s = OPERAND_TYPE_IMM16_32_32S;
1695 static const i386_operand_type vec_imm4 = OPERAND_TYPE_VEC_IMM4;
1696 
1697 enum operand_type
1698 {
1699   reg,
1700   imm,
1701   disp,
1702   anymem
1703 };
1704 
1705 static INLINE int
1706 operand_type_check (i386_operand_type t, enum operand_type c)
1707 {
1708   switch (c)
1709     {
1710     case reg:
1711       return (t.bitfield.reg8
1712 	      || t.bitfield.reg16
1713 	      || t.bitfield.reg32
1714 	      || t.bitfield.reg64);
1715 
1716     case imm:
1717       return (t.bitfield.imm8
1718 	      || t.bitfield.imm8s
1719 	      || t.bitfield.imm16
1720 	      || t.bitfield.imm32
1721 	      || t.bitfield.imm32s
1722 	      || t.bitfield.imm64);
1723 
1724     case disp:
1725       return (t.bitfield.disp8
1726 	      || t.bitfield.disp16
1727 	      || t.bitfield.disp32
1728 	      || t.bitfield.disp32s
1729 	      || t.bitfield.disp64);
1730 
1731     case anymem:
1732       return (t.bitfield.disp8
1733 	      || t.bitfield.disp16
1734 	      || t.bitfield.disp32
1735 	      || t.bitfield.disp32s
1736 	      || t.bitfield.disp64
1737 	      || t.bitfield.baseindex);
1738 
1739     default:
1740       abort ();
1741     }
1742 
1743   return 0;
1744 }
1745 
1746 /* Return 1 if there is no conflict in 8bit/16bit/32bit/64bit on
1747    operand J for instruction template T.  */
1748 
1749 static INLINE int
1750 match_reg_size (const insn_template *t, unsigned int j)
1751 {
1752   return !((i.types[j].bitfield.byte
1753 	    && !t->operand_types[j].bitfield.byte)
1754 	   || (i.types[j].bitfield.word
1755 	       && !t->operand_types[j].bitfield.word)
1756 	   || (i.types[j].bitfield.dword
1757 	       && !t->operand_types[j].bitfield.dword)
1758 	   || (i.types[j].bitfield.qword
1759 	       && !t->operand_types[j].bitfield.qword));
1760 }
1761 
1762 /* Return 1 if there is no conflict in any size on operand J for
1763    instruction template T.  */
1764 
1765 static INLINE int
1766 match_mem_size (const insn_template *t, unsigned int j)
1767 {
1768   return (match_reg_size (t, j)
1769 	  && !((i.types[j].bitfield.unspecified
1770 		&& !i.broadcast
1771 		&& !t->operand_types[j].bitfield.unspecified)
1772 	       || (i.types[j].bitfield.fword
1773 		   && !t->operand_types[j].bitfield.fword)
1774 	       || (i.types[j].bitfield.tbyte
1775 		   && !t->operand_types[j].bitfield.tbyte)
1776 	       || (i.types[j].bitfield.xmmword
1777 		   && !t->operand_types[j].bitfield.xmmword)
1778 	       || (i.types[j].bitfield.ymmword
1779 		   && !t->operand_types[j].bitfield.ymmword)
1780 	       || (i.types[j].bitfield.zmmword
1781 		   && !t->operand_types[j].bitfield.zmmword)));
1782 }
1783 
1784 /* Return 1 if there is no size conflict on any operands for
1785    instruction template T.  */
1786 
1787 static INLINE int
1788 operand_size_match (const insn_template *t)
1789 {
1790   unsigned int j;
1791   int match = 1;
1792 
1793   /* Don't check jump instructions.  */
1794   if (t->opcode_modifier.jump
1795       || t->opcode_modifier.jumpbyte
1796       || t->opcode_modifier.jumpdword
1797       || t->opcode_modifier.jumpintersegment)
1798     return match;
1799 
1800   /* Check memory and accumulator operand size.  */
1801   for (j = 0; j < i.operands; j++)
1802     {
1803       if (t->operand_types[j].bitfield.anysize)
1804 	continue;
1805 
1806       if (t->operand_types[j].bitfield.acc && !match_reg_size (t, j))
1807 	{
1808 	  match = 0;
1809 	  break;
1810 	}
1811 
1812       if (i.types[j].bitfield.mem && !match_mem_size (t, j))
1813 	{
1814 	  match = 0;
1815 	  break;
1816 	}
1817     }
1818 
1819   if (match)
1820     return match;
1821   else if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
1822     {
1823 mismatch:
1824       i.error = operand_size_mismatch;
1825       return 0;
1826     }
1827 
1828   /* Check reverse.  */
1829   gas_assert (i.operands == 2);
1830 
1831   match = 1;
1832   for (j = 0; j < 2; j++)
1833     {
1834       if (t->operand_types[j].bitfield.acc
1835 	  && !match_reg_size (t, j ? 0 : 1))
1836 	goto mismatch;
1837 
1838       if (i.types[j].bitfield.mem
1839 	  && !match_mem_size (t, j ? 0 : 1))
1840 	goto mismatch;
1841     }
1842 
1843   return match;
1844 }
1845 
1846 static INLINE int
1847 operand_type_match (i386_operand_type overlap,
1848 		    i386_operand_type given)
1849 {
1850   i386_operand_type temp = overlap;
1851 
1852   temp.bitfield.jumpabsolute = 0;
1853   temp.bitfield.unspecified = 0;
1854   temp.bitfield.byte = 0;
1855   temp.bitfield.word = 0;
1856   temp.bitfield.dword = 0;
1857   temp.bitfield.fword = 0;
1858   temp.bitfield.qword = 0;
1859   temp.bitfield.tbyte = 0;
1860   temp.bitfield.xmmword = 0;
1861   temp.bitfield.ymmword = 0;
1862   temp.bitfield.zmmword = 0;
1863   if (operand_type_all_zero (&temp))
1864     goto mismatch;
1865 
1866   if (given.bitfield.baseindex == overlap.bitfield.baseindex
1867       && given.bitfield.jumpabsolute == overlap.bitfield.jumpabsolute)
1868     return 1;
1869 
1870 mismatch:
1871   i.error = operand_type_mismatch;
1872   return 0;
1873 }
1874 
1875 /* If given types g0 and g1 are registers they must be of the same type
1876    unless the expected operand type register overlap is null.
1877    Note that Acc in a template matches every size of reg.  */
1878 
1879 static INLINE int
1880 operand_type_register_match (i386_operand_type m0,
1881 			     i386_operand_type g0,
1882 			     i386_operand_type t0,
1883 			     i386_operand_type m1,
1884 			     i386_operand_type g1,
1885 			     i386_operand_type t1)
1886 {
1887   if (!operand_type_check (g0, reg))
1888     return 1;
1889 
1890   if (!operand_type_check (g1, reg))
1891     return 1;
1892 
1893   if (g0.bitfield.reg8 == g1.bitfield.reg8
1894       && g0.bitfield.reg16 == g1.bitfield.reg16
1895       && g0.bitfield.reg32 == g1.bitfield.reg32
1896       && g0.bitfield.reg64 == g1.bitfield.reg64)
1897     return 1;
1898 
1899   if (m0.bitfield.acc)
1900     {
1901       t0.bitfield.reg8 = 1;
1902       t0.bitfield.reg16 = 1;
1903       t0.bitfield.reg32 = 1;
1904       t0.bitfield.reg64 = 1;
1905     }
1906 
1907   if (m1.bitfield.acc)
1908     {
1909       t1.bitfield.reg8 = 1;
1910       t1.bitfield.reg16 = 1;
1911       t1.bitfield.reg32 = 1;
1912       t1.bitfield.reg64 = 1;
1913     }
1914 
1915   if (!(t0.bitfield.reg8 & t1.bitfield.reg8)
1916       && !(t0.bitfield.reg16 & t1.bitfield.reg16)
1917       && !(t0.bitfield.reg32 & t1.bitfield.reg32)
1918       && !(t0.bitfield.reg64 & t1.bitfield.reg64))
1919     return 1;
1920 
1921   i.error = register_type_mismatch;
1922 
1923   return 0;
1924 }
1925 
1926 static INLINE unsigned int
1927 register_number (const reg_entry *r)
1928 {
1929   unsigned int nr = r->reg_num;
1930 
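  /* RegRex adds the REX extension (registers 8-15); RegVRex adds the
     EVEX extension used for the upper 16 registers.  */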
1931   if (r->reg_flags & RegRex)
1932     nr += 8;
1933 
1934   if (r->reg_flags & RegVRex)
1935     nr += 16;
1936 
1937   return nr;
1938 }
1939 
1940 static INLINE unsigned int
1941 mode_from_disp_size (i386_operand_type t)
1942 {
1943   if (t.bitfield.disp8 || t.bitfield.vec_disp8)
1944     return 1;
1945   else if (t.bitfield.disp16
1946 	   || t.bitfield.disp32
1947 	   || t.bitfield.disp32s)
1948     return 2;
1949   else
1950     return 0;
1951 }
1952 
1953 static INLINE int
1954 fits_in_signed_byte (addressT num)
1955 {
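  /* Biasing by 0x80 maps the signed range [-0x80, 0x7f] onto [0, 0xff],
     so a single unsigned comparison suffices.  */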
1956   return num + 0x80 <= 0xff;
1957 }
1958 
1959 static INLINE int
1960 fits_in_unsigned_byte (addressT num)
1961 {
1962   return num <= 0xff;
1963 }
1964 
1965 static INLINE int
1966 fits_in_unsigned_word (addressT num)
1967 {
1968   return num <= 0xffff;
1969 }
1970 
1971 static INLINE int
1972 fits_in_signed_word (addressT num)
1973 {
1974   return num + 0x8000 <= 0xffff;
1975 }
1976 
1977 static INLINE int
1978 fits_in_signed_long (addressT num ATTRIBUTE_UNUSED)
1979 {
1980 #ifndef BFD64
1981   return 1;
1982 #else
1983   return num + 0x80000000 <= 0xffffffff;
1984 #endif
1985 }				/* fits_in_signed_long() */
1986 
1987 static INLINE int
1988 fits_in_unsigned_long (addressT num ATTRIBUTE_UNUSED)
1989 {
1990 #ifndef BFD64
1991   return 1;
1992 #else
1993   return num <= 0xffffffff;
1994 #endif
1995 }				/* fits_in_unsigned_long() */
1996 
1997 static INLINE int
1998 fits_in_vec_disp8 (offsetT num)
1999 {
2000   int shift = i.memshift;
2001   unsigned int mask;
2002 
2003   if (shift == -1)
2004     abort ();
2005 
2006   mask = (1 << shift) - 1;
2007 
2008   /* Return 0 if NUM isn't properly aligned.  */
2009   if ((num & mask))
2010     return 0;
2011 
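  /* For example, with i.memshift == 6 a displacement of 0x80 passes the
     alignment check above, and 0x80 >> 6 == 2 fits in a signed byte.  */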
2012   /* Check if NUM will fit in 8bit after shift.  */
2013   return fits_in_signed_byte (num >> shift);
2014 }
2015 
2016 static INLINE int
2017 fits_in_imm4 (offsetT num)
2018 {
2019   return (num & 0xf) == num;
2020 }
2021 
2022 static i386_operand_type
2023 smallest_imm_type (offsetT num)
2024 {
2025   i386_operand_type t;
2026 
2027   operand_type_set (&t, 0);
2028   t.bitfield.imm64 = 1;
2029 
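  /* Imm64 always fits; each case below additionally sets every narrower
     immediate type the value fits in.  */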
2030   if (cpu_arch_tune != PROCESSOR_I486 && num == 1)
2031     {
2032       /* This code is disabled on the 486 because all the Imm1 forms
2033 	 in the opcode table are slower on the i486.  They're the
2034 	 versions with the implicitly specified single-position
2035 	 displacement, which has another syntax if you really want to
2036 	 use that form.  */
2037       t.bitfield.imm1 = 1;
2038       t.bitfield.imm8 = 1;
2039       t.bitfield.imm8s = 1;
2040       t.bitfield.imm16 = 1;
2041       t.bitfield.imm32 = 1;
2042       t.bitfield.imm32s = 1;
2043     }
2044   else if (fits_in_signed_byte (num))
2045     {
2046       t.bitfield.imm8 = 1;
2047       t.bitfield.imm8s = 1;
2048       t.bitfield.imm16 = 1;
2049       t.bitfield.imm32 = 1;
2050       t.bitfield.imm32s = 1;
2051     }
2052   else if (fits_in_unsigned_byte (num))
2053     {
2054       t.bitfield.imm8 = 1;
2055       t.bitfield.imm16 = 1;
2056       t.bitfield.imm32 = 1;
2057       t.bitfield.imm32s = 1;
2058     }
2059   else if (fits_in_signed_word (num) || fits_in_unsigned_word (num))
2060     {
2061       t.bitfield.imm16 = 1;
2062       t.bitfield.imm32 = 1;
2063       t.bitfield.imm32s = 1;
2064     }
2065   else if (fits_in_signed_long (num))
2066     {
2067       t.bitfield.imm32 = 1;
2068       t.bitfield.imm32s = 1;
2069     }
2070   else if (fits_in_unsigned_long (num))
2071     t.bitfield.imm32 = 1;
2072 
2073   return t;
2074 }
2075 
2076 static offsetT
2077 offset_in_range (offsetT val, int size)
2078 {
2079   addressT mask;
2080 
2081   switch (size)
2082     {
2083     case 1: mask = ((addressT) 1 <<  8) - 1; break;
2084     case 2: mask = ((addressT) 1 << 16) - 1; break;
2085     case 4: mask = ((addressT) 2 << 31) - 1; break;
2086 #ifdef BFD64
2087     case 8: mask = ((addressT) 2 << 63) - 1; break;
2088 #endif
2089     default: abort ();
2090     }
2091 
2092 #ifdef BFD64
2093   /* If BFD64, sign extend val for 32bit address mode.  */
2094   if (flag_code != CODE_64BIT
2095       || i.prefix[ADDR_PREFIX])
2096     if ((val & ~(((addressT) 2 << 31) - 1)) == 0)
2097       val = (val ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
2098 #endif
2099 
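  /* Accept values that are zero- or sign-extended from SIZE bytes;
     anything else is truncated to SIZE bytes with a warning.  */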
2100   if ((val & ~mask) != 0 && (val & ~mask) != ~mask)
2101     {
2102       char buf1[40], buf2[40];
2103 
2104       sprint_value (buf1, val);
2105       sprint_value (buf2, val & mask);
2106       as_warn (_("%s shortened to %s"), buf1, buf2);
2107     }
2108   return val & mask;
2109 }
2110 
2111 enum PREFIX_GROUP
2112 {
2113   PREFIX_EXIST = 0,
2114   PREFIX_LOCK,
2115   PREFIX_REP,
2116   PREFIX_OTHER
2117 };
2118 
2119 /* Returns
2120    a. PREFIX_EXIST if attempting to add a prefix where one from the
2121    same class already exists.
2122    b. PREFIX_LOCK if lock prefix is added.
2123    c. PREFIX_REP if rep/repne prefix is added.
2124    d. PREFIX_OTHER if other prefix is added.
2125  */
2126 
2127 static enum PREFIX_GROUP
2128 add_prefix (unsigned int prefix)
2129 {
2130   enum PREFIX_GROUP ret = PREFIX_OTHER;
2131   unsigned int q;
2132 
2133   if (prefix >= REX_OPCODE && prefix < REX_OPCODE + 16
2134       && flag_code == CODE_64BIT)
2135     {
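      /* REX bits may be merged into a single prefix, but not when both
	 carry REX.W or both carry any of REX.R/REX.X/REX.B.  */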
2136       if ((i.prefix[REX_PREFIX] & prefix & REX_W)
2137 	  || ((i.prefix[REX_PREFIX] & (REX_R | REX_X | REX_B))
2138 	      && (prefix & (REX_R | REX_X | REX_B))))
2139 	ret = PREFIX_EXIST;
2140       q = REX_PREFIX;
2141     }
2142   else
2143     {
2144       switch (prefix)
2145 	{
2146 	default:
2147 	  abort ();
2148 
2149 	case CS_PREFIX_OPCODE:
2150 	case DS_PREFIX_OPCODE:
2151 	case ES_PREFIX_OPCODE:
2152 	case FS_PREFIX_OPCODE:
2153 	case GS_PREFIX_OPCODE:
2154 	case SS_PREFIX_OPCODE:
2155 	  q = SEG_PREFIX;
2156 	  break;
2157 
2158 	case REPNE_PREFIX_OPCODE:
2159 	case REPE_PREFIX_OPCODE:
2160 	  q = REP_PREFIX;
2161 	  ret = PREFIX_REP;
2162 	  break;
2163 
2164 	case LOCK_PREFIX_OPCODE:
2165 	  q = LOCK_PREFIX;
2166 	  ret = PREFIX_LOCK;
2167 	  break;
2168 
2169 	case FWAIT_OPCODE:
2170 	  q = WAIT_PREFIX;
2171 	  break;
2172 
2173 	case ADDR_PREFIX_OPCODE:
2174 	  q = ADDR_PREFIX;
2175 	  break;
2176 
2177 	case DATA_PREFIX_OPCODE:
2178 	  q = DATA_PREFIX;
2179 	  break;
2180 	}
2181       if (i.prefix[q] != 0)
2182 	ret = PREFIX_EXIST;
2183     }
2184 
2185   if (ret)
2186     {
2187       if (!i.prefix[q])
2188 	++i.prefixes;
2189       i.prefix[q] |= prefix;
2190     }
2191   else
2192     as_bad (_("same type of prefix used twice"));
2193 
2194   return ret;
2195 }
2196 
2197 static void
2198 update_code_flag (int value, int check)
2199 {
2200   PRINTF_LIKE ((*as_error));
2201 
2202   flag_code = (enum flag_code) value;
2203   if (flag_code == CODE_64BIT)
2204     {
2205       cpu_arch_flags.bitfield.cpu64 = 1;
2206       cpu_arch_flags.bitfield.cpuno64 = 0;
2207     }
2208   else
2209     {
2210       cpu_arch_flags.bitfield.cpu64 = 0;
2211       cpu_arch_flags.bitfield.cpuno64 = 1;
2212     }
2213   if (value == CODE_64BIT && !cpu_arch_flags.bitfield.cpulm)
2214     {
2215       if (check)
2216 	as_error = as_fatal;
2217       else
2218 	as_error = as_bad;
2219       (*as_error) (_("64bit mode not supported on `%s'."),
2220 		   cpu_arch_name ? cpu_arch_name : default_arch);
2221     }
2222   if (value == CODE_32BIT && !cpu_arch_flags.bitfield.cpui386)
2223     {
2224       if (check)
2225 	as_error = as_fatal;
2226       else
2227 	as_error = as_bad;
2228       (*as_error) (_("32bit mode not supported on `%s'."),
2229 		   cpu_arch_name ? cpu_arch_name : default_arch);
2230     }
2231   stackop_size = '\0';
2232 }
2233 
2234 static void
2235 set_code_flag (int value)
2236 {
2237   update_code_flag (value, 0);
2238 }
2239 
2240 static void
2241 set_16bit_gcc_code_flag (int new_code_flag)
2242 {
2243   flag_code = (enum flag_code) new_code_flag;
2244   if (flag_code != CODE_16BIT)
2245     abort ();
2246   cpu_arch_flags.bitfield.cpu64 = 0;
2247   cpu_arch_flags.bitfield.cpuno64 = 1;
2248   stackop_size = LONG_MNEM_SUFFIX;
2249 }
2250 
2251 static void
2252 set_intel_syntax (int syntax_flag)
2253 {
2254   /* Find out if register prefixing is specified.  */
2255   int ask_naked_reg = 0;
2256 
2257   SKIP_WHITESPACE ();
2258   if (!is_end_of_line[(unsigned char) *input_line_pointer])
2259     {
2260       char *string;
2261       int e = get_symbol_name (&string);
2262 
2263       if (strcmp (string, "prefix") == 0)
2264 	ask_naked_reg = 1;
2265       else if (strcmp (string, "noprefix") == 0)
2266 	ask_naked_reg = -1;
2267       else
2268 	as_bad (_("bad argument to syntax directive."));
2269       (void) restore_line_pointer (e);
2270     }
2271   demand_empty_rest_of_line ();
2272 
2273   intel_syntax = syntax_flag;
2274 
2275   if (ask_naked_reg == 0)
2276     allow_naked_reg = (intel_syntax
2277 		       && (bfd_get_symbol_leading_char (stdoutput) != '\0'));
2278   else
2279     allow_naked_reg = (ask_naked_reg < 0);
2280 
2281   expr_set_rank (O_full_ptr, syntax_flag ? 10 : 0);
2282 
2283   identifier_chars['%'] = intel_syntax && allow_naked_reg ? '%' : 0;
2284   identifier_chars['$'] = intel_syntax ? '$' : 0;
2285   register_prefix = allow_naked_reg ? "" : "%";
2286 }
2287 
2288 static void
2289 set_intel_mnemonic (int mnemonic_flag)
2290 {
2291   intel_mnemonic = mnemonic_flag;
2292 }
2293 
2294 static void
2295 set_allow_index_reg (int flag)
2296 {
2297   allow_index_reg = flag;
2298 }
2299 
2300 static void
2301 set_check (int what)
2302 {
2303   enum check_kind *kind;
2304   const char *str;
2305 
2306   if (what)
2307     {
2308       kind = &operand_check;
2309       str = "operand";
2310     }
2311   else
2312     {
2313       kind = &sse_check;
2314       str = "sse";
2315     }
2316 
2317   SKIP_WHITESPACE ();
2318 
2319   if (!is_end_of_line[(unsigned char) *input_line_pointer])
2320     {
2321       char *string;
2322       int e = get_symbol_name (&string);
2323 
2324       if (strcmp (string, "none") == 0)
2325 	*kind = check_none;
2326       else if (strcmp (string, "warning") == 0)
2327 	*kind = check_warning;
2328       else if (strcmp (string, "error") == 0)
2329 	*kind = check_error;
2330       else
2331 	as_bad (_("bad argument to %s_check directive."), str);
2332       (void) restore_line_pointer (e);
2333     }
2334   else
2335     as_bad (_("missing argument for %s_check directive"), str);
2336 
2337   demand_empty_rest_of_line ();
2338 }
2339 
2340 static void
2341 check_cpu_arch_compatible (const char *name ATTRIBUTE_UNUSED,
2342 			   i386_cpu_flags new_flag ATTRIBUTE_UNUSED)
2343 {
2344 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2345   static const char *arch;
2346 
2347   /* Intel LIOM is only supported on ELF.  */
2348   if (!IS_ELF)
2349     return;
2350 
2351   if (!arch)
2352     {
2353       /* Use cpu_arch_name if it is set in md_parse_option.  Otherwise
2354 	 use default_arch.  */
2355       arch = cpu_arch_name;
2356       if (!arch)
2357 	arch = default_arch;
2358     }
2359 
2360   /* If we are targeting Intel MCU, we must enable it.  */
2361   if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_IAMCU
2362       || new_flag.bitfield.cpuiamcu)
2363     return;
2364 
2365   /* If we are targeting Intel L1OM, we must enable it.  */
2366   if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_L1OM
2367       || new_flag.bitfield.cpul1om)
2368     return;
2369 
2370   /* If we are targeting Intel K1OM, we must enable it.  */
2371   if (get_elf_backend_data (stdoutput)->elf_machine_code != EM_K1OM
2372       || new_flag.bitfield.cpuk1om)
2373     return;
2374 
2375   as_bad (_("`%s' is not supported on `%s'"), name, arch);
2376 #endif
2377 }
2378 
2379 static void
2380 set_cpu_arch (int dummy ATTRIBUTE_UNUSED)
2381 {
2382   SKIP_WHITESPACE ();
2383 
2384   if (!is_end_of_line[(unsigned char) *input_line_pointer])
2385     {
2386       char *string;
2387       int e = get_symbol_name (&string);
2388       unsigned int j;
2389       i386_cpu_flags flags;
2390 
2391       for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
2392 	{
2393 	  if (strcmp (string, cpu_arch[j].name) == 0)
2394 	    {
2395 	      check_cpu_arch_compatible (string, cpu_arch[j].flags);
2396 
2397 	      if (*string != '.')
2398 		{
2399 		  cpu_arch_name = cpu_arch[j].name;
2400 		  cpu_sub_arch_name = NULL;
2401 		  cpu_arch_flags = cpu_arch[j].flags;
2402 		  if (flag_code == CODE_64BIT)
2403 		    {
2404 		      cpu_arch_flags.bitfield.cpu64 = 1;
2405 		      cpu_arch_flags.bitfield.cpuno64 = 0;
2406 		    }
2407 		  else
2408 		    {
2409 		      cpu_arch_flags.bitfield.cpu64 = 0;
2410 		      cpu_arch_flags.bitfield.cpuno64 = 1;
2411 		    }
2412 		  cpu_arch_isa = cpu_arch[j].type;
2413 		  cpu_arch_isa_flags = cpu_arch[j].flags;
2414 		  if (!cpu_arch_tune_set)
2415 		    {
2416 		      cpu_arch_tune = cpu_arch_isa;
2417 		      cpu_arch_tune_flags = cpu_arch_isa_flags;
2418 		    }
2419 		  break;
2420 		}
2421 
2422 	      flags = cpu_flags_or (cpu_arch_flags,
2423 				    cpu_arch[j].flags);
2424 
2425 	      if (!valid_iamcu_cpu_flags (&flags))
2426 		as_fatal (_("`%s' isn't valid for Intel MCU"),
2427 			  cpu_arch[j].name);
2428 	      else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2429 		{
2430 		  if (cpu_sub_arch_name)
2431 		    {
2432 		      char *name = cpu_sub_arch_name;
2433 		      cpu_sub_arch_name = concat (name,
2434 						  cpu_arch[j].name,
2435 						  (const char *) NULL);
2436 		      free (name);
2437 		    }
2438 		  else
2439 		    cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
2440 		  cpu_arch_flags = flags;
2441 		  cpu_arch_isa_flags = flags;
2442 		}
2443 	      (void) restore_line_pointer (e);
2444 	      demand_empty_rest_of_line ();
2445 	      return;
2446 	    }
2447 	}
2448 
2449       if (*string == '.' && j >= ARRAY_SIZE (cpu_arch))
2450 	{
2451 	  /* Disable an ISA extension.  */
2452 	  for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
2453 	    if (strcmp (string + 1, cpu_noarch [j].name) == 0)
2454 	      {
2455 		flags = cpu_flags_and_not (cpu_arch_flags,
2456 					   cpu_noarch[j].flags);
2457 		if (!cpu_flags_equal (&flags, &cpu_arch_flags))
2458 		  {
2459 		    if (cpu_sub_arch_name)
2460 		      {
2461 			char *name = cpu_sub_arch_name;
2462 			cpu_sub_arch_name = concat (name, string,
2463 						    (const char *) NULL);
2464 			free (name);
2465 		      }
2466 		    else
2467 		      cpu_sub_arch_name = xstrdup (string);
2468 		    cpu_arch_flags = flags;
2469 		    cpu_arch_isa_flags = flags;
2470 		  }
2471 		(void) restore_line_pointer (e);
2472 		demand_empty_rest_of_line ();
2473 		return;
2474 	      }
2475 
2476 	  j = ARRAY_SIZE (cpu_arch);
2477 	}
2478 
2479       if (j >= ARRAY_SIZE (cpu_arch))
2480 	as_bad (_("no such architecture: `%s'"), string);
2481 
2482       *input_line_pointer = e;
2483     }
2484   else
2485     as_bad (_("missing cpu architecture"));
2486 
2487   no_cond_jump_promotion = 0;
2488   if (*input_line_pointer == ','
2489       && !is_end_of_line[(unsigned char) input_line_pointer[1]])
2490     {
2491       char *string;
2492       char e;
2493 
2494       ++input_line_pointer;
2495       e = get_symbol_name (&string);
2496 
2497       if (strcmp (string, "nojumps") == 0)
2498 	no_cond_jump_promotion = 1;
2499       else if (strcmp (string, "jumps") == 0)
2500 	;
2501       else
2502 	as_bad (_("no such architecture modifier: `%s'"), string);
2503 
2504       (void) restore_line_pointer (e);
2505     }
2506 
2507   demand_empty_rest_of_line ();
2508 }
2509 
2510 enum bfd_architecture
2511 i386_arch (void)
2512 {
2513   if (cpu_arch_isa == PROCESSOR_L1OM)
2514     {
2515       if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2516 	  || flag_code != CODE_64BIT)
2517 	as_fatal (_("Intel L1OM is 64bit ELF only"));
2518       return bfd_arch_l1om;
2519     }
2520   else if (cpu_arch_isa == PROCESSOR_K1OM)
2521     {
2522       if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2523 	  || flag_code != CODE_64BIT)
2524 	as_fatal (_("Intel K1OM is 64bit ELF only"));
2525       return bfd_arch_k1om;
2526     }
2527   else if (cpu_arch_isa == PROCESSOR_IAMCU)
2528     {
2529       if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2530 	  || flag_code == CODE_64BIT)
2531 	as_fatal (_("Intel MCU is 32bit ELF only"));
2532       return bfd_arch_iamcu;
2533     }
2534   else
2535     return bfd_arch_i386;
2536 }
2537 
2538 unsigned long
2539 i386_mach (void)
2540 {
2541   if (!strncmp (default_arch, "x86_64", 6))
2542     {
2543       if (cpu_arch_isa == PROCESSOR_L1OM)
2544 	{
2545 	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2546 	      || default_arch[6] != '\0')
2547 	    as_fatal (_("Intel L1OM is 64bit ELF only"));
2548 	  return bfd_mach_l1om;
2549 	}
2550       else if (cpu_arch_isa == PROCESSOR_K1OM)
2551 	{
2552 	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour
2553 	      || default_arch[6] != '\0')
2554 	    as_fatal (_("Intel K1OM is 64bit ELF only"));
2555 	  return bfd_mach_k1om;
2556 	}
2557       else if (default_arch[6] == '\0')
2558 	return bfd_mach_x86_64;
2559       else
2560 	return bfd_mach_x64_32;
2561     }
2562   else if (!strcmp (default_arch, "i386")
2563 	   || !strcmp (default_arch, "iamcu"))
2564     {
2565       if (cpu_arch_isa == PROCESSOR_IAMCU)
2566 	{
2567 	  if (OUTPUT_FLAVOR != bfd_target_elf_flavour)
2568 	    as_fatal (_("Intel MCU is 32bit ELF only"));
2569 	  return bfd_mach_i386_iamcu;
2570 	}
2571       else
2572 	return bfd_mach_i386_i386;
2573     }
2574   else
2575     as_fatal (_("unknown architecture"));
2576 }
2577 
2578 void
2579 md_begin (void)
2580 {
2581   const char *hash_err;
2582 
2583   /* Initialize op_hash hash table.  */
2584   op_hash = hash_new ();
2585 
2586   {
2587     const insn_template *optab;
2588     templates *core_optab;
2589 
2590     /* Setup for loop.  */
2591     optab = i386_optab;
2592     core_optab = XNEW (templates);
2593     core_optab->start = optab;
2594 
2595     while (1)
2596       {
2597 	++optab;
2598 	if (optab->name == NULL
2599 	    || strcmp (optab->name, (optab - 1)->name) != 0)
2600 	  {
2601 	    /* different name --> ship out current template list;
2602 	       add to hash table; & begin anew.  */
2603 	    core_optab->end = optab;
2604 	    hash_err = hash_insert (op_hash,
2605 				    (optab - 1)->name,
2606 				    (void *) core_optab);
2607 	    if (hash_err)
2608 	      {
2609 		as_fatal (_("can't hash %s: %s"),
2610 			  (optab - 1)->name,
2611 			  hash_err);
2612 	      }
2613 	    if (optab->name == NULL)
2614 	      break;
2615 	    core_optab = XNEW (templates);
2616 	    core_optab->start = optab;
2617 	  }
2618       }
2619   }
2620 
2621   /* Initialize reg_hash hash table.  */
2622   reg_hash = hash_new ();
2623   {
2624     const reg_entry *regtab;
2625     unsigned int regtab_size = i386_regtab_size;
2626 
2627     for (regtab = i386_regtab; regtab_size--; regtab++)
2628       {
2629 	hash_err = hash_insert (reg_hash, regtab->reg_name, (void *) regtab);
2630 	if (hash_err)
2631 	  as_fatal (_("can't hash %s: %s"),
2632 		    regtab->reg_name,
2633 		    hash_err);
2634       }
2635   }
2636 
2637   /* Fill in lexical tables:  mnemonic_chars, operand_chars.  */
2638   {
2639     int c;
2640     char *p;
2641 
2642     for (c = 0; c < 256; c++)
2643       {
2644 	if (ISDIGIT (c))
2645 	  {
2646 	    digit_chars[c] = c;
2647 	    mnemonic_chars[c] = c;
2648 	    register_chars[c] = c;
2649 	    operand_chars[c] = c;
2650 	  }
2651 	else if (ISLOWER (c))
2652 	  {
2653 	    mnemonic_chars[c] = c;
2654 	    register_chars[c] = c;
2655 	    operand_chars[c] = c;
2656 	  }
2657 	else if (ISUPPER (c))
2658 	  {
2659 	    mnemonic_chars[c] = TOLOWER (c);
2660 	    register_chars[c] = mnemonic_chars[c];
2661 	    operand_chars[c] = c;
2662 	  }
2663 	else if (c == '{' || c == '}')
2664 	  operand_chars[c] = c;
2665 
2666 	if (ISALPHA (c) || ISDIGIT (c))
2667 	  identifier_chars[c] = c;
2668 	else if (c >= 128)
2669 	  {
2670 	    identifier_chars[c] = c;
2671 	    operand_chars[c] = c;
2672 	  }
2673       }
2674 
2675 #ifdef LEX_AT
2676     identifier_chars['@'] = '@';
2677 #endif
2678 #ifdef LEX_QM
2679     identifier_chars['?'] = '?';
2680     operand_chars['?'] = '?';
2681 #endif
2682     digit_chars['-'] = '-';
2683     mnemonic_chars['_'] = '_';
2684     mnemonic_chars['-'] = '-';
2685     mnemonic_chars['.'] = '.';
2686     identifier_chars['_'] = '_';
2687     identifier_chars['.'] = '.';
2688 
2689     for (p = operand_special_chars; *p != '\0'; p++)
2690       operand_chars[(unsigned char) *p] = *p;
2691   }
2692 
2693   if (flag_code == CODE_64BIT)
2694     {
2695 #if defined (OBJ_COFF) && defined (TE_PE)
2696       x86_dwarf2_return_column = (OUTPUT_FLAVOR == bfd_target_coff_flavour
2697 				  ? 32 : 16);
2698 #else
2699       x86_dwarf2_return_column = 16;
2700 #endif
2701       x86_cie_data_alignment = -8;
2702     }
2703   else
2704     {
2705       x86_dwarf2_return_column = 8;
2706       x86_cie_data_alignment = -4;
2707     }
2708 }
2709 
2710 void
2711 i386_print_statistics (FILE *file)
2712 {
2713   hash_print_statistics (file, "i386 opcode", op_hash);
2714   hash_print_statistics (file, "i386 register", reg_hash);
2715 }
2716 
2717 #ifdef DEBUG386
2718 
2719 /* Debugging routines for md_assemble.  */
2720 static void pte (insn_template *);
2721 static void pt (i386_operand_type);
2722 static void pe (expressionS *);
2723 static void ps (symbolS *);
2724 
2725 static void
2726 pi (char *line, i386_insn *x)
2727 {
2728   unsigned int j;
2729 
2730   fprintf (stdout, "%s: template ", line);
2731   pte (&x->tm);
2732   fprintf (stdout, "  address: base %s  index %s  scale %x\n",
2733 	   x->base_reg ? x->base_reg->reg_name : "none",
2734 	   x->index_reg ? x->index_reg->reg_name : "none",
2735 	   x->log2_scale_factor);
2736   fprintf (stdout, "  modrm:  mode %x  reg %x  reg/mem %x\n",
2737 	   x->rm.mode, x->rm.reg, x->rm.regmem);
2738   fprintf (stdout, "  sib:  base %x  index %x  scale %x\n",
2739 	   x->sib.base, x->sib.index, x->sib.scale);
2740   fprintf (stdout, "  rex: 64bit %x  extX %x  extY %x  extZ %x\n",
2741 	   (x->rex & REX_W) != 0,
2742 	   (x->rex & REX_R) != 0,
2743 	   (x->rex & REX_X) != 0,
2744 	   (x->rex & REX_B) != 0);
2745   for (j = 0; j < x->operands; j++)
2746     {
2747       fprintf (stdout, "    #%d:  ", j + 1);
2748       pt (x->types[j]);
2749       fprintf (stdout, "\n");
2750       if (x->types[j].bitfield.reg8
2751 	  || x->types[j].bitfield.reg16
2752 	  || x->types[j].bitfield.reg32
2753 	  || x->types[j].bitfield.reg64
2754 	  || x->types[j].bitfield.regmmx
2755 	  || x->types[j].bitfield.regxmm
2756 	  || x->types[j].bitfield.regymm
2757 	  || x->types[j].bitfield.regzmm
2758 	  || x->types[j].bitfield.sreg2
2759 	  || x->types[j].bitfield.sreg3
2760 	  || x->types[j].bitfield.control
2761 	  || x->types[j].bitfield.debug
2762 	  || x->types[j].bitfield.test)
2763 	fprintf (stdout, "%s\n", x->op[j].regs->reg_name);
2764       if (operand_type_check (x->types[j], imm))
2765 	pe (x->op[j].imms);
2766       if (operand_type_check (x->types[j], disp))
2767 	pe (x->op[j].disps);
2768     }
2769 }
2770 
2771 static void
2772 pte (insn_template *t)
2773 {
2774   unsigned int j;
2775   fprintf (stdout, " %d operands ", t->operands);
2776   fprintf (stdout, "opcode %x ", t->base_opcode);
2777   if (t->extension_opcode != None)
2778     fprintf (stdout, "ext %x ", t->extension_opcode);
2779   if (t->opcode_modifier.d)
2780     fprintf (stdout, "D");
2781   if (t->opcode_modifier.w)
2782     fprintf (stdout, "W");
2783   fprintf (stdout, "\n");
2784   for (j = 0; j < t->operands; j++)
2785     {
2786       fprintf (stdout, "    #%d type ", j + 1);
2787       pt (t->operand_types[j]);
2788       fprintf (stdout, "\n");
2789     }
2790 }
2791 
2792 static void
2793 pe (expressionS *e)
2794 {
2795   fprintf (stdout, "    operation     %d\n", e->X_op);
2796   fprintf (stdout, "    add_number    %ld (%lx)\n",
2797 	   (long) e->X_add_number, (long) e->X_add_number);
2798   if (e->X_add_symbol)
2799     {
2800       fprintf (stdout, "    add_symbol    ");
2801       ps (e->X_add_symbol);
2802       fprintf (stdout, "\n");
2803     }
2804   if (e->X_op_symbol)
2805     {
2806       fprintf (stdout, "    op_symbol    ");
2807       ps (e->X_op_symbol);
2808       fprintf (stdout, "\n");
2809     }
2810 }
2811 
2812 static void
2813 ps (symbolS *s)
2814 {
2815   fprintf (stdout, "%s type %s%s",
2816 	   S_GET_NAME (s),
2817 	   S_IS_EXTERNAL (s) ? "EXTERNAL " : "",
2818 	   segment_name (S_GET_SEGMENT (s)));
2819 }
2820 
2821 static struct type_name
2822   {
2823     i386_operand_type mask;
2824     const char *name;
2825   }
2826 const type_names[] =
2827 {
2828   { OPERAND_TYPE_REG8, "r8" },
2829   { OPERAND_TYPE_REG16, "r16" },
2830   { OPERAND_TYPE_REG32, "r32" },
2831   { OPERAND_TYPE_REG64, "r64" },
2832   { OPERAND_TYPE_IMM8, "i8" },
2833   { OPERAND_TYPE_IMM8, "i8s" },
2834   { OPERAND_TYPE_IMM16, "i16" },
2835   { OPERAND_TYPE_IMM32, "i32" },
2836   { OPERAND_TYPE_IMM32S, "i32s" },
2837   { OPERAND_TYPE_IMM64, "i64" },
2838   { OPERAND_TYPE_IMM1, "i1" },
2839   { OPERAND_TYPE_BASEINDEX, "BaseIndex" },
2840   { OPERAND_TYPE_DISP8, "d8" },
2841   { OPERAND_TYPE_DISP16, "d16" },
2842   { OPERAND_TYPE_DISP32, "d32" },
2843   { OPERAND_TYPE_DISP32S, "d32s" },
2844   { OPERAND_TYPE_DISP64, "d64" },
2845   { OPERAND_TYPE_VEC_DISP8, "Vector d8" },
2846   { OPERAND_TYPE_INOUTPORTREG, "InOutPortReg" },
2847   { OPERAND_TYPE_SHIFTCOUNT, "ShiftCount" },
2848   { OPERAND_TYPE_CONTROL, "control reg" },
2849   { OPERAND_TYPE_TEST, "test reg" },
2850   { OPERAND_TYPE_DEBUG, "debug reg" },
2851   { OPERAND_TYPE_FLOATREG, "FReg" },
2852   { OPERAND_TYPE_FLOATACC, "FAcc" },
2853   { OPERAND_TYPE_SREG2, "SReg2" },
2854   { OPERAND_TYPE_SREG3, "SReg3" },
2855   { OPERAND_TYPE_ACC, "Acc" },
2856   { OPERAND_TYPE_JUMPABSOLUTE, "Jump Absolute" },
2857   { OPERAND_TYPE_REGMMX, "rMMX" },
2858   { OPERAND_TYPE_REGXMM, "rXMM" },
2859   { OPERAND_TYPE_REGYMM, "rYMM" },
2860   { OPERAND_TYPE_REGZMM, "rZMM" },
2861   { OPERAND_TYPE_REGMASK, "Mask reg" },
2862   { OPERAND_TYPE_ESSEG, "es" },
2863 };
2864 
2865 static void
2866 pt (i386_operand_type t)
2867 {
2868   unsigned int j;
2869   i386_operand_type a;
2870 
2871   for (j = 0; j < ARRAY_SIZE (type_names); j++)
2872     {
2873       a = operand_type_and (t, type_names[j].mask);
2874       if (!operand_type_all_zero (&a))
2875 	fprintf (stdout, "%s, ",  type_names[j].name);
2876     }
2877   fflush (stdout);
2878 }
2879 
2880 #endif /* DEBUG386 */
2881 
2882 static bfd_reloc_code_real_type
2883 reloc (unsigned int size,
2884        int pcrel,
2885        int sign,
2886        bfd_reloc_code_real_type other)
2887 {
2888   if (other != NO_RELOC)
2889     {
2890       reloc_howto_type *rel;
2891 
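      /* For an 8-byte field, widen the 32-bit GOT/PC-relative/TLS
	 relocations to their 64-bit counterparts.  */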
2892       if (size == 8)
2893 	switch (other)
2894 	  {
2895 	  case BFD_RELOC_X86_64_GOT32:
2896 	    return BFD_RELOC_X86_64_GOT64;
2897 	    break;
2898 	  case BFD_RELOC_X86_64_GOTPLT64:
2899 	    return BFD_RELOC_X86_64_GOTPLT64;
2900 	    break;
2901 	  case BFD_RELOC_X86_64_PLTOFF64:
2902 	    return BFD_RELOC_X86_64_PLTOFF64;
2903 	    break;
2904 	  case BFD_RELOC_X86_64_GOTPC32:
2905 	    other = BFD_RELOC_X86_64_GOTPC64;
2906 	    break;
2907 	  case BFD_RELOC_X86_64_GOTPCREL:
2908 	    other = BFD_RELOC_X86_64_GOTPCREL64;
2909 	    break;
2910 	  case BFD_RELOC_X86_64_TPOFF32:
2911 	    other = BFD_RELOC_X86_64_TPOFF64;
2912 	    break;
2913 	  case BFD_RELOC_X86_64_DTPOFF32:
2914 	    other = BFD_RELOC_X86_64_DTPOFF64;
2915 	    break;
2916 	  default:
2917 	    break;
2918 	  }
2919 
2920 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
2921       if (other == BFD_RELOC_SIZE32)
2922 	{
2923 	  if (size == 8)
2924 	    other = BFD_RELOC_SIZE64;
2925 	  if (pcrel)
2926 	    {
2927 	      as_bad (_("there are no pc-relative size relocations"));
2928 	      return NO_RELOC;
2929 	    }
2930 	}
2931 #endif
2932 
2933       /* Sign-checking 4-byte relocations in 16-/32-bit code is pointless.  */
2934       if (size == 4 && (flag_code != CODE_64BIT || disallow_64bit_reloc))
2935 	sign = -1;
2936 
2937       rel = bfd_reloc_type_lookup (stdoutput, other);
2938       if (!rel)
2939 	as_bad (_("unknown relocation (%u)"), other);
2940       else if (size != bfd_get_reloc_size (rel))
2941 	as_bad (_("%u-byte relocation cannot be applied to %u-byte field"),
2942 		bfd_get_reloc_size (rel),
2943 		size);
2944       else if (pcrel && !rel->pc_relative)
2945 	as_bad (_("non-pc-relative relocation for pc-relative field"));
2946       else if ((rel->complain_on_overflow == complain_overflow_signed
2947 		&& !sign)
2948 	       || (rel->complain_on_overflow == complain_overflow_unsigned
2949 		   && sign > 0))
2950 	as_bad (_("relocated field and relocation type differ in signedness"));
2951       else
2952 	return other;
2953       return NO_RELOC;
2954     }
2955 
2956   if (pcrel)
2957     {
2958       if (!sign)
2959 	as_bad (_("there are no unsigned pc-relative relocations"));
2960       switch (size)
2961 	{
2962 	case 1: return BFD_RELOC_8_PCREL;
2963 	case 2: return BFD_RELOC_16_PCREL;
2964 	case 4: return BFD_RELOC_32_PCREL;
2965 	case 8: return BFD_RELOC_64_PCREL;
2966 	}
2967       as_bad (_("cannot do %u byte pc-relative relocation"), size);
2968     }
2969   else
2970     {
2971       if (sign > 0)
2972 	switch (size)
2973 	  {
2974 	  case 4: return BFD_RELOC_X86_64_32S;
2975 	  }
2976       else
2977 	switch (size)
2978 	  {
2979 	  case 1: return BFD_RELOC_8;
2980 	  case 2: return BFD_RELOC_16;
2981 	  case 4: return BFD_RELOC_32;
2982 	  case 8: return BFD_RELOC_64;
2983 	  }
2984       as_bad (_("cannot do %s %u byte relocation"),
2985 	      sign > 0 ? "signed" : "unsigned", size);
2986     }
2987 
2988   return NO_RELOC;
2989 }
2990 
2991 /* Here we decide which fixups can be adjusted to make them relative to
2992    the beginning of the section instead of the symbol.  Basically we need
2993    to make sure that the dynamic relocations are done correctly, so in
2994    some cases we force the original symbol to be used.  */
2995 
2996 int
2997 tc_i386_fix_adjustable (fixS *fixP ATTRIBUTE_UNUSED)
2998 {
2999 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
3000   if (!IS_ELF)
3001     return 1;
3002 
3003   /* Don't adjust pc-relative references to merge sections in 64-bit
3004      mode.  */
3005   if (use_rela_relocations
3006       && (S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_MERGE) != 0
3007       && fixP->fx_pcrel)
3008     return 0;
3009 
3010   /* The x86_64 GOTPCREL are represented as 32bit PCrel relocations
3011      and changed later by validate_fix.  */
3012   if (GOT_symbol && fixP->fx_subsy == GOT_symbol
3013       && fixP->fx_r_type == BFD_RELOC_32_PCREL)
3014     return 0;
3015 
3016   /* Adjust_reloc_syms doesn't know about the GOT.  Need to keep symbol
3017      for size relocations.  */
3018   if (fixP->fx_r_type == BFD_RELOC_SIZE32
3019       || fixP->fx_r_type == BFD_RELOC_SIZE64
3020       || fixP->fx_r_type == BFD_RELOC_386_GOTOFF
3021       || fixP->fx_r_type == BFD_RELOC_386_PLT32
3022       || fixP->fx_r_type == BFD_RELOC_386_GOT32
3023       || fixP->fx_r_type == BFD_RELOC_386_GOT32X
3024       || fixP->fx_r_type == BFD_RELOC_386_TLS_GD
3025       || fixP->fx_r_type == BFD_RELOC_386_TLS_LDM
3026       || fixP->fx_r_type == BFD_RELOC_386_TLS_LDO_32
3027       || fixP->fx_r_type == BFD_RELOC_386_TLS_IE_32
3028       || fixP->fx_r_type == BFD_RELOC_386_TLS_IE
3029       || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTIE
3030       || fixP->fx_r_type == BFD_RELOC_386_TLS_LE_32
3031       || fixP->fx_r_type == BFD_RELOC_386_TLS_LE
3032       || fixP->fx_r_type == BFD_RELOC_386_TLS_GOTDESC
3033       || fixP->fx_r_type == BFD_RELOC_386_TLS_DESC_CALL
3034       || fixP->fx_r_type == BFD_RELOC_X86_64_PLT32
3035       || fixP->fx_r_type == BFD_RELOC_X86_64_GOT32
3036       || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCREL
3037       || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPCRELX
3038       || fixP->fx_r_type == BFD_RELOC_X86_64_REX_GOTPCRELX
3039       || fixP->fx_r_type == BFD_RELOC_X86_64_TLSGD
3040       || fixP->fx_r_type == BFD_RELOC_X86_64_TLSLD
3041       || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF32
3042       || fixP->fx_r_type == BFD_RELOC_X86_64_DTPOFF64
3043       || fixP->fx_r_type == BFD_RELOC_X86_64_GOTTPOFF
3044       || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF32
3045       || fixP->fx_r_type == BFD_RELOC_X86_64_TPOFF64
3046       || fixP->fx_r_type == BFD_RELOC_X86_64_GOTOFF64
3047       || fixP->fx_r_type == BFD_RELOC_X86_64_GOTPC32_TLSDESC
3048       || fixP->fx_r_type == BFD_RELOC_X86_64_TLSDESC_CALL
3049       || fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
3050       || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
3051     return 0;
3052 #endif
3053   return 1;
3054 }
3055 
3056 static int
3057 intel_float_operand (const char *mnemonic)
3058 {
3059   /* Note that the value returned is meaningful only for opcodes with (memory)
3060      operands, hence the code here is free to improperly handle opcodes that
3061      have no operands (for better performance and smaller code). */
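  /* Roughly: 0 = not an FPU math operation, 1 = plain FP operation,
     2 = integer operation (fi*), 3 = control/state operation with a
     fixed-size memory operand.  */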
3062 
3063   if (mnemonic[0] != 'f')
3064     return 0; /* non-math */
3065 
3066   switch (mnemonic[1])
3067     {
3068     /* fclex, fdecstp, fdisi, femms, feni, fincstp, finit, fsetpm, and
3069        the fs segment override prefix not currently handled because no
3070        call path can make opcodes without operands get here */
3071     case 'i':
3072       return 2 /* integer op */;
3073     case 'l':
3074       if (mnemonic[2] == 'd' && (mnemonic[3] == 'c' || mnemonic[3] == 'e'))
3075 	return 3; /* fldcw/fldenv */
3076       break;
3077     case 'n':
3078       if (mnemonic[2] != 'o' /* fnop */)
3079 	return 3; /* non-waiting control op */
3080       break;
3081     case 'r':
3082       if (mnemonic[2] == 's')
3083 	return 3; /* frstor/frstpm */
3084       break;
3085     case 's':
3086       if (mnemonic[2] == 'a')
3087 	return 3; /* fsave */
3088       if (mnemonic[2] == 't')
3089 	{
3090 	  switch (mnemonic[3])
3091 	    {
3092 	    case 'c': /* fstcw */
3093 	    case 'd': /* fstdw */
3094 	    case 'e': /* fstenv */
3095 	    case 's': /* fsts[gw] */
3096 	      return 3;
3097 	    }
3098 	}
3099       break;
3100     case 'x':
3101       if (mnemonic[2] == 'r' || mnemonic[2] == 's')
3102 	return 0; /* fxsave/fxrstor are not really math ops */
3103       break;
3104     }
3105 
3106   return 1;
3107 }
3108 
3109 /* Build the VEX prefix.  */
3110 
3111 static void
3112 build_vex_prefix (const insn_template *t)
3113 {
3114   unsigned int register_specifier;
3115   unsigned int implied_prefix;
3116   unsigned int vector_length;
3117 
3118   /* Check register specifier.  */
3119   if (i.vex.register_specifier)
3120     {
3121       register_specifier =
3122 	~register_number (i.vex.register_specifier) & 0xf;
3123       gas_assert ((i.vex.register_specifier->reg_flags & RegVRex) == 0);
3124     }
3125   else
3126     register_specifier = 0xf;
3127 
3128   /* Use 2-byte VEX prefix by swapping destination and source
3129      operands.  */
3130   if (!i.swap_operand
3131       && i.operands == i.reg_operands
3132       && i.tm.opcode_modifier.vexopcode == VEX0F
3133       && i.tm.opcode_modifier.s
3134       && i.rex == REX_B)
3135     {
3136       unsigned int xchg = i.operands - 1;
3137       union i386_op temp_op;
3138       i386_operand_type temp_type;
3139 
3140       temp_type = i.types[xchg];
3141       i.types[xchg] = i.types[0];
3142       i.types[0] = temp_type;
3143       temp_op = i.op[xchg];
3144       i.op[xchg] = i.op[0];
3145       i.op[0] = temp_op;
3146 
3147       gas_assert (i.rm.mode == 3);
3148 
3149       i.rex = REX_R;
3150       xchg = i.rm.regmem;
3151       i.rm.regmem = i.rm.reg;
3152       i.rm.reg = xchg;
3153 
3154       /* Use the next insn.  */
3155       i.tm = t[1];
3156     }
3157 
3158   if (i.tm.opcode_modifier.vex == VEXScalar)
3159     vector_length = avxscalar;
3160   else
3161     vector_length = i.tm.opcode_modifier.vex == VEX256 ? 1 : 0;
3162 
3163   switch ((i.tm.base_opcode >> 8) & 0xff)
3164     {
3165     case 0:
3166       implied_prefix = 0;
3167       break;
3168     case DATA_PREFIX_OPCODE:
3169       implied_prefix = 1;
3170       break;
3171     case REPE_PREFIX_OPCODE:
3172       implied_prefix = 2;
3173       break;
3174     case REPNE_PREFIX_OPCODE:
3175       implied_prefix = 3;
3176       break;
3177     default:
3178       abort ();
3179     }
3180 
3181   /* Use 2-byte VEX prefix if possible.  */
3182   if (i.tm.opcode_modifier.vexopcode == VEX0F
3183       && i.tm.opcode_modifier.vexw != VEXW1
3184       && (i.rex & (REX_W | REX_X | REX_B)) == 0)
3185     {
3186       /* 2-byte VEX prefix.  */
3187       unsigned int r;
3188 
3189       i.vex.length = 2;
3190       i.vex.bytes[0] = 0xc5;
3191 
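      /* Second byte layout: R vvvv L pp, with R being the inverted
	 REX.R bit.  */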
3192       /* Check the REX.R bit.  */
3193       r = (i.rex & REX_R) ? 0 : 1;
3194       i.vex.bytes[1] = (r << 7
3195 			| register_specifier << 3
3196 			| vector_length << 2
3197 			| implied_prefix);
3198     }
3199   else
3200     {
3201       /* 3-byte VEX prefix.  */
3202       unsigned int m, w;
3203 
3204       i.vex.length = 3;
3205 
3206       switch (i.tm.opcode_modifier.vexopcode)
3207 	{
3208 	case VEX0F:
3209 	  m = 0x1;
3210 	  i.vex.bytes[0] = 0xc4;
3211 	  break;
3212 	case VEX0F38:
3213 	  m = 0x2;
3214 	  i.vex.bytes[0] = 0xc4;
3215 	  break;
3216 	case VEX0F3A:
3217 	  m = 0x3;
3218 	  i.vex.bytes[0] = 0xc4;
3219 	  break;
3220 	case XOP08:
3221 	  m = 0x8;
3222 	  i.vex.bytes[0] = 0x8f;
3223 	  break;
3224 	case XOP09:
3225 	  m = 0x9;
3226 	  i.vex.bytes[0] = 0x8f;
3227 	  break;
3228 	case XOP0A:
3229 	  m = 0xa;
3230 	  i.vex.bytes[0] = 0x8f;
3231 	  break;
3232 	default:
3233 	  abort ();
3234 	}
3235 
3236       /* The high 3 bits of the second VEX byte are the one's complement
3237 	 of the RXB bits from REX.  */
3238       i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3239 
3240       /* Check the REX.W bit.  */
3241       w = (i.rex & REX_W) ? 1 : 0;
3242       if (i.tm.opcode_modifier.vexw == VEXW1)
3243 	w = 1;
3244 
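      /* Third byte layout: W vvvv L pp.  */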
3245       i.vex.bytes[2] = (w << 7
3246 			| register_specifier << 3
3247 			| vector_length << 2
3248 			| implied_prefix);
3249     }
3250 }
3251 
3252 /* Build the EVEX prefix.  */
3253 
3254 static void
3255 build_evex_prefix (void)
3256 {
3257   unsigned int register_specifier;
3258   unsigned int implied_prefix;
3259   unsigned int m, w;
3260   rex_byte vrex_used = 0;
3261 
3262   /* Check register specifier.  */
3263   if (i.vex.register_specifier)
3264     {
3265       gas_assert ((i.vrex & REX_X) == 0);
3266 
3267       register_specifier = i.vex.register_specifier->reg_num;
3268       if ((i.vex.register_specifier->reg_flags & RegRex))
3269 	register_specifier += 8;
3270       /* The upper 16 registers are encoded in the fourth byte of the
3271 	 EVEX prefix.  */
3272       if (!(i.vex.register_specifier->reg_flags & RegVRex))
3273 	i.vex.bytes[3] = 0x8;
3274       register_specifier = ~register_specifier & 0xf;
3275     }
3276   else
3277     {
3278       register_specifier = 0xf;
3279 
3280       /* Encode upper 16 vector index register in the fourth byte of
3281 	 the EVEX prefix.  */
3282       if (!(i.vrex & REX_X))
3283 	i.vex.bytes[3] = 0x8;
3284       else
3285 	vrex_used |= REX_X;
3286     }
3287 
3288   switch ((i.tm.base_opcode >> 8) & 0xff)
3289     {
3290     case 0:
3291       implied_prefix = 0;
3292       break;
3293     case DATA_PREFIX_OPCODE:
3294       implied_prefix = 1;
3295       break;
3296     case REPE_PREFIX_OPCODE:
3297       implied_prefix = 2;
3298       break;
3299     case REPNE_PREFIX_OPCODE:
3300       implied_prefix = 3;
3301       break;
3302     default:
3303       abort ();
3304     }
3305 
3306   /* 4 byte EVEX prefix.  */
3307   i.vex.length = 4;
3308   i.vex.bytes[0] = 0x62;
3309 
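  /* The remaining three payload bytes are filled in below: inverted
     R/X/B plus the mm bits, then W/vvvv/pp, then z/L'L/b/V'/aaa.  */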
3310   /* mmmm bits.  */
3311   switch (i.tm.opcode_modifier.vexopcode)
3312     {
3313     case VEX0F:
3314       m = 1;
3315       break;
3316     case VEX0F38:
3317       m = 2;
3318       break;
3319     case VEX0F3A:
3320       m = 3;
3321       break;
3322     default:
3323       abort ();
3324       break;
3325     }
3326 
3327   /* The high 3 bits of the second EVEX byte are the one's complement
3328      of the RXB bits from REX.  */
3329   i.vex.bytes[1] = (~i.rex & 0x7) << 5 | m;
3330 
3331   /* The fifth bit of the second EVEX byte is the one's complement of
3332      the REX_R bit in VREX.  */
3333   if (!(i.vrex & REX_R))
3334     i.vex.bytes[1] |= 0x10;
3335   else
3336     vrex_used |= REX_R;
3337 
3338   if ((i.reg_operands + i.imm_operands) == i.operands)
3339     {
3340       /* When all operands are registers, the REX_X bit in REX is not
3341 	 used.  We reuse it to encode the upper 16 registers, which is
3342 	 indicated by the REX_B bit in VREX.  The REX_X bit is encoded
3343 	 as its one's complement.  */
3344       if ((i.vrex & REX_B))
3345 	{
3346 	  vrex_used |= REX_B;
3347 	  i.vex.bytes[1] &= ~0x40;
3348 	}
3349     }
3350 
3351   /* EVEX instructions shouldn't need the REX prefix.  */
3352   i.vrex &= ~vrex_used;
3353   gas_assert (i.vrex == 0);
3354 
3355   /* Check the REX.W bit.  */
3356   w = (i.rex & REX_W) ? 1 : 0;
3357   if (i.tm.opcode_modifier.vexw)
3358     {
3359       if (i.tm.opcode_modifier.vexw == VEXW1)
3360 	w = 1;
3361     }
3362   /* If w is not set it means we are dealing with a WIG instruction.  */
3363   else if (!w)
3364     {
3365       if (evexwig == evexw1)
3366         w = 1;
3367     }
3368 
3369   /* Encode the U bit.  */
3370   implied_prefix |= 0x4;
3371 
3372   /* The third byte of the EVEX prefix.  */
3373   i.vex.bytes[2] = (w << 7 | register_specifier << 3 | implied_prefix);
3374 
3375   /* The fourth byte of the EVEX prefix.  */
3376   /* The zeroing-masking bit.  */
3377   if (i.mask && i.mask->zeroing)
3378     i.vex.bytes[3] |= 0x80;
3379 
3380   /* Don't always set the broadcast bit if there is no RC.  */
3381   if (!i.rounding)
3382     {
3383       /* Encode the vector length.  */
3384       unsigned int vec_length;
3385 
3386       switch (i.tm.opcode_modifier.evex)
3387 	{
3388 	case EVEXLIG: /* LL' is ignored */
3389 	  vec_length = evexlig << 5;
3390 	  break;
3391 	case EVEX128:
3392 	  vec_length = 0 << 5;
3393 	  break;
3394 	case EVEX256:
3395 	  vec_length = 1 << 5;
3396 	  break;
3397 	case EVEX512:
3398 	  vec_length = 2 << 5;
3399 	  break;
3400 	default:
3401 	  abort ();
3402 	  break;
3403 	}
3404       i.vex.bytes[3] |= vec_length;
3405       /* Encode the broadcast bit.  */
3406       if (i.broadcast)
3407 	i.vex.bytes[3] |= 0x10;
3408     }
3409   else
3410     {
3411       if (i.rounding->type != saeonly)
3412 	i.vex.bytes[3] |= 0x10 | (i.rounding->type << 5);
3413       else
3414 	i.vex.bytes[3] |= 0x10 | (evexrcig << 5);
3415     }
3416 
3417   if (i.mask && i.mask->mask)
3418     i.vex.bytes[3] |= i.mask->mask->reg_num;
3419 }
3420 
3421 static void
3422 process_immext (void)
3423 {
3424   expressionS *exp;
3425 
3426   if ((i.tm.cpu_flags.bitfield.cpusse3 || i.tm.cpu_flags.bitfield.cpusvme)
3427       && i.operands > 0)
3428     {
3429       /* MONITOR/MWAIT as well as SVME instructions have fixed operands
3430 	 with an opcode suffix which is coded in the same place as an
3431 	 8-bit immediate field would be.
3432 	 Here we check those operands and remove them afterwards.  */
3433       unsigned int x;
3434 
3435       for (x = 0; x < i.operands; x++)
3436 	if (register_number (i.op[x].regs) != x)
3437 	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3438 		  register_prefix, i.op[x].regs->reg_name, x + 1,
3439 		  i.tm.name);
3440 
3441       i.operands = 0;
3442     }
3443 
3444   if (i.tm.cpu_flags.bitfield.cpumwaitx && i.operands > 0)
3445     {
3446       /* MONITORX/MWAITX instructions have fixed operands with an opcode
3447 	 suffix which is coded in the same place as an 8-bit immediate
3448 	 field would be.
3449 	 Here we check those operands and remove them afterwards.  */
3450       unsigned int x;
3451 
3452       if (i.operands != 3)
3453 	abort();
3454 
3455       for (x = 0; x < 2; x++)
3456 	if (register_number (i.op[x].regs) != x)
3457 	  goto bad_register_operand;
3458 
3459       /* Check for third operand for mwaitx/monitorx insn.  */
3460       if (register_number (i.op[x].regs)
3461 	  != (x + (i.tm.extension_opcode == 0xfb)))
3462 	{
3463 bad_register_operand:
3464 	  as_bad (_("can't use register '%s%s' as operand %d in '%s'."),
3465 		  register_prefix, i.op[x].regs->reg_name, x+1,
3466 		  i.tm.name);
3467 	}
3468 
3469       i.operands = 0;
3470     }
3471 
3472   /* These AMD 3DNow! and SSE2 instructions have an opcode suffix
3473      which is coded in the same place as an 8-bit immediate field
3474      would be.  Here we fake an 8-bit immediate operand from the
3475      opcode suffix stored in tm.extension_opcode.
3476 
3477      AVX instructions also use this encoding, for some of
3478      3 argument instructions.  */
3479 
3480   gas_assert (i.imm_operands <= 1
3481 	      && (i.operands <= 2
3482 		  || ((i.tm.opcode_modifier.vex
3483 		       || i.tm.opcode_modifier.evex)
3484 		      && i.operands <= 4)));
3485 
3486   exp = &im_expressions[i.imm_operands++];
3487   i.op[i.operands].imms = exp;
3488   i.types[i.operands] = imm8;
3489   i.operands++;
3490   exp->X_op = O_constant;
3491   exp->X_add_number = i.tm.extension_opcode;
3492   i.tm.extension_opcode = None;
3493 }
3494 
3495 
3496 static int
3497 check_hle (void)
3498 {
3499   switch (i.tm.opcode_modifier.hleprefixok)
3500     {
3501     default:
3502       abort ();
3503     case HLEPrefixNone:
3504       as_bad (_("invalid instruction `%s' after `%s'"),
3505 	      i.tm.name, i.hle_prefix);
3506       return 0;
3507     case HLEPrefixLock:
3508       if (i.prefix[LOCK_PREFIX])
3509 	return 1;
3510       as_bad (_("missing `lock' with `%s'"), i.hle_prefix);
3511       return 0;
3512     case HLEPrefixAny:
3513       return 1;
3514     case HLEPrefixRelease:
3515       if (i.prefix[HLE_PREFIX] != XRELEASE_PREFIX_OPCODE)
3516 	{
3517 	  as_bad (_("instruction `%s' after `xacquire' not allowed"),
3518 		  i.tm.name);
3519 	  return 0;
3520 	}
3521       if (i.mem_operands == 0
3522 	  || !operand_type_check (i.types[i.operands - 1], anymem))
3523 	{
3524 	  as_bad (_("memory destination needed for instruction `%s'"
3525 		    " after `xrelease'"), i.tm.name);
3526 	  return 0;
3527 	}
3528       return 1;
3529     }
3530 }
3531 
3532 /* This is the guts of the machine-dependent assembler.  LINE points to a
3533    machine dependent instruction.  This function is supposed to emit
3534    the frags/bytes it assembles to.  */
3535 
3536 void
3537 md_assemble (char *line)
3538 {
3539   unsigned int j;
3540   char mnemonic[MAX_MNEM_SIZE], mnem_suffix;
3541   const insn_template *t;
3542 
3543   /* Initialize globals.  */
3544   memset (&i, '\0', sizeof (i));
3545   for (j = 0; j < MAX_OPERANDS; j++)
3546     i.reloc[j] = NO_RELOC;
3547   memset (disp_expressions, '\0', sizeof (disp_expressions));
3548   memset (im_expressions, '\0', sizeof (im_expressions));
3549   save_stack_p = save_stack;
3550 
3551   /* First parse an instruction mnemonic & call i386_operand for the operands.
3552      We assume that the scrubber has arranged it so that line[0] is the valid
3553      start of a (possibly prefixed) mnemonic.  */
3554 
3555   line = parse_insn (line, mnemonic);
3556   if (line == NULL)
3557     return;
3558   mnem_suffix = i.suffix;
3559 
3560   line = parse_operands (line, mnemonic);
3561   this_operand = -1;
3562   xfree (i.memop1_string);
3563   i.memop1_string = NULL;
3564   if (line == NULL)
3565     return;
3566 
3567   /* Now we've parsed the mnemonic into a set of templates, and have the
3568      operands at hand.  */
3569 
3570   /* All Intel opcodes have reversed operands except for "bound",
3571      "invlpga" and "enter".  We also don't reverse intersegment "jmp"
3572      and "call" instructions with 2 immediate operands so that the
3573      immediate segment precedes the offset, as it does in AT&T mode. */
3574   if (intel_syntax
3575       && i.operands > 1
3576       && (strcmp (mnemonic, "bound") != 0)
3577       && (strcmp (mnemonic, "invlpga") != 0)
3578       && !(operand_type_check (i.types[0], imm)
3579 	   && operand_type_check (i.types[1], imm)))
3580     swap_operands ();
3581 
3582   /* The order of the immediates should be reversed for the
3583      two-immediate extrq and insertq instructions.  */
3584   if (i.imm_operands == 2
3585       && (strcmp (mnemonic, "extrq") == 0
3586 	  || strcmp (mnemonic, "insertq") == 0))
3587       swap_2_operands (0, 1);
3588 
3589   if (i.imm_operands)
3590     optimize_imm ();
3591 
3592   /* Don't optimize displacement for movabs since it only takes 64bit
3593      displacement.  */
3594   if (i.disp_operands
3595       && i.disp_encoding != disp_encoding_32bit
3596       && (flag_code != CODE_64BIT
3597 	  || strcmp (mnemonic, "movabs") != 0))
3598     optimize_disp ();
3599 
3600   /* Next, we find a template that matches the given insn,
3601      making sure the overlap of the given operands types is consistent
3602      with the template operand types.  */
3603 
3604   if (!(t = match_template (mnem_suffix)))
3605     return;
3606 
3607   if (sse_check != check_none
3608       && !i.tm.opcode_modifier.noavx
3609       && (i.tm.cpu_flags.bitfield.cpusse
3610 	  || i.tm.cpu_flags.bitfield.cpusse2
3611 	  || i.tm.cpu_flags.bitfield.cpusse3
3612 	  || i.tm.cpu_flags.bitfield.cpussse3
3613 	  || i.tm.cpu_flags.bitfield.cpusse4_1
3614 	  || i.tm.cpu_flags.bitfield.cpusse4_2))
3615     {
3616       (sse_check == check_warning
3617        ? as_warn
3618        : as_bad) (_("SSE instruction `%s' is used"), i.tm.name);
3619     }
3620 
3621   /* Zap movzx and movsx suffix.  The suffix has been set from
3622      "word ptr" or "byte ptr" on the source operand in Intel syntax
3623      or extracted from mnemonic in AT&T syntax.  But we'll use
3624      the destination register to choose the suffix for encoding.  */
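  /* movzx is 0x0fb6 (byte source) / 0x0fb7 (word source) and movsx is
     0x0fbe / 0x0fbf, so masking off bits 0 and 3 with ~9 folds all four
     base opcodes onto 0x0fb6.  */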
3625   if ((i.tm.base_opcode & ~9) == 0x0fb6)
3626     {
3627       /* In Intel syntax, there must be a suffix.  In AT&T syntax, if
3628 	 there is no suffix, the default will be byte extension.  */
3629       if (i.reg_operands != 2
3630 	  && !i.suffix
3631 	  && intel_syntax)
3632 	as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
3633 
3634       i.suffix = 0;
3635     }
3636 
3637   if (i.tm.opcode_modifier.fwait)
3638     if (!add_prefix (FWAIT_OPCODE))
3639       return;
3640 
3641   /* Check if REP prefix is OK.  */
3642   if (i.rep_prefix && !i.tm.opcode_modifier.repprefixok)
3643     {
3644       as_bad (_("invalid instruction `%s' after `%s'"),
3645 		i.tm.name, i.rep_prefix);
3646       return;
3647     }
3648 
3649   /* Check for lock without a lockable instruction.  Destination operand
3650      must be memory unless it is xchg (0x86).  */
3651   if (i.prefix[LOCK_PREFIX]
3652       && (!i.tm.opcode_modifier.islockable
3653 	  || i.mem_operands == 0
3654 	  || (i.tm.base_opcode != 0x86
3655 	      && !operand_type_check (i.types[i.operands - 1], anymem))))
3656     {
3657       as_bad (_("expecting lockable instruction after `lock'"));
3658       return;
3659     }
3660 
3661   /* Check if HLE prefix is OK.  */
3662   if (i.hle_prefix && !check_hle ())
3663     return;
3664 
3665   /* Check BND prefix.  */
3666   if (i.bnd_prefix && !i.tm.opcode_modifier.bndprefixok)
3667     as_bad (_("expecting valid branch instruction after `bnd'"));
3668 
3669   if (i.tm.cpu_flags.bitfield.cpumpx)
3670     {
3671       if (flag_code == CODE_64BIT && i.prefix[ADDR_PREFIX])
3672 	as_bad (_("32-bit address isn't allowed in 64-bit MPX instructions."));
3673       else if (flag_code != CODE_16BIT
3674 	       ? i.prefix[ADDR_PREFIX]
3675 	       : i.mem_operands && !i.prefix[ADDR_PREFIX])
3676 	as_bad (_("16-bit address isn't allowed in MPX instructions"));
3677     }
3678 
3679   /* Insert BND prefix.  */
3680   if (add_bnd_prefix
3681       && i.tm.opcode_modifier.bndprefixok
3682       && !i.prefix[BND_PREFIX])
3683     add_prefix (BND_PREFIX_OPCODE);
3684 
3685   /* Check string instruction segment overrides.  */
3686   if (i.tm.opcode_modifier.isstring && i.mem_operands != 0)
3687     {
3688       if (!check_string ())
3689 	return;
3690       i.disp_operands = 0;
3691     }
3692 
3693   if (!process_suffix ())
3694     return;
3695 
3696   /* Update operand types.  */
3697   for (j = 0; j < i.operands; j++)
3698     i.types[j] = operand_type_and (i.types[j], i.tm.operand_types[j]);
3699 
3700   /* Make still unresolved immediate matches conform to size of immediate
3701      given in i.suffix.  */
3702   if (!finalize_imm ())
3703     return;
3704 
3705   if (i.types[0].bitfield.imm1)
3706     i.imm_operands = 0;	/* kludge for shift insns.  */
3707 
3708   /* We only need to check those implicit registers for instructions
3709      with 3 operands or less.  */
3710   if (i.operands <= 3)
3711     for (j = 0; j < i.operands; j++)
3712       if (i.types[j].bitfield.inoutportreg
3713 	  || i.types[j].bitfield.shiftcount
3714 	  || i.types[j].bitfield.acc
3715 	  || i.types[j].bitfield.floatacc)
3716 	i.reg_operands--;
3717 
3718   /* ImmExt should be processed after SSE2AVX.  */
3719   if (!i.tm.opcode_modifier.sse2avx
3720       && i.tm.opcode_modifier.immext)
3721     process_immext ();
3722 
3723   /* For insns with operands there are more diddles to do to the opcode.  */
3724   if (i.operands)
3725     {
3726       if (!process_operands ())
3727 	return;
3728     }
3729   else if (!quiet_warnings && i.tm.opcode_modifier.ugh)
3730     {
3731       /* UnixWare fsub no args is alias for fsubp, fadd -> faddp, etc.  */
3732       as_warn (_("translating to `%sp'"), i.tm.name);
3733     }
3734 
3735   if (i.tm.opcode_modifier.vex || i.tm.opcode_modifier.evex)
3736     {
3737       if (flag_code == CODE_16BIT)
3738 	{
3739 	  as_bad (_("instruction `%s' isn't supported in 16-bit mode."),
3740 		  i.tm.name);
3741 	  return;
3742 	}
3743 
3744       if (i.tm.opcode_modifier.vex)
3745 	build_vex_prefix (t);
3746       else
3747 	build_evex_prefix ();
3748     }
3749 
3750   /* Handle conversion of 'int $3' --> special int3 insn.  XOP or FMA4
3751      instructions may define INT_OPCODE as well, so avoid this corner
3752      case for those instructions that use MODRM.  */
3753   if (i.tm.base_opcode == INT_OPCODE
3754       && !i.tm.opcode_modifier.modrm
3755       && i.op[0].imms->X_add_number == 3)
3756     {
3757       i.tm.base_opcode = INT3_OPCODE;
3758       i.imm_operands = 0;
3759     }
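  /* E.g. "int $3" is thus emitted as the single byte 0xcc (int3) instead
     of the two-byte 0xcd 0x03 form, which is what debuggers expect for
     breakpoints.  */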
3760 
3761   if ((i.tm.opcode_modifier.jump
3762        || i.tm.opcode_modifier.jumpbyte
3763        || i.tm.opcode_modifier.jumpdword)
3764       && i.op[0].disps->X_op == O_constant)
3765     {
3766       /* Convert "jmp constant" (and "call constant") to a jump (call) to
3767 	 the absolute address given by the constant.  Since ix86 jumps and
3768 	 calls are pc relative, we need to generate a reloc.  */
3769       i.op[0].disps->X_add_symbol = &abs_symbol;
3770       i.op[0].disps->X_op = O_symbol;
3771     }
3772 
3773   if (i.tm.opcode_modifier.rex64)
3774     i.rex |= REX_W;
3775 
3776   /* For 8 bit registers we need an empty rex prefix.  Also if the
3777      instruction already requires a REX prefix, we need to convert old
3778      registers to new ones.  */
3779 
3780   if ((i.types[0].bitfield.reg8
3781        && (i.op[0].regs->reg_flags & RegRex64) != 0)
3782       || (i.types[1].bitfield.reg8
3783 	  && (i.op[1].regs->reg_flags & RegRex64) != 0)
3784       || ((i.types[0].bitfield.reg8
3785 	   || i.types[1].bitfield.reg8)
3786 	  && i.rex != 0))
3787     {
3788       int x;
3789 
3790       i.rex |= REX_OPCODE;
3791       for (x = 0; x < 2; x++)
3792 	{
3793 	  /* Look for 8 bit operand that uses old registers.  */
3794 	  if (i.types[x].bitfield.reg8
3795 	      && (i.op[x].regs->reg_flags & RegRex64) == 0)
3796 	    {
3797 	      /* In case it is "hi" register, give up.  */
3798 	      if (i.op[x].regs->reg_num > 3)
3799 		as_bad (_("can't encode register '%s%s' in an "
3800 			  "instruction requiring REX prefix."),
3801 			register_prefix, i.op[x].regs->reg_name);
3802 
3803 	      /* Otherwise it is equivalent to the extended register.
3804 		 Since the encoding doesn't change this is merely
3805 		 cosmetic cleanup for debug output.  */
3806 
3807 	      i.op[x].regs = i.op[x].regs + 8;
3808 	    }
3809 	}
3810     }
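  /* E.g. "movb %al, %sil" gets an empty REX prefix because %sil is only
     addressable with REX, whereas "movb %ah, %sil" is impossible: the
     high-byte registers %ah/%bh/%ch/%dh cannot be encoded once any REX
     prefix is present, hence the error above.  */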
3811 
3812   if (i.rex != 0)
3813     add_prefix (REX_OPCODE | i.rex);
3814 
3815   /* We are ready to output the insn.  */
3816   output_insn ();
3817 }
3818 
3819 static char *
3820 parse_insn (char *line, char *mnemonic)
3821 {
3822   char *l = line;
3823   char *token_start = l;
3824   char *mnem_p;
3825   int supported;
3826   const insn_template *t;
3827   char *dot_p = NULL;
3828 
3829   while (1)
3830     {
3831       mnem_p = mnemonic;
3832       while ((*mnem_p = mnemonic_chars[(unsigned char) *l]) != 0)
3833 	{
3834 	  if (*mnem_p == '.')
3835 	    dot_p = mnem_p;
3836 	  mnem_p++;
3837 	  if (mnem_p >= mnemonic + MAX_MNEM_SIZE)
3838 	    {
3839 	      as_bad (_("no such instruction: `%s'"), token_start);
3840 	      return NULL;
3841 	    }
3842 	  l++;
3843 	}
3844       if (!is_space_char (*l)
3845 	  && *l != END_OF_INSN
3846 	  && (intel_syntax
3847 	      || (*l != PREFIX_SEPARATOR
3848 		  && *l != ',')))
3849 	{
3850 	  as_bad (_("invalid character %s in mnemonic"),
3851 		  output_invalid (*l));
3852 	  return NULL;
3853 	}
3854       if (token_start == l)
3855 	{
3856 	  if (!intel_syntax && *l == PREFIX_SEPARATOR)
3857 	    as_bad (_("expecting prefix; got nothing"));
3858 	  else
3859 	    as_bad (_("expecting mnemonic; got nothing"));
3860 	  return NULL;
3861 	}
3862 
3863       /* Look up instruction (or prefix) via hash table.  */
3864       current_templates = (const templates *) hash_find (op_hash, mnemonic);
3865 
3866       if (*l != END_OF_INSN
3867 	  && (!is_space_char (*l) || l[1] != END_OF_INSN)
3868 	  && current_templates
3869 	  && current_templates->start->opcode_modifier.isprefix)
3870 	{
3871 	  if (!cpu_flags_check_cpu64 (current_templates->start->cpu_flags))
3872 	    {
3873 	      as_bad ((flag_code != CODE_64BIT
3874 		       ? _("`%s' is only supported in 64-bit mode")
3875 		       : _("`%s' is not supported in 64-bit mode")),
3876 		      current_templates->start->name);
3877 	      return NULL;
3878 	    }
3879 	  /* If we are in 16-bit mode, do not allow addr16 or data16.
3880 	     Similarly, in 32-bit mode, do not allow addr32 or data32.  */
3881 	  if ((current_templates->start->opcode_modifier.size16
3882 	       || current_templates->start->opcode_modifier.size32)
3883 	      && flag_code != CODE_64BIT
3884 	      && (current_templates->start->opcode_modifier.size32
3885 		  ^ (flag_code == CODE_16BIT)))
3886 	    {
3887 	      as_bad (_("redundant %s prefix"),
3888 		      current_templates->start->name);
3889 	      return NULL;
3890 	    }
3891 	  /* Add prefix, checking for repeated prefixes.  */
3892 	  switch (add_prefix (current_templates->start->base_opcode))
3893 	    {
3894 	    case PREFIX_EXIST:
3895 	      return NULL;
3896 	    case PREFIX_REP:
3897 	      if (current_templates->start->cpu_flags.bitfield.cpuhle)
3898 		i.hle_prefix = current_templates->start->name;
3899 	      else if (current_templates->start->cpu_flags.bitfield.cpumpx)
3900 		i.bnd_prefix = current_templates->start->name;
3901 	      else
3902 		i.rep_prefix = current_templates->start->name;
3903 	      break;
3904 	    default:
3905 	      break;
3906 	    }
3907 	  /* Skip past PREFIX_SEPARATOR and reset token_start.  */
3908 	  token_start = ++l;
3909 	}
3910       else
3911 	break;
3912     }
3913 
3914   if (!current_templates)
3915     {
3916       /* Check if we should swap operands or force an 8-bit or 32-bit
3917 	 displacement in the encoding.  */
3918       if (mnem_p - 2 == dot_p && dot_p[1] == 's')
3919 	i.swap_operand = 1;
3920       else if (mnem_p - 3 == dot_p
3921 	       && dot_p[1] == 'd'
3922 	       && dot_p[2] == '8')
3923 	i.disp_encoding = disp_encoding_8bit;
3924       else if (mnem_p - 4 == dot_p
3925 	       && dot_p[1] == 'd'
3926 	       && dot_p[2] == '3'
3927 	       && dot_p[3] == '2')
3928 	i.disp_encoding = disp_encoding_32bit;
3929       else
3930 	goto check_suffix;
3931       mnem_p = dot_p;
3932       *dot_p = '\0';
3933       current_templates = (const templates *) hash_find (op_hash, mnemonic);
3934     }
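  /* These pseudo-suffixes are assembler-local: e.g. "movl.s %eax, %ebx"
     requests the alternative (operand-swapped) encoding, while
     "movl.d8 ..." and "movl.d32 ..." force an 8-bit or 32-bit
     displacement in the address encoding.  */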
3935 
3936   if (!current_templates)
3937     {
3938 check_suffix:
3939       /* See if we can get a match by trimming off a suffix.  */
3940       switch (mnem_p[-1])
3941 	{
3942 	case WORD_MNEM_SUFFIX:
3943 	  if (intel_syntax && (intel_float_operand (mnemonic) & 2))
3944 	    i.suffix = SHORT_MNEM_SUFFIX;
3945 	  else
3946 	case BYTE_MNEM_SUFFIX:
3947 	case QWORD_MNEM_SUFFIX:
3948 	  i.suffix = mnem_p[-1];
3949 	  mnem_p[-1] = '\0';
3950 	  current_templates = (const templates *) hash_find (op_hash,
3951                                                              mnemonic);
3952 	  break;
3953 	case SHORT_MNEM_SUFFIX:
3954 	case LONG_MNEM_SUFFIX:
3955 	  if (!intel_syntax)
3956 	    {
3957 	      i.suffix = mnem_p[-1];
3958 	      mnem_p[-1] = '\0';
3959 	      current_templates = (const templates *) hash_find (op_hash,
3960                                                                  mnemonic);
3961 	    }
3962 	  break;
3963 
3964 	  /* Intel Syntax.  */
3965 	case 'd':
3966 	  if (intel_syntax)
3967 	    {
3968 	      if (intel_float_operand (mnemonic) == 1)
3969 		i.suffix = SHORT_MNEM_SUFFIX;
3970 	      else
3971 		i.suffix = LONG_MNEM_SUFFIX;
3972 	      mnem_p[-1] = '\0';
3973 	      current_templates = (const templates *) hash_find (op_hash,
3974                                                                  mnemonic);
3975 	    }
3976 	  break;
3977 	}
3978       if (!current_templates)
3979 	{
3980 	  as_bad (_("no such instruction: `%s'"), token_start);
3981 	  return NULL;
3982 	}
3983     }
3984 
3985   if (current_templates->start->opcode_modifier.jump
3986       || current_templates->start->opcode_modifier.jumpbyte)
3987     {
3988       /* Check for a branch hint.  We allow ",pt" and ",pn" for
3989 	 predict taken and predict not taken respectively.
3990 	 I'm not sure that branch hints actually do anything on loop
3991 	 and jcxz insns (JumpByte) for current Pentium4 chips.  They
3992 	 may work in the future and it doesn't hurt to accept them
3993 	 now.  */
3994       if (l[0] == ',' && l[1] == 'p')
3995 	{
3996 	  if (l[2] == 't')
3997 	    {
3998 	      if (!add_prefix (DS_PREFIX_OPCODE))
3999 		return NULL;
4000 	      l += 3;
4001 	    }
4002 	  else if (l[2] == 'n')
4003 	    {
4004 	      if (!add_prefix (CS_PREFIX_OPCODE))
4005 		return NULL;
4006 	      l += 3;
4007 	    }
4008 	}
4009     }
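  /* The hints reuse segment override opcodes: ",pt" adds the %ds
     override (0x3e) and ",pn" the %cs override (0x2e), e.g.
     "jne,pt target".  */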
4010   /* Any other comma loses.  */
4011   if (*l == ',')
4012     {
4013       as_bad (_("invalid character %s in mnemonic"),
4014 	      output_invalid (*l));
4015       return NULL;
4016     }
4017 
4018   /* Check if instruction is supported on specified architecture.  */
4019   supported = 0;
4020   for (t = current_templates->start; t < current_templates->end; ++t)
4021     {
4022       supported |= cpu_flags_match (t);
4023       if (supported == CPU_FLAGS_PERFECT_MATCH)
4024 	goto skip;
4025     }
4026 
4027   if (!(supported & CPU_FLAGS_64BIT_MATCH))
4028     {
4029       as_bad (flag_code == CODE_64BIT
4030 	      ? _("`%s' is not supported in 64-bit mode")
4031 	      : _("`%s' is only supported in 64-bit mode"),
4032 	      current_templates->start->name);
4033       return NULL;
4034     }
4035   if (supported != CPU_FLAGS_PERFECT_MATCH)
4036     {
4037       as_bad (_("`%s' is not supported on `%s%s'"),
4038 	      current_templates->start->name,
4039 	      cpu_arch_name ? cpu_arch_name : default_arch,
4040 	      cpu_sub_arch_name ? cpu_sub_arch_name : "");
4041       return NULL;
4042     }
4043 
4044 skip:
4045   if (!cpu_arch_flags.bitfield.cpui386
4046 	   && (flag_code != CODE_16BIT))
4047     {
4048       as_warn (_("use .code16 to ensure correct addressing mode"));
4049     }
4050 
4051   return l;
4052 }
4053 
4054 static char *
4055 parse_operands (char *l, const char *mnemonic)
4056 {
4057   char *token_start;
4058 
4059   /* 1 if operand is pending after ','.  */
4060   unsigned int expecting_operand = 0;
4061 
4062   /* Non-zero if operand parens not balanced.  */
4063   unsigned int paren_not_balanced;
4064 
4065   while (*l != END_OF_INSN)
4066     {
4067       /* Skip optional white space before operand.  */
4068       if (is_space_char (*l))
4069 	++l;
4070       if (!is_operand_char (*l) && *l != END_OF_INSN && *l != '"')
4071 	{
4072 	  as_bad (_("invalid character %s before operand %d"),
4073 		  output_invalid (*l),
4074 		  i.operands + 1);
4075 	  return NULL;
4076 	}
4077       token_start = l;	/* After white space.  */
4078       paren_not_balanced = 0;
4079       while (paren_not_balanced || *l != ',')
4080 	{
4081 	  if (*l == END_OF_INSN)
4082 	    {
4083 	      if (paren_not_balanced)
4084 		{
4085 		  if (!intel_syntax)
4086 		    as_bad (_("unbalanced parenthesis in operand %d."),
4087 			    i.operands + 1);
4088 		  else
4089 		    as_bad (_("unbalanced brackets in operand %d."),
4090 			    i.operands + 1);
4091 		  return NULL;
4092 		}
4093 	      else
4094 		break;	/* we are done */
4095 	    }
4096 	  else if (!is_operand_char (*l) && !is_space_char (*l) && *l != '"')
4097 	    {
4098 	      as_bad (_("invalid character %s in operand %d"),
4099 		      output_invalid (*l),
4100 		      i.operands + 1);
4101 	      return NULL;
4102 	    }
4103 	  if (!intel_syntax)
4104 	    {
4105 	      if (*l == '(')
4106 		++paren_not_balanced;
4107 	      if (*l == ')')
4108 		--paren_not_balanced;
4109 	    }
4110 	  else
4111 	    {
4112 	      if (*l == '[')
4113 		++paren_not_balanced;
4114 	      if (*l == ']')
4115 		--paren_not_balanced;
4116 	    }
4117 	  l++;
4118 	}
4119       if (l != token_start)
4120 	{			/* Yes, we've read in another operand.  */
4121 	  unsigned int operand_ok;
4122 	  this_operand = i.operands++;
4123 	  i.types[this_operand].bitfield.unspecified = 1;
4124 	  if (i.operands > MAX_OPERANDS)
4125 	    {
4126 	      as_bad (_("spurious operands; (%d operands/instruction max)"),
4127 		      MAX_OPERANDS);
4128 	      return NULL;
4129 	    }
4130 	  /* Now parse operand adding info to 'i' as we go along.  */
4131 	  END_STRING_AND_SAVE (l);
4132 
4133 	  if (intel_syntax)
4134 	    operand_ok =
4135 	      i386_intel_operand (token_start,
4136 				  intel_float_operand (mnemonic));
4137 	  else
4138 	    operand_ok = i386_att_operand (token_start);
4139 
4140 	  RESTORE_END_STRING (l);
4141 	  if (!operand_ok)
4142 	    return NULL;
4143 	}
4144       else
4145 	{
4146 	  if (expecting_operand)
4147 	    {
4148 	    expecting_operand_after_comma:
4149 	      as_bad (_("expecting operand after ','; got nothing"));
4150 	      return NULL;
4151 	    }
4152 	  if (*l == ',')
4153 	    {
4154 	      as_bad (_("expecting operand before ','; got nothing"));
4155 	      return NULL;
4156 	    }
4157 	}
4158 
4159       /* Now *l must be either ',' or END_OF_INSN.  */
4160       if (*l == ',')
4161 	{
4162 	  if (*++l == END_OF_INSN)
4163 	    {
4164 	      /* Just skip it, if it's \n complain.  */
4165 	      goto expecting_operand_after_comma;
4166 	    }
4167 	  expecting_operand = 1;
4168 	}
4169     }
4170   return l;
4171 }
4172 
4173 static void
4174 swap_2_operands (int xchg1, int xchg2)
4175 {
4176   union i386_op temp_op;
4177   i386_operand_type temp_type;
4178   enum bfd_reloc_code_real temp_reloc;
4179 
4180   temp_type = i.types[xchg2];
4181   i.types[xchg2] = i.types[xchg1];
4182   i.types[xchg1] = temp_type;
4183   temp_op = i.op[xchg2];
4184   i.op[xchg2] = i.op[xchg1];
4185   i.op[xchg1] = temp_op;
4186   temp_reloc = i.reloc[xchg2];
4187   i.reloc[xchg2] = i.reloc[xchg1];
4188   i.reloc[xchg1] = temp_reloc;
4189 
4190   if (i.mask)
4191     {
4192       if (i.mask->operand == xchg1)
4193 	i.mask->operand = xchg2;
4194       else if (i.mask->operand == xchg2)
4195 	i.mask->operand = xchg1;
4196     }
4197   if (i.broadcast)
4198     {
4199       if (i.broadcast->operand == xchg1)
4200 	i.broadcast->operand = xchg2;
4201       else if (i.broadcast->operand == xchg2)
4202 	i.broadcast->operand = xchg1;
4203     }
4204   if (i.rounding)
4205     {
4206       if (i.rounding->operand == xchg1)
4207 	i.rounding->operand = xchg2;
4208       else if (i.rounding->operand == xchg2)
4209 	i.rounding->operand = xchg1;
4210     }
4211 }
4212 
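/* Put Intel-syntax operands into the internal AT&T order by exchanging
   the outer operand pairs; the middle operand of a 3-operand insn stays
   put.  E.g. Intel "imul eax, ebx, 10" is parsed as (eax, ebx, 10) and
   becomes (10, ebx, eax), matching AT&T "imul $10, %ebx, %eax".  */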
4213 static void
4214 swap_operands (void)
4215 {
4216   switch (i.operands)
4217     {
4218     case 5:
4219     case 4:
4220       swap_2_operands (1, i.operands - 2);
4221     case 3:
4222     case 2:
4223       swap_2_operands (0, i.operands - 1);
4224       break;
4225     default:
4226       abort ();
4227     }
4228 
4229   if (i.mem_operands == 2)
4230     {
4231       const seg_entry *temp_seg;
4232       temp_seg = i.seg[0];
4233       i.seg[0] = i.seg[1];
4234       i.seg[1] = temp_seg;
4235     }
4236 }
4237 
4238 /* Try to ensure constant immediates are represented in the smallest
4239    opcode possible.  */
4240 static void
4241 optimize_imm (void)
4242 {
4243   char guess_suffix = 0;
4244   int op;
4245 
4246   if (i.suffix)
4247     guess_suffix = i.suffix;
4248   else if (i.reg_operands)
4249     {
4250       /* Figure out a suffix from the last register operand specified.
4251 	 We can't do this properly yet, i.e. excluding InOutPortReg,
4252 	 but the following works for instructions with immediates.
4253 	 In any case, we can't set i.suffix yet.  */
4254       for (op = i.operands; --op >= 0;)
4255 	if (i.types[op].bitfield.reg8)
4256 	  {
4257 	    guess_suffix = BYTE_MNEM_SUFFIX;
4258 	    break;
4259 	  }
4260 	else if (i.types[op].bitfield.reg16)
4261 	  {
4262 	    guess_suffix = WORD_MNEM_SUFFIX;
4263 	    break;
4264 	  }
4265 	else if (i.types[op].bitfield.reg32)
4266 	  {
4267 	    guess_suffix = LONG_MNEM_SUFFIX;
4268 	    break;
4269 	  }
4270 	else if (i.types[op].bitfield.reg64)
4271 	  {
4272 	    guess_suffix = QWORD_MNEM_SUFFIX;
4273 	    break;
4274 	  }
4275     }
4276   else if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
4277     guess_suffix = WORD_MNEM_SUFFIX;
4278 
4279   for (op = i.operands; --op >= 0;)
4280     if (operand_type_check (i.types[op], imm))
4281       {
4282 	switch (i.op[op].imms->X_op)
4283 	  {
4284 	  case O_constant:
4285 	    /* If a suffix is given, this operand may be shortened.  */
4286 	    switch (guess_suffix)
4287 	      {
4288 	      case LONG_MNEM_SUFFIX:
4289 		i.types[op].bitfield.imm32 = 1;
4290 		i.types[op].bitfield.imm64 = 1;
4291 		break;
4292 	      case WORD_MNEM_SUFFIX:
4293 		i.types[op].bitfield.imm16 = 1;
4294 		i.types[op].bitfield.imm32 = 1;
4295 		i.types[op].bitfield.imm32s = 1;
4296 		i.types[op].bitfield.imm64 = 1;
4297 		break;
4298 	      case BYTE_MNEM_SUFFIX:
4299 		i.types[op].bitfield.imm8 = 1;
4300 		i.types[op].bitfield.imm8s = 1;
4301 		i.types[op].bitfield.imm16 = 1;
4302 		i.types[op].bitfield.imm32 = 1;
4303 		i.types[op].bitfield.imm32s = 1;
4304 		i.types[op].bitfield.imm64 = 1;
4305 		break;
4306 	      }
4307 
4308 	    /* If this operand is at most 16 bits, convert it
4309 	       to a signed 16 bit number before trying to see
4310 	       whether it will fit in an even smaller size.
4311 	       This allows a 16-bit operand such as $0xffe0 to
4312 	       be recognised as within Imm8S range.  */
4313 	    if ((i.types[op].bitfield.imm16)
4314 		&& (i.op[op].imms->X_add_number & ~(offsetT) 0xffff) == 0)
4315 	      {
4316 		i.op[op].imms->X_add_number =
4317 		  (((i.op[op].imms->X_add_number & 0xffff) ^ 0x8000) - 0x8000);
4318 	      }
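	    /* E.g. with a 'w' suffix the constant $0xffe0 becomes
	       (0xffe0 ^ 0x8000) - 0x8000 = -0x20, which then also lets
	       the Imm8S templates match.  */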
4319 #ifdef BFD64
4320 	    /* Store 32-bit immediate in 64-bit for 64-bit BFD.  */
4321 	    if ((i.types[op].bitfield.imm32)
4322 		&& ((i.op[op].imms->X_add_number & ~(((offsetT) 2 << 31) - 1))
4323 		    == 0))
4324 	      {
4325 		i.op[op].imms->X_add_number = ((i.op[op].imms->X_add_number
4326 						^ ((offsetT) 1 << 31))
4327 					       - ((offsetT) 1 << 31));
4328 	      }
4329 #endif
4330 	    i.types[op]
4331 	      = operand_type_or (i.types[op],
4332 				 smallest_imm_type (i.op[op].imms->X_add_number));
4333 
4334 	    /* We must avoid matching Imm32 templates when only a
4335 	       64-bit immediate is available.  */
4336 	    if (guess_suffix == QWORD_MNEM_SUFFIX)
4337 	      i.types[op].bitfield.imm32 = 0;
4338 	    break;
4339 
4340 	  case O_absent:
4341 	  case O_register:
4342 	    abort ();
4343 
4344 	    /* Symbols and expressions.  */
4345 	  default:
4346 	    /* Convert symbolic operand to proper sizes for matching, but don't
4347 	       prevent matching a set of insns that only supports sizes other
4348 	       than those matching the insn suffix.  */
4349 	    {
4350 	      i386_operand_type mask, allowed;
4351 	      const insn_template *t;
4352 
4353 	      operand_type_set (&mask, 0);
4354 	      operand_type_set (&allowed, 0);
4355 
4356 	      for (t = current_templates->start;
4357 		   t < current_templates->end;
4358 		   ++t)
4359 		allowed = operand_type_or (allowed,
4360 					   t->operand_types[op]);
4361 	      switch (guess_suffix)
4362 		{
4363 		case QWORD_MNEM_SUFFIX:
4364 		  mask.bitfield.imm64 = 1;
4365 		  mask.bitfield.imm32s = 1;
4366 		  break;
4367 		case LONG_MNEM_SUFFIX:
4368 		  mask.bitfield.imm32 = 1;
4369 		  break;
4370 		case WORD_MNEM_SUFFIX:
4371 		  mask.bitfield.imm16 = 1;
4372 		  break;
4373 		case BYTE_MNEM_SUFFIX:
4374 		  mask.bitfield.imm8 = 1;
4375 		  break;
4376 		default:
4377 		  break;
4378 		}
4379 	      allowed = operand_type_and (mask, allowed);
4380 	      if (!operand_type_all_zero (&allowed))
4381 		i.types[op] = operand_type_and (i.types[op], mask);
4382 	    }
4383 	    break;
4384 	  }
4385       }
4386 }
4387 
4388 /* Try to use the smallest displacement type too.  */
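/* E.g. in "movl 64(%eax), %ebx" the constant fits in a signed byte, so
   the Disp8 form (one displacement byte instead of four) is allowed to
   match.  */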
4389 static void
4390 optimize_disp (void)
4391 {
4392   int op;
4393 
4394   for (op = i.operands; --op >= 0;)
4395     if (operand_type_check (i.types[op], disp))
4396       {
4397 	if (i.op[op].disps->X_op == O_constant)
4398 	  {
4399 	    offsetT op_disp = i.op[op].disps->X_add_number;
4400 
4401 	    if (i.types[op].bitfield.disp16
4402 		&& (op_disp & ~(offsetT) 0xffff) == 0)
4403 	      {
4404 		/* If this operand is at most 16 bits, convert
4405 		   to a signed 16 bit number and don't use 64bit
4406 		   displacement.  */
4407 		op_disp = (((op_disp & 0xffff) ^ 0x8000) - 0x8000);
4408 		i.types[op].bitfield.disp64 = 0;
4409 	      }
4410 #ifdef BFD64
4411 	    /* Optimize 64-bit displacement to 32-bit for 64-bit BFD.  */
4412 	    if (i.types[op].bitfield.disp32
4413 		&& (op_disp & ~(((offsetT) 2 << 31) - 1)) == 0)
4414 	      {
4415 		/* If this operand is at most 32 bits, convert
4416 		   to a signed 32 bit number and don't use 64bit
4417 		   displacement.  */
4418 		op_disp &= (((offsetT) 2 << 31) - 1);
4419 		op_disp = (op_disp ^ ((offsetT) 1 << 31)) - ((addressT) 1 << 31);
4420 		i.types[op].bitfield.disp64 = 0;
4421 	      }
4422 #endif
4423 	    if (!op_disp && i.types[op].bitfield.baseindex)
4424 	      {
4425 		i.types[op].bitfield.disp8 = 0;
4426 		i.types[op].bitfield.disp16 = 0;
4427 		i.types[op].bitfield.disp32 = 0;
4428 		i.types[op].bitfield.disp32s = 0;
4429 		i.types[op].bitfield.disp64 = 0;
4430 		i.op[op].disps = 0;
4431 		i.disp_operands--;
4432 	      }
4433 	    else if (flag_code == CODE_64BIT)
4434 	      {
4435 		if (fits_in_signed_long (op_disp))
4436 		  {
4437 		    i.types[op].bitfield.disp64 = 0;
4438 		    i.types[op].bitfield.disp32s = 1;
4439 		  }
4440 		if (i.prefix[ADDR_PREFIX]
4441 		    && fits_in_unsigned_long (op_disp))
4442 		  i.types[op].bitfield.disp32 = 1;
4443 	      }
4444 	    if ((i.types[op].bitfield.disp32
4445 		 || i.types[op].bitfield.disp32s
4446 		 || i.types[op].bitfield.disp16)
4447 		&& fits_in_signed_byte (op_disp))
4448 	      i.types[op].bitfield.disp8 = 1;
4449 	  }
4450 	else if (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
4451 		 || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL)
4452 	  {
4453 	    fix_new_exp (frag_now, frag_more (0) - frag_now->fr_literal, 0,
4454 			 i.op[op].disps, 0, i.reloc[op]);
4455 	    i.types[op].bitfield.disp8 = 0;
4456 	    i.types[op].bitfield.disp16 = 0;
4457 	    i.types[op].bitfield.disp32 = 0;
4458 	    i.types[op].bitfield.disp32s = 0;
4459 	    i.types[op].bitfield.disp64 = 0;
4460 	  }
4461  	else
4462 	  /* We only support 64bit displacement on constants.  */
4463 	  i.types[op].bitfield.disp64 = 0;
4464       }
4465 }
4466 
4467 /* Check if operands are valid for the instruction.  */
4468 
4469 static int
4470 check_VecOperands (const insn_template *t)
4471 {
4472   unsigned int op;
4473 
4474   /* Without VSIB byte, we can't have a vector register for index.  */
4475   if (!t->opcode_modifier.vecsib
4476       && i.index_reg
4477       && (i.index_reg->reg_type.bitfield.regxmm
4478 	  || i.index_reg->reg_type.bitfield.regymm
4479 	  || i.index_reg->reg_type.bitfield.regzmm))
4480     {
4481       i.error = unsupported_vector_index_register;
4482       return 1;
4483     }
4484 
4485   /* Check if default mask is allowed.  */
4486   if (t->opcode_modifier.nodefmask
4487       && (!i.mask || i.mask->mask->reg_num == 0))
4488     {
4489       i.error = no_default_mask;
4490       return 1;
4491     }
4492 
4493   /* For VSIB byte, we need a vector register for index, and all vector
4494      registers must be distinct.  */
4495   if (t->opcode_modifier.vecsib)
4496     {
4497       if (!i.index_reg
4498 	  || !((t->opcode_modifier.vecsib == VecSIB128
4499 		&& i.index_reg->reg_type.bitfield.regxmm)
4500 	       || (t->opcode_modifier.vecsib == VecSIB256
4501 		   && i.index_reg->reg_type.bitfield.regymm)
4502 	       || (t->opcode_modifier.vecsib == VecSIB512
4503 		   && i.index_reg->reg_type.bitfield.regzmm)))
4504       {
4505 	i.error = invalid_vsib_address;
4506 	return 1;
4507       }
4508 
4509       gas_assert (i.reg_operands == 2 || i.mask);
4510       if (i.reg_operands == 2 && !i.mask)
4511 	{
4512 	  gas_assert (i.types[0].bitfield.regxmm
4513 		      || i.types[0].bitfield.regymm);
4514 	  gas_assert (i.types[2].bitfield.regxmm
4515 		      || i.types[2].bitfield.regymm);
4516 	  if (operand_check == check_none)
4517 	    return 0;
4518 	  if (register_number (i.op[0].regs)
4519 	      != register_number (i.index_reg)
4520 	      && register_number (i.op[2].regs)
4521 		 != register_number (i.index_reg)
4522 	      && register_number (i.op[0].regs)
4523 		 != register_number (i.op[2].regs))
4524 	    return 0;
4525 	  if (operand_check == check_error)
4526 	    {
4527 	      i.error = invalid_vector_register_set;
4528 	      return 1;
4529 	    }
4530 	  as_warn (_("mask, index, and destination registers should be distinct"));
4531 	}
4532       else if (i.reg_operands == 1 && i.mask)
4533 	{
4534 	  if ((i.types[1].bitfield.regymm
4535 	       || i.types[1].bitfield.regzmm)
4536 	      && (register_number (i.op[1].regs)
4537 		  == register_number (i.index_reg)))
4538 	    {
4539 	      if (operand_check == check_error)
4540 		{
4541 		  i.error = invalid_vector_register_set;
4542 		  return 1;
4543 		}
4544 	      if (operand_check != check_none)
4545 		as_warn (_("index and destination registers should be distinct"));
4546 	    }
4547 	}
4548     }
4549 
4550   /* Check if broadcast is supported by the instruction and is applied
4551      to the memory operand.  */
4552   if (i.broadcast)
4553     {
4554       int broadcasted_opnd_size;
4555 
4556       /* Check if the specified broadcast is supported by this instruction,
4557 	 and that it is applied to a memory operand of DWORD or QWORD type,
4558 	 depending on VecESize.  */
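      /* E.g. "vaddps (%rax){1to16}, %zmm1, %zmm2" broadcasts a 32-bit
	 element 16 times, i.e. 512 bits, so the corresponding template
	 operand has to be a zmmword for the size check below to pass.  */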
4559       if (i.broadcast->type != t->opcode_modifier.broadcast
4560 	  || !i.types[i.broadcast->operand].bitfield.mem
4561 	  || (t->opcode_modifier.vecesize == 0
4562 	      && !i.types[i.broadcast->operand].bitfield.dword
4563 	      && !i.types[i.broadcast->operand].bitfield.unspecified)
4564 	  || (t->opcode_modifier.vecesize == 1
4565 	      && !i.types[i.broadcast->operand].bitfield.qword
4566 	      && !i.types[i.broadcast->operand].bitfield.unspecified))
4567 	goto bad_broadcast;
4568 
4569       broadcasted_opnd_size = t->opcode_modifier.vecesize ? 64 : 32;
4570       if (i.broadcast->type == BROADCAST_1TO16)
4571 	broadcasted_opnd_size <<= 4; /* Broadcast 1to16.  */
4572       else if (i.broadcast->type == BROADCAST_1TO8)
4573 	broadcasted_opnd_size <<= 3; /* Broadcast 1to8.  */
4574       else if (i.broadcast->type == BROADCAST_1TO4)
4575 	broadcasted_opnd_size <<= 2; /* Broadcast 1to4.  */
4576       else if (i.broadcast->type == BROADCAST_1TO2)
4577 	broadcasted_opnd_size <<= 1; /* Broadcast 1to2.  */
4578       else
4579 	goto bad_broadcast;
4580 
4581       if ((broadcasted_opnd_size == 256
4582 	   && !t->operand_types[i.broadcast->operand].bitfield.ymmword)
4583 	  || (broadcasted_opnd_size == 512
4584 	      && !t->operand_types[i.broadcast->operand].bitfield.zmmword))
4585 	{
4586 	bad_broadcast:
4587 	  i.error = unsupported_broadcast;
4588 	  return 1;
4589 	}
4590     }
4591   /* If broadcast is supported by this instruction, check that an
4592      operand of one-element size isn't specified without broadcast.  */
4593   else if (t->opcode_modifier.broadcast && i.mem_operands)
4594     {
4595       /* Find memory operand.  */
4596       for (op = 0; op < i.operands; op++)
4597 	if (operand_type_check (i.types[op], anymem))
4598 	  break;
4599       gas_assert (op < i.operands);
4600       /* Check size of the memory operand.  */
4601       if ((t->opcode_modifier.vecesize == 0
4602 	   && i.types[op].bitfield.dword)
4603 	  || (t->opcode_modifier.vecesize == 1
4604 	      && i.types[op].bitfield.qword))
4605 	{
4606 	  i.error = broadcast_needed;
4607 	  return 1;
4608 	}
4609     }
4610 
4611   /* Check if requested masking is supported.  */
4612   if (i.mask
4613       && (!t->opcode_modifier.masking
4614 	  || (i.mask->zeroing
4615 	      && t->opcode_modifier.masking == MERGING_MASKING)))
4616     {
4617       i.error = unsupported_masking;
4618       return 1;
4619     }
4620 
4621   /* Check if masking is applied to dest operand.  */
4622   if (i.mask && (i.mask->operand != (int) (i.operands - 1)))
4623     {
4624       i.error = mask_not_on_destination;
4625       return 1;
4626     }
4627 
4628   /* Check RC/SAE.  */
4629   if (i.rounding)
4630     {
4631       if ((i.rounding->type != saeonly
4632 	   && !t->opcode_modifier.staticrounding)
4633 	  || (i.rounding->type == saeonly
4634 	      && (t->opcode_modifier.staticrounding
4635 		  || !t->opcode_modifier.sae)))
4636 	{
4637 	  i.error = unsupported_rc_sae;
4638 	  return 1;
4639 	}
4640       /* If the instruction has several immediate operands and one of
4641 	 them is rounding, the rounding operand should be the last
4642 	 immediate operand.  */
4643       if (i.imm_operands > 1
4644 	  && i.rounding->operand != (int) (i.imm_operands - 1))
4645 	{
4646 	  i.error = rc_sae_operand_not_last_imm;
4647 	  return 1;
4648 	}
4649     }
4650 
4651   /* Check vector Disp8 operand.  */
4652   if (t->opcode_modifier.disp8memshift)
4653     {
4654       if (i.broadcast)
4655 	i.memshift = t->opcode_modifier.vecesize ? 3 : 2;
4656       else
4657 	i.memshift = t->opcode_modifier.disp8memshift;
4658 
4659       for (op = 0; op < i.operands; op++)
4660 	if (operand_type_check (i.types[op], disp)
4661 	    && i.op[op].disps->X_op == O_constant)
4662 	  {
4663 	    offsetT value = i.op[op].disps->X_add_number;
4664 	    int vec_disp8_ok
4665 	      = (i.disp_encoding != disp_encoding_32bit
4666 		 && fits_in_vec_disp8 (value));
4667 	    if (t->operand_types [op].bitfield.vec_disp8)
4668 	      {
4669 		if (vec_disp8_ok)
4670 		  i.types[op].bitfield.vec_disp8 = 1;
4671 		else
4672 		  {
4673 		    /* Vector insn can only have Vec_Disp8/Disp32 in
4674 		       32/64bit modes, and Vec_Disp8/Disp16 in 16bit
4675 		       mode.  */
4676 		    i.types[op].bitfield.disp8 = 0;
4677 		    if (flag_code != CODE_16BIT)
4678 		      i.types[op].bitfield.disp16 = 0;
4679 		  }
4680 	      }
4681 	    else if (flag_code != CODE_16BIT)
4682 	      {
4683 		/* One form of this instruction supports vector Disp8.
4684 		   Try vector Disp8 if we need to use Disp32.  */
4685 		if (vec_disp8_ok && !fits_in_signed_byte (value))
4686 		  {
4687 		    i.error = try_vector_disp8;
4688 		    return 1;
4689 		  }
4690 	      }
4691 	  }
4692     }
4693   else
4694     i.memshift = -1;
4695 
4696   return 0;
4697 }
4698 
4699 /* Check if operands are valid for the instruction.  Update VEX
4700    operand types.  */
4701 
4702 static int
4703 VEX_check_operands (const insn_template *t)
4704 {
4705   /* VREX is only valid with EVEX prefix.  */
4706   if (i.need_vrex && !t->opcode_modifier.evex)
4707     {
4708       i.error = invalid_register_operand;
4709       return 1;
4710     }
4711 
4712   if (!t->opcode_modifier.vex)
4713     return 0;
4714 
4715   /* Only check VEX_Imm4, which must be the first operand.  */
4716   if (t->operand_types[0].bitfield.vec_imm4)
4717     {
4718       if (i.op[0].imms->X_op != O_constant
4719 	  || !fits_in_imm4 (i.op[0].imms->X_add_number))
4720 	{
4721 	  i.error = bad_imm4;
4722 	  return 1;
4723 	}
4724 
4725       /* Turn off Imm8 so that update_imm won't complain.  */
4726       i.types[0] = vec_imm4;
4727     }
4728 
4729   return 0;
4730 }
4731 
4732 static const insn_template *
4733 match_template (char mnem_suffix)
4734 {
4735   /* Points to template once we've found it.  */
4736   const insn_template *t;
4737   i386_operand_type overlap0, overlap1, overlap2, overlap3;
4738   i386_operand_type overlap4;
4739   unsigned int found_reverse_match;
4740   i386_opcode_modifier suffix_check, mnemsuf_check;
4741   i386_operand_type operand_types [MAX_OPERANDS];
4742   int addr_prefix_disp;
4743   unsigned int j;
4744   unsigned int found_cpu_match;
4745   unsigned int check_register;
4746   enum i386_error specific_error = 0;
4747 
4748 #if MAX_OPERANDS != 5
4749 # error "MAX_OPERANDS must be 5."
4750 #endif
4751 
4752   found_reverse_match = 0;
4753   addr_prefix_disp = -1;
4754 
4755   memset (&suffix_check, 0, sizeof (suffix_check));
4756   if (i.suffix == BYTE_MNEM_SUFFIX)
4757     suffix_check.no_bsuf = 1;
4758   else if (i.suffix == WORD_MNEM_SUFFIX)
4759     suffix_check.no_wsuf = 1;
4760   else if (i.suffix == SHORT_MNEM_SUFFIX)
4761     suffix_check.no_ssuf = 1;
4762   else if (i.suffix == LONG_MNEM_SUFFIX)
4763     suffix_check.no_lsuf = 1;
4764   else if (i.suffix == QWORD_MNEM_SUFFIX)
4765     suffix_check.no_qsuf = 1;
4766   else if (i.suffix == LONG_DOUBLE_MNEM_SUFFIX)
4767     suffix_check.no_ldsuf = 1;
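  /* E.g. for "addw $1, %ax" the parsed 'w' suffix sets no_wsuf here, so
     any template flagged No_wSuf (no 16-bit form) is rejected by the
     suffix check in the template loop below.  */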
4768 
4769   memset (&mnemsuf_check, 0, sizeof (mnemsuf_check));
4770   if (intel_syntax)
4771     {
4772       switch (mnem_suffix)
4773 	{
4774 	case BYTE_MNEM_SUFFIX:  mnemsuf_check.no_bsuf = 1; break;
4775 	case WORD_MNEM_SUFFIX:  mnemsuf_check.no_wsuf = 1; break;
4776 	case SHORT_MNEM_SUFFIX: mnemsuf_check.no_ssuf = 1; break;
4777 	case LONG_MNEM_SUFFIX:  mnemsuf_check.no_lsuf = 1; break;
4778 	case QWORD_MNEM_SUFFIX: mnemsuf_check.no_qsuf = 1; break;
4779 	}
4780     }
4781 
4782   /* Must have right number of operands.  */
4783   i.error = number_of_operands_mismatch;
4784 
4785   for (t = current_templates->start; t < current_templates->end; t++)
4786     {
4787       addr_prefix_disp = -1;
4788 
4789       if (i.operands != t->operands)
4790 	continue;
4791 
4792       /* Check processor support.  */
4793       i.error = unsupported;
4794       found_cpu_match = (cpu_flags_match (t)
4795 			 == CPU_FLAGS_PERFECT_MATCH);
4796       if (!found_cpu_match)
4797 	continue;
4798 
4799       /* Check old gcc support. */
4800       i.error = old_gcc_only;
4801       if (!old_gcc && t->opcode_modifier.oldgcc)
4802 	continue;
4803 
4804       /* Check AT&T mnemonic.   */
4805       i.error = unsupported_with_intel_mnemonic;
4806       if (intel_mnemonic && t->opcode_modifier.attmnemonic)
4807 	continue;
4808 
4809       /* Check AT&T/Intel syntax and Intel64/AMD64 ISA.   */
4810       i.error = unsupported_syntax;
4811       if ((intel_syntax && t->opcode_modifier.attsyntax)
4812 	  || (!intel_syntax && t->opcode_modifier.intelsyntax)
4813 	  || (intel64 && t->opcode_modifier.amd64)
4814 	  || (!intel64 && t->opcode_modifier.intel64))
4815 	continue;
4816 
4817       /* Check the suffix, except for some instructions in intel mode.  */
4818       i.error = invalid_instruction_suffix;
4819       if ((!intel_syntax || !t->opcode_modifier.ignoresize)
4820 	  && ((t->opcode_modifier.no_bsuf && suffix_check.no_bsuf)
4821 	      || (t->opcode_modifier.no_wsuf && suffix_check.no_wsuf)
4822 	      || (t->opcode_modifier.no_lsuf && suffix_check.no_lsuf)
4823 	      || (t->opcode_modifier.no_ssuf && suffix_check.no_ssuf)
4824 	      || (t->opcode_modifier.no_qsuf && suffix_check.no_qsuf)
4825 	      || (t->opcode_modifier.no_ldsuf && suffix_check.no_ldsuf)))
4826 	continue;
4827       /* In Intel mode all mnemonic suffixes must be explicitly allowed.  */
4828       if ((t->opcode_modifier.no_bsuf && mnemsuf_check.no_bsuf)
4829 	  || (t->opcode_modifier.no_wsuf && mnemsuf_check.no_wsuf)
4830 	  || (t->opcode_modifier.no_lsuf && mnemsuf_check.no_lsuf)
4831 	  || (t->opcode_modifier.no_ssuf && mnemsuf_check.no_ssuf)
4832 	  || (t->opcode_modifier.no_qsuf && mnemsuf_check.no_qsuf)
4833 	  || (t->opcode_modifier.no_ldsuf && mnemsuf_check.no_ldsuf))
4834 	continue;
4835 
4836       if (!operand_size_match (t))
4837 	continue;
4838 
4839       for (j = 0; j < MAX_OPERANDS; j++)
4840 	operand_types[j] = t->operand_types[j];
4841 
4842       /* In general, don't allow 64-bit operands in 32-bit mode.  */
4843       if (i.suffix == QWORD_MNEM_SUFFIX
4844 	  && flag_code != CODE_64BIT
4845 	  && (intel_syntax
4846 	      ? (!t->opcode_modifier.ignoresize
4847 		 && !intel_float_operand (t->name))
4848 	      : intel_float_operand (t->name) != 2)
4849 	  && ((!operand_types[0].bitfield.regmmx
4850 	       && !operand_types[0].bitfield.regxmm
4851 	       && !operand_types[0].bitfield.regymm
4852 	       && !operand_types[0].bitfield.regzmm)
4853 	      || (!operand_types[t->operands > 1].bitfield.regmmx
4854 		  && operand_types[t->operands > 1].bitfield.regxmm
4855 		  && operand_types[t->operands > 1].bitfield.regymm
4856 		  && operand_types[t->operands > 1].bitfield.regzmm))
4857 	  && (t->base_opcode != 0x0fc7
4858 	      || t->extension_opcode != 1 /* cmpxchg8b */))
4859 	continue;
4860 
4861       /* In general, don't allow 32-bit operands on pre-386.  */
4862       else if (i.suffix == LONG_MNEM_SUFFIX
4863 	       && !cpu_arch_flags.bitfield.cpui386
4864 	       && (intel_syntax
4865 		   ? (!t->opcode_modifier.ignoresize
4866 		      && !intel_float_operand (t->name))
4867 		   : intel_float_operand (t->name) != 2)
4868 	       && ((!operand_types[0].bitfield.regmmx
4869 		    && !operand_types[0].bitfield.regxmm)
4870 		   || (!operand_types[t->operands > 1].bitfield.regmmx
4871 		       && operand_types[t->operands > 1].bitfield.regxmm)))
4872 	continue;
4873 
4874       /* Do not verify operands when there are none.  */
4875       else
4876 	{
4877 	  if (!t->operands)
4878 	    /* We've found a match; break out of loop.  */
4879 	    break;
4880 	}
4881 
4882       /* Address size prefix will turn Disp64/Disp32/Disp16 operand
4883 	 into Disp32/Disp16/Disp32 operand.  */
4884       if (i.prefix[ADDR_PREFIX] != 0)
4885 	  {
4886 	    /* There should be only one Disp operand.  */
4887 	    switch (flag_code)
4888 	    {
4889 	    case CODE_16BIT:
4890 	      for (j = 0; j < MAX_OPERANDS; j++)
4891 		{
4892 		  if (operand_types[j].bitfield.disp16)
4893 		    {
4894 		      addr_prefix_disp = j;
4895 		      operand_types[j].bitfield.disp32 = 1;
4896 		      operand_types[j].bitfield.disp16 = 0;
4897 		      break;
4898 		    }
4899 		}
4900 	      break;
4901 	    case CODE_32BIT:
4902 	      for (j = 0; j < MAX_OPERANDS; j++)
4903 		{
4904 		  if (operand_types[j].bitfield.disp32)
4905 		    {
4906 		      addr_prefix_disp = j;
4907 		      operand_types[j].bitfield.disp32 = 0;
4908 		      operand_types[j].bitfield.disp16 = 1;
4909 		      break;
4910 		    }
4911 		}
4912 	      break;
4913 	    case CODE_64BIT:
4914 	      for (j = 0; j < MAX_OPERANDS; j++)
4915 		{
4916 		  if (operand_types[j].bitfield.disp64)
4917 		    {
4918 		      addr_prefix_disp = j;
4919 		      operand_types[j].bitfield.disp64 = 0;
4920 		      operand_types[j].bitfield.disp32 = 1;
4921 		      break;
4922 		    }
4923 		}
4924 	      break;
4925 	    }
4926 	  }
4927 
4928       /* Force 0x8b encoding for "mov foo@GOT, %eax".  */
4929       if (i.reloc[0] == BFD_RELOC_386_GOT32 && t->base_opcode == 0xa0)
4930 	continue;
4931 
4932       /* We check register size if needed.  */
4933       check_register = t->opcode_modifier.checkregsize;
4934       overlap0 = operand_type_and (i.types[0], operand_types[0]);
4935       switch (t->operands)
4936 	{
4937 	case 1:
4938 	  if (!operand_type_match (overlap0, i.types[0]))
4939 	    continue;
4940 	  break;
4941 	case 2:
4942 	  /* xchg %eax, %eax is a special case.  It is an alias for nop
4943 	     only in 32-bit mode and we can use opcode 0x90.  In 64-bit
4944 	     mode, we can't use 0x90 for xchg %eax, %eax since it would
4945 	     zero-extend %eax to %rax.  */
4946 	  if (flag_code == CODE_64BIT
4947 	      && t->base_opcode == 0x90
4948 	      && operand_type_equal (&i.types [0], &acc32)
4949 	      && operand_type_equal (&i.types [1], &acc32))
4950 	    continue;
4951 	  if (i.swap_operand)
4952 	    {
4953 	      /* If we swap operand in encoding, we either match
4954 		 the next one or reverse direction of operands.  */
4955 	      if (t->opcode_modifier.s)
4956 		continue;
4957 	      else if (t->opcode_modifier.d)
4958 		goto check_reverse;
4959 	    }
4960 
4961 	case 3:
4962 	  /* If we swap operand in encoding, we match the next one.  */
4963 	  if (i.swap_operand && t->opcode_modifier.s)
4964 	    continue;
4965 	case 4:
4966 	case 5:
4967 	  overlap1 = operand_type_and (i.types[1], operand_types[1]);
4968 	  if (!operand_type_match (overlap0, i.types[0])
4969 	      || !operand_type_match (overlap1, i.types[1])
4970 	      || (check_register
4971 		  && !operand_type_register_match (overlap0, i.types[0],
4972 						   operand_types[0],
4973 						   overlap1, i.types[1],
4974 						   operand_types[1])))
4975 	    {
4976 	      /* Check if other direction is valid ...  */
4977 	      if (!t->opcode_modifier.d && !t->opcode_modifier.floatd)
4978 		continue;
4979 
4980 check_reverse:
4981 	      /* Try reversing direction of operands.  */
4982 	      overlap0 = operand_type_and (i.types[0], operand_types[1]);
4983 	      overlap1 = operand_type_and (i.types[1], operand_types[0]);
4984 	      if (!operand_type_match (overlap0, i.types[0])
4985 		  || !operand_type_match (overlap1, i.types[1])
4986 		  || (check_register
4987 		      && !operand_type_register_match (overlap0,
4988 						       i.types[0],
4989 						       operand_types[1],
4990 						       overlap1,
4991 						       i.types[1],
4992 						       operand_types[0])))
4993 		{
4994 		  /* Does not match either direction.  */
4995 		  continue;
4996 		}
4997 	      /* found_reverse_match holds which of D or FloatDR
4998 		 we've found.  */
4999 	      if (t->opcode_modifier.d)
5000 		found_reverse_match = Opcode_D;
5001 	      else if (t->opcode_modifier.floatd)
5002 		found_reverse_match = Opcode_FloatD;
5003 	      else
5004 		found_reverse_match = 0;
5005 	      if (t->opcode_modifier.floatr)
5006 		found_reverse_match |= Opcode_FloatR;
5007 	    }
5008 	  else
5009 	    {
5010 	      /* Found a forward 2 operand match here.  */
5011 	      switch (t->operands)
5012 		{
5013 		case 5:
5014 		  overlap4 = operand_type_and (i.types[4],
5015 					       operand_types[4]);
5016 		case 4:
5017 		  overlap3 = operand_type_and (i.types[3],
5018 					       operand_types[3]);
5019 		case 3:
5020 		  overlap2 = operand_type_and (i.types[2],
5021 					       operand_types[2]);
5022 		  break;
5023 		}
5024 
5025 	      switch (t->operands)
5026 		{
5027 		case 5:
5028 		  if (!operand_type_match (overlap4, i.types[4])
5029 		      || !operand_type_register_match (overlap3,
5030 						       i.types[3],
5031 						       operand_types[3],
5032 						       overlap4,
5033 						       i.types[4],
5034 						       operand_types[4]))
5035 		    continue;
5036 		case 4:
5037 		  if (!operand_type_match (overlap3, i.types[3])
5038 		      || (check_register
5039 			  && !operand_type_register_match (overlap2,
5040 							   i.types[2],
5041 							   operand_types[2],
5042 							   overlap3,
5043 							   i.types[3],
5044 							   operand_types[3])))
5045 		    continue;
5046 		case 3:
5047 		  /* Here we make use of the fact that there are no
5048 		     reverse match 3 operand instructions, and all 3
5049 		     operand instructions only need to be checked for
5050 		     register consistency between operands 2 and 3.  */
5051 		  if (!operand_type_match (overlap2, i.types[2])
5052 		      || (check_register
5053 			  && !operand_type_register_match (overlap1,
5054 							   i.types[1],
5055 							   operand_types[1],
5056 							   overlap2,
5057 							   i.types[2],
5058 							   operand_types[2])))
5059 		    continue;
5060 		  break;
5061 		}
5062 	    }
5063 	  /* Found either forward/reverse 2, 3 or 4 operand match here:
5064 	     slip through to break.  */
5065 	}
5066       if (!found_cpu_match)
5067 	{
5068 	  found_reverse_match = 0;
5069 	  continue;
5070 	}
5071 
5072       /* Check if vector and VEX operands are valid.  */
5073       if (check_VecOperands (t) || VEX_check_operands (t))
5074 	{
5075 	  specific_error = i.error;
5076 	  continue;
5077 	}
5078 
5079       /* We've found a match; break out of loop.  */
5080       break;
5081     }
5082 
5083   if (t == current_templates->end)
5084     {
5085       /* We found no match.  */
5086       const char *err_msg;
5087       switch (specific_error ? specific_error : i.error)
5088 	{
5089 	default:
5090 	  abort ();
5091 	case operand_size_mismatch:
5092 	  err_msg = _("operand size mismatch");
5093 	  break;
5094 	case operand_type_mismatch:
5095 	  err_msg = _("operand type mismatch");
5096 	  break;
5097 	case register_type_mismatch:
5098 	  err_msg = _("register type mismatch");
5099 	  break;
5100 	case number_of_operands_mismatch:
5101 	  err_msg = _("number of operands mismatch");
5102 	  break;
5103 	case invalid_instruction_suffix:
5104 	  err_msg = _("invalid instruction suffix");
5105 	  break;
5106 	case bad_imm4:
5107 	  err_msg = _("constant doesn't fit in 4 bits");
5108 	  break;
5109 	case old_gcc_only:
5110 	  err_msg = _("only supported with old gcc");
5111 	  break;
5112 	case unsupported_with_intel_mnemonic:
5113 	  err_msg = _("unsupported with Intel mnemonic");
5114 	  break;
5115 	case unsupported_syntax:
5116 	  err_msg = _("unsupported syntax");
5117 	  break;
5118 	case unsupported:
5119 	  as_bad (_("unsupported instruction `%s'"),
5120 		  current_templates->start->name);
5121 	  return NULL;
5122 	case invalid_vsib_address:
5123 	  err_msg = _("invalid VSIB address");
5124 	  break;
5125 	case invalid_vector_register_set:
5126 	  err_msg = _("mask, index, and destination registers must be distinct");
5127 	  break;
5128 	case unsupported_vector_index_register:
5129 	  err_msg = _("unsupported vector index register");
5130 	  break;
5131 	case unsupported_broadcast:
5132 	  err_msg = _("unsupported broadcast");
5133 	  break;
5134 	case broadcast_not_on_src_operand:
5135 	  err_msg = _("broadcast not on source memory operand");
5136 	  break;
5137 	case broadcast_needed:
5138 	  err_msg = _("broadcast is needed for operand of such type");
5139 	  break;
5140 	case unsupported_masking:
5141 	  err_msg = _("unsupported masking");
5142 	  break;
5143 	case mask_not_on_destination:
5144 	  err_msg = _("mask not on destination operand");
5145 	  break;
5146 	case no_default_mask:
5147 	  err_msg = _("default mask isn't allowed");
5148 	  break;
5149 	case unsupported_rc_sae:
5150 	  err_msg = _("unsupported static rounding/sae");
5151 	  break;
5152 	case rc_sae_operand_not_last_imm:
5153 	  if (intel_syntax)
5154 	    err_msg = _("RC/SAE operand must precede immediate operands");
5155 	  else
5156 	    err_msg = _("RC/SAE operand must follow immediate operands");
5157 	  break;
5158 	case invalid_register_operand:
5159 	  err_msg = _("invalid register operand");
5160 	  break;
5161 	}
5162       as_bad (_("%s for `%s'"), err_msg,
5163 	      current_templates->start->name);
5164       return NULL;
5165     }
5166 
5167   if (!quiet_warnings)
5168     {
5169       if (!intel_syntax
5170 	  && (i.types[0].bitfield.jumpabsolute
5171 	      != operand_types[0].bitfield.jumpabsolute))
5172 	{
5173 	  as_warn (_("indirect %s without `*'"), t->name);
5174 	}
5175 
5176       if (t->opcode_modifier.isprefix
5177 	  && t->opcode_modifier.ignoresize)
5178 	{
5179 	  /* Warn them that a data or address size prefix doesn't
5180 	     affect assembly of the next line of code.  */
5181 	  as_warn (_("stand-alone `%s' prefix"), t->name);
5182 	}
5183     }
5184 
5185   /* Copy the template we found.  */
5186   i.tm = *t;
5187 
5188   if (addr_prefix_disp != -1)
5189     i.tm.operand_types[addr_prefix_disp]
5190       = operand_types[addr_prefix_disp];
5191 
5192   if (found_reverse_match)
5193     {
5194       /* If we found a reverse match we must alter the opcode
5195 	 direction bit.  found_reverse_match holds bits to change
5196 	 (different for int & float insns).  */
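      /* E.g. the two register forms of integer "mov" differ only in the
	 direction bit, 0x88 (r/m8 <- r8) vs 0x8a (r8 <- r/m8), and
	 Opcode_D is that bit; the Float variants cover the corresponding
	 bits used by the x87 reg-reg forms.  */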
5197 
5198       i.tm.base_opcode ^= found_reverse_match;
5199 
5200       i.tm.operand_types[0] = operand_types[1];
5201       i.tm.operand_types[1] = operand_types[0];
5202     }
5203 
5204   return t;
5205 }
5206 
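/* String instructions hard-wire %es for the operand addressed through
   %(e/r)di, so e.g. "movsb %ds:(%esi), %fs:(%edi)" has to be rejected,
   while a segment override on the %(e/r)si side is legal.  */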
5207 static int
5208 check_string (void)
5209 {
5210   int mem_op = operand_type_check (i.types[0], anymem) ? 0 : 1;
5211   if (i.tm.operand_types[mem_op].bitfield.esseg)
5212     {
5213       if (i.seg[0] != NULL && i.seg[0] != &es)
5214 	{
5215 	  as_bad (_("`%s' operand %d must use `%ses' segment"),
5216 		  i.tm.name,
5217 		  mem_op + 1,
5218 		  register_prefix);
5219 	  return 0;
5220 	}
5221       /* There's only ever one segment override allowed per instruction.
5222 	 This instruction possibly has a legal segment override on the
5223 	 second operand, so copy the segment to where non-string
5224 	 instructions store it, allowing common code.  */
5225       i.seg[0] = i.seg[1];
5226     }
5227   else if (i.tm.operand_types[mem_op + 1].bitfield.esseg)
5228     {
5229       if (i.seg[1] != NULL && i.seg[1] != &es)
5230 	{
5231 	  as_bad (_("`%s' operand %d must use `%ses' segment"),
5232 		  i.tm.name,
5233 		  mem_op + 2,
5234 		  register_prefix);
5235 	  return 0;
5236 	}
5237     }
5238   return 1;
5239 }
5240 
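/* Derive or validate the operand size suffix.  In AT&T syntax an
   instruction such as `mov %eax, %ebx' carries no explicit suffix, so
   an `l' suffix is inferred here from its 32-bit register operands.  */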
5241 static int
5242 process_suffix (void)
5243 {
5244   /* If matched instruction specifies an explicit instruction mnemonic
5245      suffix, use it.  */
5246   if (i.tm.opcode_modifier.size16)
5247     i.suffix = WORD_MNEM_SUFFIX;
5248   else if (i.tm.opcode_modifier.size32)
5249     i.suffix = LONG_MNEM_SUFFIX;
5250   else if (i.tm.opcode_modifier.size64)
5251     i.suffix = QWORD_MNEM_SUFFIX;
5252   else if (i.reg_operands)
5253     {
5254       /* If there's no instruction mnemonic suffix we try to invent one
5255 	 based on register operands.  */
5256       if (!i.suffix)
5257 	{
5258 	  /* We take i.suffix from the last register operand specified.
5259 	     The destination register type is more significant than the
5260 	     source register type.  crc32 in SSE4.2 prefers the source
5261 	     register type.  */
5262 	  if (i.tm.base_opcode == 0xf20f38f1)
5263 	    {
5264 	      if (i.types[0].bitfield.reg16)
5265 		i.suffix = WORD_MNEM_SUFFIX;
5266 	      else if (i.types[0].bitfield.reg32)
5267 		i.suffix = LONG_MNEM_SUFFIX;
5268 	      else if (i.types[0].bitfield.reg64)
5269 		i.suffix = QWORD_MNEM_SUFFIX;
5270 	    }
5271 	  else if (i.tm.base_opcode == 0xf20f38f0)
5272 	    {
5273 	      if (i.types[0].bitfield.reg8)
5274 		i.suffix = BYTE_MNEM_SUFFIX;
5275 	    }
5276 
5277 	  if (!i.suffix)
5278 	    {
5279 	      int op;
5280 
5281 	      if (i.tm.base_opcode == 0xf20f38f1
5282 		  || i.tm.base_opcode == 0xf20f38f0)
5283 		{
5284 		  /* We have to know the operand size for crc32.  */
5285 		  as_bad (_("ambiguous memory operand size for `%s'"),
5286 			  i.tm.name);
5287 		  return 0;
5288 		}
5289 
5290 	      for (op = i.operands; --op >= 0;)
5291 		if (!i.tm.operand_types[op].bitfield.inoutportreg)
5292 		  {
5293 		    if (i.types[op].bitfield.reg8)
5294 		      {
5295 			i.suffix = BYTE_MNEM_SUFFIX;
5296 			break;
5297 		      }
5298 		    else if (i.types[op].bitfield.reg16)
5299 		      {
5300 			i.suffix = WORD_MNEM_SUFFIX;
5301 			break;
5302 		      }
5303 		    else if (i.types[op].bitfield.reg32)
5304 		      {
5305 			i.suffix = LONG_MNEM_SUFFIX;
5306 			break;
5307 		      }
5308 		    else if (i.types[op].bitfield.reg64)
5309 		      {
5310 			i.suffix = QWORD_MNEM_SUFFIX;
5311 			break;
5312 		      }
5313 		  }
5314 	    }
5315 	}
5316       else if (i.suffix == BYTE_MNEM_SUFFIX)
5317 	{
5318 	  if (intel_syntax
5319 	      && i.tm.opcode_modifier.ignoresize
5320 	      && i.tm.opcode_modifier.no_bsuf)
5321 	    i.suffix = 0;
5322 	  else if (!check_byte_reg ())
5323 	    return 0;
5324 	}
5325       else if (i.suffix == LONG_MNEM_SUFFIX)
5326 	{
5327 	  if (intel_syntax
5328 	      && i.tm.opcode_modifier.ignoresize
5329 	      && i.tm.opcode_modifier.no_lsuf)
5330 	    i.suffix = 0;
5331 	  else if (!check_long_reg ())
5332 	    return 0;
5333 	}
5334       else if (i.suffix == QWORD_MNEM_SUFFIX)
5335 	{
5336 	  if (intel_syntax
5337 	      && i.tm.opcode_modifier.ignoresize
5338 	      && i.tm.opcode_modifier.no_qsuf)
5339 	    i.suffix = 0;
5340 	  else if (!check_qword_reg ())
5341 	    return 0;
5342 	}
5343       else if (i.suffix == WORD_MNEM_SUFFIX)
5344 	{
5345 	  if (intel_syntax
5346 	      && i.tm.opcode_modifier.ignoresize
5347 	      && i.tm.opcode_modifier.no_wsuf)
5348 	    i.suffix = 0;
5349 	  else if (!check_word_reg ())
5350 	    return 0;
5351 	}
5352       else if (i.suffix == XMMWORD_MNEM_SUFFIX
5353 	       || i.suffix == YMMWORD_MNEM_SUFFIX
5354 	       || i.suffix == ZMMWORD_MNEM_SUFFIX)
5355 	{
5356 	  /* Skip if the instruction has x/y/z suffix.  match_template
5357 	     should check if it is a valid suffix.  */
5358 	}
5359       else if (intel_syntax && i.tm.opcode_modifier.ignoresize)
5360 	/* Do nothing if the instruction is going to ignore the prefix.  */
5361 	;
5362       else
5363 	abort ();
5364     }
5365   else if (i.tm.opcode_modifier.defaultsize
5366 	   && !i.suffix
5367 	   /* exclude fldenv/frstor/fsave/fstenv */
5368 	   && i.tm.opcode_modifier.no_ssuf)
5369     {
5370       i.suffix = stackop_size;
5371     }
5372   else if (intel_syntax
5373 	   && !i.suffix
5374 	   && (i.tm.operand_types[0].bitfield.jumpabsolute
5375 	       || i.tm.opcode_modifier.jumpbyte
5376 	       || i.tm.opcode_modifier.jumpintersegment
5377 	       || (i.tm.base_opcode == 0x0f01 /* [ls][gi]dt */
5378 		   && i.tm.extension_opcode <= 3)))
5379     {
5380       switch (flag_code)
5381 	{
5382 	case CODE_64BIT:
5383 	  if (!i.tm.opcode_modifier.no_qsuf)
5384 	    {
5385 	      i.suffix = QWORD_MNEM_SUFFIX;
5386 	      break;
5387 	    }
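	  /* Fall through.  */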
5388 	case CODE_32BIT:
5389 	  if (!i.tm.opcode_modifier.no_lsuf)
5390 	    i.suffix = LONG_MNEM_SUFFIX;
5391 	  break;
5392 	case CODE_16BIT:
5393 	  if (!i.tm.opcode_modifier.no_wsuf)
5394 	    i.suffix = WORD_MNEM_SUFFIX;
5395 	  break;
5396 	}
5397     }
5398 
5399   if (!i.suffix)
5400     {
5401       if (!intel_syntax)
5402 	{
5403 	  if (i.tm.opcode_modifier.w)
5404 	    {
5405 	      as_bad (_("no instruction mnemonic suffix given and "
5406 			"no register operands; can't size instruction"));
5407 	      return 0;
5408 	    }
5409 	}
5410       else
5411 	{
5412 	  unsigned int suffixes;
5413 
5414 	  suffixes = !i.tm.opcode_modifier.no_bsuf;
5415 	  if (!i.tm.opcode_modifier.no_wsuf)
5416 	    suffixes |= 1 << 1;
5417 	  if (!i.tm.opcode_modifier.no_lsuf)
5418 	    suffixes |= 1 << 2;
5419 	  if (!i.tm.opcode_modifier.no_ldsuf)
5420 	    suffixes |= 1 << 3;
5421 	  if (!i.tm.opcode_modifier.no_ssuf)
5422 	    suffixes |= 1 << 4;
5423 	  if (!i.tm.opcode_modifier.no_qsuf)
5424 	    suffixes |= 1 << 5;
5425 
5426 	  /* There is more than one possible suffix match.  */
5427 	  if (i.tm.opcode_modifier.w
5428 	      || ((suffixes & (suffixes - 1))
5429 		  && !i.tm.opcode_modifier.defaultsize
5430 		  && !i.tm.opcode_modifier.ignoresize))
5431 	    {
5432 	      as_bad (_("ambiguous operand size for `%s'"), i.tm.name);
5433 	      return 0;
5434 	    }
5435 	}
5436     }
5437 
5438   /* Change the opcode based on the operand size given by i.suffix.
5439      We don't need to change things for byte insns.  */
5440 
5441   if (i.suffix
5442       && i.suffix != BYTE_MNEM_SUFFIX
5443       && i.suffix != XMMWORD_MNEM_SUFFIX
5444       && i.suffix != YMMWORD_MNEM_SUFFIX
5445       && i.suffix != ZMMWORD_MNEM_SUFFIX)
5446     {
5447       /* It's not a byte, select word/dword operation.  */
5448       if (i.tm.opcode_modifier.w)
5449 	{
5450 	  if (i.tm.opcode_modifier.shortform)
5451 	    i.tm.base_opcode |= 8;
5452 	  else
5453 	    i.tm.base_opcode |= 1;
5454 	}
5455 
5456       /* Now select between word & dword operations via the operand
5457 	 size prefix, except for instructions that will ignore this
5458 	 prefix anyway.  */
5459       if (i.tm.opcode_modifier.addrprefixop0)
5460 	{
5461 	  /* The address size override prefix changes the size of the
5462 	     first operand.  */
5463 	  if ((flag_code == CODE_32BIT
5464 	       && i.op->regs[0].reg_type.bitfield.reg16)
5465 	      || (flag_code != CODE_32BIT
5466 		  && i.op->regs[0].reg_type.bitfield.reg32))
5467 	    if (!add_prefix (ADDR_PREFIX_OPCODE))
5468 	      return 0;
5469 	}
5470       else if (i.suffix != QWORD_MNEM_SUFFIX
5471 	       && i.suffix != LONG_DOUBLE_MNEM_SUFFIX
5472 	       && !i.tm.opcode_modifier.ignoresize
5473 	       && !i.tm.opcode_modifier.floatmf
5474 	       && ((i.suffix == LONG_MNEM_SUFFIX) == (flag_code == CODE_16BIT)
5475 		   || (flag_code == CODE_64BIT
5476 		       && i.tm.opcode_modifier.jumpbyte)))
5477 	{
5478 	  unsigned int prefix = DATA_PREFIX_OPCODE;
5479 
5480 	  if (i.tm.opcode_modifier.jumpbyte) /* jcxz, loop */
5481 	    prefix = ADDR_PREFIX_OPCODE;
5482 
5483 	  if (!add_prefix (prefix))
5484 	    return 0;
5485 	}
5486 
5487       /* Set mode64 for an operand.  */
5488       if (i.suffix == QWORD_MNEM_SUFFIX
5489 	  && flag_code == CODE_64BIT
5490 	  && !i.tm.opcode_modifier.norex64)
5491 	{
5492 	  /* Special case for xchg %rax,%rax.  It is a NOP and doesn't
5493 	     need rex64.  cmpxchg8b is also a special case.  */
5494 	  if (! (i.operands == 2
5495 		 && i.tm.base_opcode == 0x90
5496 		 && i.tm.extension_opcode == None
5497 		 && operand_type_equal (&i.types [0], &acc64)
5498 		 && operand_type_equal (&i.types [1], &acc64))
5499 	      && ! (i.operands == 1
5500 		    && i.tm.base_opcode == 0xfc7
5501 		    && i.tm.extension_opcode == 1
5502 		    && !operand_type_check (i.types [0], reg)
5503 		    && operand_type_check (i.types [0], anymem)))
5504 	    i.rex |= REX_W;
5505 	}
5506 
5507       /* Size floating point instruction.  */
5508       if (i.suffix == LONG_MNEM_SUFFIX)
5509 	if (i.tm.opcode_modifier.floatmf)
5510 	  i.tm.base_opcode ^= 4;
5511     }
5512 
5513   return 1;
5514 }
5515 
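/* Check register operands against a `b' suffix.  For example, outside
   64-bit mode `movb %ax, (%ebx)' is accepted with a warning that %al is
   used instead of %ax, while an MMX or SSE register used with a `b'
   suffix is rejected.  */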
5516 static int
5517 check_byte_reg (void)
5518 {
5519   int op;
5520 
5521   for (op = i.operands; --op >= 0;)
5522     {
5523       /* If this is an eight bit register, it's OK.  If it's the 16 or
5524 	 32 bit version of an eight bit register, we will just use the
5525 	 low portion, and that's OK too.  */
5526       if (i.types[op].bitfield.reg8)
5527 	continue;
5528 
5529       /* I/O port address operands are OK too.  */
5530       if (i.tm.operand_types[op].bitfield.inoutportreg)
5531 	continue;
5532 
5533       /* crc32 doesn't generate this warning.  */
5534       if (i.tm.base_opcode == 0xf20f38f0)
5535 	continue;
5536 
5537       if ((i.types[op].bitfield.reg16
5538 	   || i.types[op].bitfield.reg32
5539 	   || i.types[op].bitfield.reg64)
5540 	  && i.op[op].regs->reg_num < 4
5541 	  /* Prohibit these changes in 64bit mode, since the lowering
5542 	     would be more complicated.  */
5543 	  && flag_code != CODE_64BIT)
5544 	{
5545 #if REGISTER_WARNINGS
5546 	  if (!quiet_warnings)
5547 	    as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5548 		     register_prefix,
5549 		     (i.op[op].regs + (i.types[op].bitfield.reg16
5550 				       ? REGNAM_AL - REGNAM_AX
5551 				       : REGNAM_AL - REGNAM_EAX))->reg_name,
5552 		     register_prefix,
5553 		     i.op[op].regs->reg_name,
5554 		     i.suffix);
5555 #endif
5556 	  continue;
5557 	}
5558       /* Any other register is bad.  */
5559       if (i.types[op].bitfield.reg16
5560 	  || i.types[op].bitfield.reg32
5561 	  || i.types[op].bitfield.reg64
5562 	  || i.types[op].bitfield.regmmx
5563 	  || i.types[op].bitfield.regxmm
5564 	  || i.types[op].bitfield.regymm
5565 	  || i.types[op].bitfield.regzmm
5566 	  || i.types[op].bitfield.sreg2
5567 	  || i.types[op].bitfield.sreg3
5568 	  || i.types[op].bitfield.control
5569 	  || i.types[op].bitfield.debug
5570 	  || i.types[op].bitfield.test
5571 	  || i.types[op].bitfield.floatreg
5572 	  || i.types[op].bitfield.floatacc)
5573 	{
5574 	  as_bad (_("`%s%s' not allowed with `%s%c'"),
5575 		  register_prefix,
5576 		  i.op[op].regs->reg_name,
5577 		  i.tm.name,
5578 		  i.suffix);
5579 	  return 0;
5580 	}
5581     }
5582   return 1;
5583 }
5584 
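/* Check register operands against an `l' suffix.  A 16-bit register
   given where the suffix expects a 32-bit one draws a warning outside
   64-bit mode (e.g. %eax is substituted for %ax) and is an error in
   64-bit mode.  */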
5585 static int
5586 check_long_reg (void)
5587 {
5588   int op;
5589 
5590   for (op = i.operands; --op >= 0;)
5591     /* Reject eight bit registers, except where the template requires
5592        them.  (e.g. movzb)  */
5593     if (i.types[op].bitfield.reg8
5594 	&& (i.tm.operand_types[op].bitfield.reg16
5595 	    || i.tm.operand_types[op].bitfield.reg32
5596 	    || i.tm.operand_types[op].bitfield.acc))
5597       {
5598 	as_bad (_("`%s%s' not allowed with `%s%c'"),
5599 		register_prefix,
5600 		i.op[op].regs->reg_name,
5601 		i.tm.name,
5602 		i.suffix);
5603 	return 0;
5604       }
5605     /* Warn if the e prefix on a general reg is missing.  */
5606     else if ((!quiet_warnings || flag_code == CODE_64BIT)
5607 	     && i.types[op].bitfield.reg16
5608 	     && (i.tm.operand_types[op].bitfield.reg32
5609 		 || i.tm.operand_types[op].bitfield.acc))
5610       {
5611 	/* Prohibit these changes in the 64bit mode, since the
5612 	   lowering is more complicated.  */
5613 	if (flag_code == CODE_64BIT)
5614 	  {
5615 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5616 		    register_prefix, i.op[op].regs->reg_name,
5617 		    i.suffix);
5618 	    return 0;
5619 	  }
5620 #if REGISTER_WARNINGS
5621 	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5622 		 register_prefix,
5623 		 (i.op[op].regs + REGNAM_EAX - REGNAM_AX)->reg_name,
5624 		 register_prefix, i.op[op].regs->reg_name, i.suffix);
5625 #endif
5626       }
5627     /* Warn if the r prefix on a general reg is present.  */
5628     else if (i.types[op].bitfield.reg64
5629 	     && (i.tm.operand_types[op].bitfield.reg32
5630 		 || i.tm.operand_types[op].bitfield.acc))
5631       {
5632 	if (intel_syntax
5633 	    && i.tm.opcode_modifier.toqword
5634 	    && !i.types[0].bitfield.regxmm)
5635 	  {
5636 	    /* Convert to QWORD.  We want REX byte. */
5637 	    i.suffix = QWORD_MNEM_SUFFIX;
5638 	  }
5639 	else
5640 	  {
5641 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5642 		    register_prefix, i.op[op].regs->reg_name,
5643 		    i.suffix);
5644 	    return 0;
5645 	  }
5646       }
5647   return 1;
5648 }
5649 
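/* Check register operands against a `q' suffix.  A 16- or 32-bit
   general register used with a `q' suffix is normally an error; in
   Intel syntax, templates marked ToDword instead demote the operation
   to dword (no REX.W).  */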
5650 static int
5651 check_qword_reg (void)
5652 {
5653   int op;
5654 
5655   for (op = i.operands; --op >= 0; )
5656     /* Reject eight bit registers, except where the template requires
5657        them.  (e.g. movzb)  */
5658     if (i.types[op].bitfield.reg8
5659 	&& (i.tm.operand_types[op].bitfield.reg16
5660 	    || i.tm.operand_types[op].bitfield.reg32
5661 	    || i.tm.operand_types[op].bitfield.acc))
5662       {
5663 	as_bad (_("`%s%s' not allowed with `%s%c'"),
5664 		register_prefix,
5665 		i.op[op].regs->reg_name,
5666 		i.tm.name,
5667 		i.suffix);
5668 	return 0;
5669       }
5670     /* Warn if the r prefix on a general reg is missing.  */
5671     else if ((i.types[op].bitfield.reg16
5672 	      || i.types[op].bitfield.reg32)
5673 	     && (i.tm.operand_types[op].bitfield.reg32
5674 		 || i.tm.operand_types[op].bitfield.acc))
5675       {
5676 	/* Prohibit these changes in the 64bit mode, since the
5677 	   lowering is more complicated.  */
5678 	if (intel_syntax
5679 	    && i.tm.opcode_modifier.todword
5680 	    && !i.types[0].bitfield.regxmm)
5681 	  {
5682 	    /* Convert to DWORD.  We don't want REX byte. */
5683 	    i.suffix = LONG_MNEM_SUFFIX;
5684 	  }
5685 	else
5686 	  {
5687 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5688 		    register_prefix, i.op[op].regs->reg_name,
5689 		    i.suffix);
5690 	    return 0;
5691 	  }
5692       }
5693   return 1;
5694 }
5695 
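/* Check register operands against a `w' suffix.  A 32- or 64-bit
   register given where a 16-bit one is expected warns outside 64-bit
   mode (the 16-bit register is substituted) and is an error in 64-bit
   mode.  */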
5696 static int
5697 check_word_reg (void)
5698 {
5699   int op;
5700   for (op = i.operands; --op >= 0;)
5701     /* Reject eight bit registers, except where the template requires
5702        them.  (e.g. movzb)  */
5703     if (i.types[op].bitfield.reg8
5704 	&& (i.tm.operand_types[op].bitfield.reg16
5705 	    || i.tm.operand_types[op].bitfield.reg32
5706 	    || i.tm.operand_types[op].bitfield.acc))
5707       {
5708 	as_bad (_("`%s%s' not allowed with `%s%c'"),
5709 		register_prefix,
5710 		i.op[op].regs->reg_name,
5711 		i.tm.name,
5712 		i.suffix);
5713 	return 0;
5714       }
5715     /* Warn if the e or r prefix on a general reg is present.  */
5716     else if ((!quiet_warnings || flag_code == CODE_64BIT)
5717 	     && (i.types[op].bitfield.reg32
5718 		 || i.types[op].bitfield.reg64)
5719 	     && (i.tm.operand_types[op].bitfield.reg16
5720 		 || i.tm.operand_types[op].bitfield.acc))
5721       {
5722 	/* Prohibit these changes in the 64bit mode, since the
5723 	   lowering is more complicated.  */
5724 	if (flag_code == CODE_64BIT)
5725 	  {
5726 	    as_bad (_("incorrect register `%s%s' used with `%c' suffix"),
5727 		    register_prefix, i.op[op].regs->reg_name,
5728 		    i.suffix);
5729 	    return 0;
5730 	  }
5731 #if REGISTER_WARNINGS
5732 	as_warn (_("using `%s%s' instead of `%s%s' due to `%c' suffix"),
5733 		 register_prefix,
5734 		 (i.op[op].regs + REGNAM_AX - REGNAM_EAX)->reg_name,
5735 		 register_prefix, i.op[op].regs->reg_name, i.suffix);
5736 #endif
5737       }
5738   return 1;
5739 }
5740 
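/* Narrow the set of allowed immediate sizes for operand J.  For
   example, with an explicit `w' suffix only the 16-bit immediate form
   is kept; without a suffix, the data size implied by the current mode
   and any operand size prefix decides between the 16- and 32-bit
   forms.  */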
5741 static int
5742 update_imm (unsigned int j)
5743 {
5744   i386_operand_type overlap = i.types[j];
5745   if ((overlap.bitfield.imm8
5746        || overlap.bitfield.imm8s
5747        || overlap.bitfield.imm16
5748        || overlap.bitfield.imm32
5749        || overlap.bitfield.imm32s
5750        || overlap.bitfield.imm64)
5751       && !operand_type_equal (&overlap, &imm8)
5752       && !operand_type_equal (&overlap, &imm8s)
5753       && !operand_type_equal (&overlap, &imm16)
5754       && !operand_type_equal (&overlap, &imm32)
5755       && !operand_type_equal (&overlap, &imm32s)
5756       && !operand_type_equal (&overlap, &imm64))
5757     {
5758       if (i.suffix)
5759 	{
5760 	  i386_operand_type temp;
5761 
5762 	  operand_type_set (&temp, 0);
5763 	  if (i.suffix == BYTE_MNEM_SUFFIX)
5764 	    {
5765 	      temp.bitfield.imm8 = overlap.bitfield.imm8;
5766 	      temp.bitfield.imm8s = overlap.bitfield.imm8s;
5767 	    }
5768 	  else if (i.suffix == WORD_MNEM_SUFFIX)
5769 	    temp.bitfield.imm16 = overlap.bitfield.imm16;
5770 	  else if (i.suffix == QWORD_MNEM_SUFFIX)
5771 	    {
5772 	      temp.bitfield.imm64 = overlap.bitfield.imm64;
5773 	      temp.bitfield.imm32s = overlap.bitfield.imm32s;
5774 	    }
5775 	  else
5776 	    temp.bitfield.imm32 = overlap.bitfield.imm32;
5777 	  overlap = temp;
5778 	}
5779       else if (operand_type_equal (&overlap, &imm16_32_32s)
5780 	       || operand_type_equal (&overlap, &imm16_32)
5781 	       || operand_type_equal (&overlap, &imm16_32s))
5782 	{
5783 	  if ((flag_code == CODE_16BIT) ^ (i.prefix[DATA_PREFIX] != 0))
5784 	    overlap = imm16;
5785 	  else
5786 	    overlap = imm32s;
5787 	}
5788       if (!operand_type_equal (&overlap, &imm8)
5789 	  && !operand_type_equal (&overlap, &imm8s)
5790 	  && !operand_type_equal (&overlap, &imm16)
5791 	  && !operand_type_equal (&overlap, &imm32)
5792 	  && !operand_type_equal (&overlap, &imm32s)
5793 	  && !operand_type_equal (&overlap, &imm64))
5794 	{
5795 	  as_bad (_("no instruction mnemonic suffix given; "
5796 		    "can't determine immediate size"));
5797 	  return 0;
5798 	}
5799     }
5800   i.types[j] = overlap;
5801 
5802   return 1;
5803 }
5804 
5805 static int
5806 finalize_imm (void)
5807 {
5808   unsigned int j, n;
5809 
5810   /* Update the first 2 immediate operands.  */
5811   n = i.operands > 2 ? 2 : i.operands;
5812   if (n)
5813     {
5814       for (j = 0; j < n; j++)
5815 	if (update_imm (j) == 0)
5816 	  return 0;
5817 
5818       /* The 3rd operand can't be an immediate operand.  */
5819       gas_assert (operand_type_check (i.types[2], imm) == 0);
5820     }
5821 
5822   return 1;
5823 }
5824 
5825 static int
5826 bad_implicit_operand (int xmm)
5827 {
5828   const char *ireg = xmm ? "xmm0" : "ymm0";
5829 
5830   if (intel_syntax)
5831     as_bad (_("the last operand of `%s' must be `%s%s'"),
5832 	    i.tm.name, register_prefix, ireg);
5833   else
5834     as_bad (_("the first operand of `%s' must be `%s%s'"),
5835 	    i.tm.name, register_prefix, ireg);
5836   return 0;
5837 }
5838 
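/* Adjust the operand list before the ModRM/SIB bytes are built:
   implicit operands such as the %xmm0 of SSE4.1 `blendvps' are added or
   dropped, short-form register opcodes are fixed up, and the default
   segment is determined.  */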
5839 static int
5840 process_operands (void)
5841 {
5842   /* Default segment register this instruction will use for memory
5843      accesses.  0 means unknown.  This is only for optimizing out
5844      unnecessary segment overrides.  */
5845   const seg_entry *default_seg = 0;
5846 
5847   if (i.tm.opcode_modifier.sse2avx && i.tm.opcode_modifier.vexvvvv)
5848     {
5849       unsigned int dupl = i.operands;
5850       unsigned int dest = dupl - 1;
5851       unsigned int j;
5852 
5853       /* The destination must be an xmm register.  */
5854       gas_assert (i.reg_operands
5855 		  && MAX_OPERANDS > dupl
5856 		  && operand_type_equal (&i.types[dest], &regxmm));
5857 
5858       if (i.tm.opcode_modifier.firstxmm0)
5859 	{
5860 	  /* The first operand is implicit and must be xmm0.  */
5861 	  gas_assert (operand_type_equal (&i.types[0], &regxmm));
5862 	  if (register_number (i.op[0].regs) != 0)
5863 	    return bad_implicit_operand (1);
5864 
5865 	  if (i.tm.opcode_modifier.vexsources == VEX3SOURCES)
5866 	    {
5867 	      /* Keep xmm0 for instructions with VEX prefix and 3
5868 		 sources.  */
5869 	      goto duplicate;
5870 	    }
5871 	  else
5872 	    {
5873 	      /* We remove the first xmm0 and keep the number of
5874 		 operands unchanged, which in fact duplicates the
5875 		 destination.  */
5876 	      for (j = 1; j < i.operands; j++)
5877 		{
5878 		  i.op[j - 1] = i.op[j];
5879 		  i.types[j - 1] = i.types[j];
5880 		  i.tm.operand_types[j - 1] = i.tm.operand_types[j];
5881 		}
5882 	    }
5883 	}
5884       else if (i.tm.opcode_modifier.implicit1stxmm0)
5885 	{
5886 	  gas_assert ((MAX_OPERANDS - 1) > dupl
5887 		      && (i.tm.opcode_modifier.vexsources
5888 			  == VEX3SOURCES));
5889 
5890 	  /* Add the implicit xmm0 for instructions with VEX prefix
5891 	     and 3 sources.  */
5892 	  for (j = i.operands; j > 0; j--)
5893 	    {
5894 	      i.op[j] = i.op[j - 1];
5895 	      i.types[j] = i.types[j - 1];
5896 	      i.tm.operand_types[j] = i.tm.operand_types[j - 1];
5897 	    }
5898 	  i.op[0].regs
5899 	    = (const reg_entry *) hash_find (reg_hash, "xmm0");
5900 	  i.types[0] = regxmm;
5901 	  i.tm.operand_types[0] = regxmm;
5902 
5903 	  i.operands += 2;
5904 	  i.reg_operands += 2;
5905 	  i.tm.operands += 2;
5906 
5907 	  dupl++;
5908 	  dest++;
5909 	  i.op[dupl] = i.op[dest];
5910 	  i.types[dupl] = i.types[dest];
5911 	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5912 	}
5913       else
5914 	{
5915 duplicate:
5916 	  i.operands++;
5917 	  i.reg_operands++;
5918 	  i.tm.operands++;
5919 
5920 	  i.op[dupl] = i.op[dest];
5921 	  i.types[dupl] = i.types[dest];
5922 	  i.tm.operand_types[dupl] = i.tm.operand_types[dest];
5923 	}
5924 
5925        if (i.tm.opcode_modifier.immext)
5926 	 process_immext ();
5927     }
5928   else if (i.tm.opcode_modifier.firstxmm0)
5929     {
5930       unsigned int j;
5931 
5932       /* The first operand is implicit and must be xmm0/ymm0/zmm0.  */
5933       gas_assert (i.reg_operands
5934 		  && (operand_type_equal (&i.types[0], &regxmm)
5935 		      || operand_type_equal (&i.types[0], &regymm)
5936 		      || operand_type_equal (&i.types[0], &regzmm)));
5937       if (register_number (i.op[0].regs) != 0)
5938 	return bad_implicit_operand (i.types[0].bitfield.regxmm);
5939 
5940       for (j = 1; j < i.operands; j++)
5941 	{
5942 	  i.op[j - 1] = i.op[j];
5943 	  i.types[j - 1] = i.types[j];
5944 
5945 	  /* We need to adjust fields in i.tm since they are used by
5946 	     build_modrm_byte.  */
5947 	  i.tm.operand_types [j - 1] = i.tm.operand_types [j];
5948 	}
5949 
5950       i.operands--;
5951       i.reg_operands--;
5952       i.tm.operands--;
5953     }
5954   else if (i.tm.opcode_modifier.regkludge)
5955     {
5956       /* The imul $imm, %reg instruction is converted into
5957 	 imul $imm, %reg, %reg, and the clr %reg instruction
5958 	 is converted into xor %reg, %reg.  */
5959 
5960       unsigned int first_reg_op;
5961 
5962       if (operand_type_check (i.types[0], reg))
5963 	first_reg_op = 0;
5964       else
5965 	first_reg_op = 1;
5966       /* Pretend we saw the extra register operand.  */
5967       gas_assert (i.reg_operands == 1
5968 		  && i.op[first_reg_op + 1].regs == 0);
5969       i.op[first_reg_op + 1].regs = i.op[first_reg_op].regs;
5970       i.types[first_reg_op + 1] = i.types[first_reg_op];
5971       i.operands++;
5972       i.reg_operands++;
5973     }
5974 
5975   if (i.tm.opcode_modifier.shortform)
5976     {
5977       if (i.types[0].bitfield.sreg2
5978 	  || i.types[0].bitfield.sreg3)
5979 	{
5980 	  if (i.tm.base_opcode == POP_SEG_SHORT
5981 	      && i.op[0].regs->reg_num == 1)
5982 	    {
5983 	      as_bad (_("you can't `pop %scs'"), register_prefix);
5984 	      return 0;
5985 	    }
5986 	  i.tm.base_opcode |= (i.op[0].regs->reg_num << 3);
5987 	  if ((i.op[0].regs->reg_flags & RegRex) != 0)
5988 	    i.rex |= REX_B;
5989 	}
5990       else
5991 	{
5992 	  /* The register or float register operand is in operand
5993 	     0 or 1.  */
5994 	  unsigned int op;
5995 
5996 	  if (i.types[0].bitfield.floatreg
5997 	      || operand_type_check (i.types[0], reg))
5998 	    op = 0;
5999 	  else
6000 	    op = 1;
6001 	  /* Register goes in low 3 bits of opcode.  */
6002 	  i.tm.base_opcode |= i.op[op].regs->reg_num;
6003 	  if ((i.op[op].regs->reg_flags & RegRex) != 0)
6004 	    i.rex |= REX_B;
6005 	  if (!quiet_warnings && i.tm.opcode_modifier.ugh)
6006 	    {
6007 	      /* Warn about some common errors, but press on regardless.
6008 		 The first case can be generated by gcc (<= 2.8.1).  */
6009 	      if (i.operands == 2)
6010 		{
6011 		  /* Reversed arguments on faddp, fsubp, etc.  */
6012 		  as_warn (_("translating to `%s %s%s,%s%s'"), i.tm.name,
6013 			   register_prefix, i.op[!intel_syntax].regs->reg_name,
6014 			   register_prefix, i.op[intel_syntax].regs->reg_name);
6015 		}
6016 	      else
6017 		{
6018 		  /* Extraneous `l' suffix on fp insn.  */
6019 		  as_warn (_("translating to `%s %s%s'"), i.tm.name,
6020 			   register_prefix, i.op[0].regs->reg_name);
6021 		}
6022 	    }
6023 	}
6024     }
6025   else if (i.tm.opcode_modifier.modrm)
6026     {
6027       /* The opcode is completed (modulo i.tm.extension_opcode which
6028 	 must be put into the modrm byte).  Now, we make the modrm and
6029 	 index base bytes based on all the info we've collected.  */
6030 
6031       default_seg = build_modrm_byte ();
6032     }
6033   else if ((i.tm.base_opcode & ~0x3) == MOV_AX_DISP32)
6034     {
6035       default_seg = &ds;
6036     }
6037   else if (i.tm.opcode_modifier.isstring)
6038     {
6039       /* For the string instructions that allow a segment override
6040 	 on one of their operands, the default segment is ds.  */
6041       default_seg = &ds;
6042     }
6043 
6044   if (i.tm.base_opcode == 0x8d /* lea */
6045       && i.seg[0]
6046       && !quiet_warnings)
6047     as_warn (_("segment override on `%s' is ineffectual"), i.tm.name);
6048 
6049   /* If a segment was explicitly specified, and the specified segment
6050      is not the default, use an opcode prefix to select it.  If we
6051      never figured out what the default segment is, then default_seg
6052      will be zero at this point, and the specified segment prefix will
6053      always be used.  */
6054   if ((i.seg[0]) && (i.seg[0] != default_seg))
6055     {
6056       if (!add_prefix (i.seg[0]->seg_prefix))
6057 	return 0;
6058     }
6059   return 1;
6060 }
6061 
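/* Build the ModRM byte (and, when needed, the SIB byte and displacement
   type) from the collected operands.  Instructions with a VEX prefix
   and three sources, e.g. `vblendvps', are special: their extra
   register operand is encoded in the high four bits of an immediate
   byte, which is handled first below.  */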
6062 static const seg_entry *
6063 build_modrm_byte (void)
6064 {
6065   const seg_entry *default_seg = 0;
6066   unsigned int source, dest;
6067   int vex_3_sources;
6068 
6069   /* The first operand of instructions with VEX prefix and 3 sources
6070      must be VEX_Imm4.  */
6071   vex_3_sources = i.tm.opcode_modifier.vexsources == VEX3SOURCES;
6072   if (vex_3_sources)
6073     {
6074       unsigned int nds, reg_slot;
6075       expressionS *exp;
6076 
6077       if (i.tm.opcode_modifier.veximmext
6078           && i.tm.opcode_modifier.immext)
6079         {
6080           dest = i.operands - 2;
6081           gas_assert (dest == 3);
6082         }
6083       else
6084         dest = i.operands - 1;
6085       nds = dest - 1;
6086 
6087       /* There are 2 kinds of instructions:
6088          1. 5 operands: 4 register operands or 3 register operands
6089          plus 1 memory operand plus one Vec_Imm4 operand, VexXDS, and
6090          VexW0 or VexW1.  The destination must be either XMM, YMM or
6091 	 ZMM register.
6092          2. 4 operands: 4 register operands or 3 register operands
6093          plus 1 memory operand, VexXDS, and VexImmExt  */
6094       gas_assert ((i.reg_operands == 4
6095                    || (i.reg_operands == 3 && i.mem_operands == 1))
6096                   && i.tm.opcode_modifier.vexvvvv == VEXXDS
6097                   && (i.tm.opcode_modifier.veximmext
6098                       || (i.imm_operands == 1
6099                           && i.types[0].bitfield.vec_imm4
6100                           && (i.tm.opcode_modifier.vexw == VEXW0
6101                               || i.tm.opcode_modifier.vexw == VEXW1)
6102                           && (operand_type_equal (&i.tm.operand_types[dest], &regxmm)
6103                               || operand_type_equal (&i.tm.operand_types[dest], &regymm)
6104                               || operand_type_equal (&i.tm.operand_types[dest], &regzmm)))));
6105 
6106       if (i.imm_operands == 0)
6107         {
6108           /* When there is no immediate operand, generate an 8bit
6109              immediate operand to encode the first operand.  */
6110           exp = &im_expressions[i.imm_operands++];
6111           i.op[i.operands].imms = exp;
6112           i.types[i.operands] = imm8;
6113           i.operands++;
6114           /* If VexW1 is set, the first operand is the source and
6115              the second operand is encoded in the immediate operand.  */
6116           if (i.tm.opcode_modifier.vexw == VEXW1)
6117             {
6118               source = 0;
6119               reg_slot = 1;
6120             }
6121           else
6122             {
6123               source = 1;
6124               reg_slot = 0;
6125             }
6126 
6127           /* FMA swaps REG and NDS.  */
6128           if (i.tm.cpu_flags.bitfield.cpufma)
6129             {
6130               unsigned int tmp;
6131               tmp = reg_slot;
6132               reg_slot = nds;
6133               nds = tmp;
6134             }
6135 
6136           gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6137 					  &regxmm)
6138                       || operand_type_equal (&i.tm.operand_types[reg_slot],
6139                                              &regymm)
6140                       || operand_type_equal (&i.tm.operand_types[reg_slot],
6141                                              &regzmm));
6142           exp->X_op = O_constant;
6143           exp->X_add_number = register_number (i.op[reg_slot].regs) << 4;
6144 	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6145 	}
6146       else
6147         {
6148           unsigned int imm_slot;
6149 
6150           if (i.tm.opcode_modifier.vexw == VEXW0)
6151             {
6152               /* If VexW0 is set, the third operand is the source and
6153                  the second operand is encoded in the immediate
6154                  operand.  */
6155               source = 2;
6156               reg_slot = 1;
6157             }
6158           else
6159             {
6160               /* VexW1 is set, the second operand is the source and
6161                  the third operand is encoded in the immediate
6162                  operand.  */
6163               source = 1;
6164               reg_slot = 2;
6165             }
6166 
6167           if (i.tm.opcode_modifier.immext)
6168             {
6169               /* When ImmExt is set, the immediate byte is the last
6170                  operand.  */
6171               imm_slot = i.operands - 1;
6172               source--;
6173               reg_slot--;
6174             }
6175           else
6176             {
6177               imm_slot = 0;
6178 
6179               /* Turn on Imm8 so that output_imm will generate it.  */
6180               i.types[imm_slot].bitfield.imm8 = 1;
6181             }
6182 
6183           gas_assert (operand_type_equal (&i.tm.operand_types[reg_slot],
6184 					  &regxmm)
6185 		      || operand_type_equal (&i.tm.operand_types[reg_slot],
6186 					     &regymm)
6187 		      || operand_type_equal (&i.tm.operand_types[reg_slot],
6188 					     &regzmm));
6189           i.op[imm_slot].imms->X_add_number
6190               |= register_number (i.op[reg_slot].regs) << 4;
6191 	  gas_assert ((i.op[reg_slot].regs->reg_flags & RegVRex) == 0);
6192         }
6193 
6194       gas_assert (operand_type_equal (&i.tm.operand_types[nds], &regxmm)
6195                   || operand_type_equal (&i.tm.operand_types[nds],
6196                                          &regymm)
6197                   || operand_type_equal (&i.tm.operand_types[nds],
6198                                          &regzmm));
6199       i.vex.register_specifier = i.op[nds].regs;
6200     }
6201   else
6202     source = dest = 0;
6203 
6204   /* i.reg_operands MUST be the number of real register operands;
6205      implicit registers do not count.  If there are 3 register
6206      operands, it must be an instruction with VexNDS.  For an
6207      instruction with VexNDD, the destination register is encoded
6208      in the VEX prefix.  If there are 4 register operands, it must
6209      be an instruction with a VEX prefix and 3 sources.  */
6210   if (i.mem_operands == 0
6211       && ((i.reg_operands == 2
6212 	   && i.tm.opcode_modifier.vexvvvv <= VEXXDS)
6213 	  || (i.reg_operands == 3
6214 	      && i.tm.opcode_modifier.vexvvvv == VEXXDS)
6215 	  || (i.reg_operands == 4 && vex_3_sources)))
6216     {
6217       switch (i.operands)
6218 	{
6219 	case 2:
6220 	  source = 0;
6221 	  break;
6222 	case 3:
6223 	  /* When there are 3 operands, one of them may be an immediate,
6224 	     which may be the first or the last operand.  Otherwise,
6225 	     the first operand must be the shift count register (cl) or
6226 	     this is an instruction with VexNDS. */
6227 	  gas_assert (i.imm_operands == 1
6228 		      || (i.imm_operands == 0
6229 			  && (i.tm.opcode_modifier.vexvvvv == VEXXDS
6230 			      || i.types[0].bitfield.shiftcount)));
6231 	  if (operand_type_check (i.types[0], imm)
6232 	      || i.types[0].bitfield.shiftcount)
6233 	    source = 1;
6234 	  else
6235 	    source = 0;
6236 	  break;
6237 	case 4:
6238 	  /* When there are 4 operands, the first two must be 8bit
6239 	     immediate operands. The source operand will be the 3rd
6240 	     one.
6241 
6242 	     For instructions with VexNDS, if the first operand is
6243 	     an imm8, the source operand is the 2nd one.  If the last
6244 	     operand is an imm8, the source operand is the first one.  */
6245 	  gas_assert ((i.imm_operands == 2
6246 		       && i.types[0].bitfield.imm8
6247 		       && i.types[1].bitfield.imm8)
6248 		      || (i.tm.opcode_modifier.vexvvvv == VEXXDS
6249 			  && i.imm_operands == 1
6250 			  && (i.types[0].bitfield.imm8
6251 			      || i.types[i.operands - 1].bitfield.imm8
6252 			      || i.rounding)));
6253 	  if (i.imm_operands == 2)
6254 	    source = 2;
6255 	  else
6256 	    {
6257 	      if (i.types[0].bitfield.imm8)
6258 		source = 1;
6259 	      else
6260 		source = 0;
6261 	    }
6262 	  break;
6263 	case 5:
6264 	  if (i.tm.opcode_modifier.evex)
6265 	    {
6266 	      /* For EVEX instructions, when there are 5 operands, the
6267 		 first one must be an immediate operand.  If the second
6268 		 one is an immediate operand, the source operand is the
6269 		 3rd one.  If the last one is an immediate operand, the
6270 		 source operand is the 2nd one.  */
6271 	      gas_assert (i.imm_operands == 2
6272 			  && i.tm.opcode_modifier.sae
6273 			  && operand_type_check (i.types[0], imm));
6274 	      if (operand_type_check (i.types[1], imm))
6275 		source = 2;
6276 	      else if (operand_type_check (i.types[4], imm))
6277 		source = 1;
6278 	      else
6279 		abort ();
6280 	    }
6281 	  break;
6282 	default:
6283 	  abort ();
6284 	}
6285 
6286       if (!vex_3_sources)
6287 	{
6288 	  dest = source + 1;
6289 
6290 	  /* RC/SAE operand could be between DEST and SRC.  That happens
6291 	     when one operand is GPR and the other one is XMM/YMM/ZMM
6292 	     register.  */
6293 	  if (i.rounding && i.rounding->operand == (int) dest)
6294 	    dest++;
6295 
6296 	  if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6297 	    {
6298 	      /* For instructions with VexNDS, the register-only source
6299 		 operand must be 32/64bit integer, XMM, YMM or ZMM
6300 		 register.  It is encoded in VEX prefix.  We need to
6301 		 clear RegMem bit before calling operand_type_equal.  */
6302 
6303 	      i386_operand_type op;
6304 	      unsigned int vvvv;
6305 
6306 	      /* Check register-only source operand when two source
6307 		 operands are swapped.  */
6308 	      if (!i.tm.operand_types[source].bitfield.baseindex
6309 		  && i.tm.operand_types[dest].bitfield.baseindex)
6310 		{
6311 		  vvvv = source;
6312 		  source = dest;
6313 		}
6314 	      else
6315 		vvvv = dest;
6316 
6317 	      op = i.tm.operand_types[vvvv];
6318 	      op.bitfield.regmem = 0;
6319 	      if ((dest + 1) >= i.operands
6320 		  || (!op.bitfield.reg32
6321 		      && op.bitfield.reg64
6322 		      && !operand_type_equal (&op, &regxmm)
6323 		      && !operand_type_equal (&op, &regymm)
6324 		      && !operand_type_equal (&op, &regzmm)
6325 		      && !operand_type_equal (&op, &regmask)))
6326 		abort ();
6327 	      i.vex.register_specifier = i.op[vvvv].regs;
6328 	      dest++;
6329 	    }
6330 	}
6331 
6332       i.rm.mode = 3;
6333       /* One of the register operands will be encoded in the i.rm.reg
6334 	 field, the other in the combined i.rm.mode and i.rm.regmem
6335 	 fields.  If no form of this instruction supports a memory
6336 	 destination operand, then we assume the source operand may
6337 	 sometimes be a memory operand and so we need to store the
6338 	 destination in the i.rm.reg field.  */
6339       if (!i.tm.operand_types[dest].bitfield.regmem
6340 	  && operand_type_check (i.tm.operand_types[dest], anymem) == 0)
6341 	{
6342 	  i.rm.reg = i.op[dest].regs->reg_num;
6343 	  i.rm.regmem = i.op[source].regs->reg_num;
6344 	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6345 	    i.rex |= REX_R;
6346 	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6347 	    i.vrex |= REX_R;
6348 	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
6349 	    i.rex |= REX_B;
6350 	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6351 	    i.vrex |= REX_B;
6352 	}
6353       else
6354 	{
6355 	  i.rm.reg = i.op[source].regs->reg_num;
6356 	  i.rm.regmem = i.op[dest].regs->reg_num;
6357 	  if ((i.op[dest].regs->reg_flags & RegRex) != 0)
6358 	    i.rex |= REX_B;
6359 	  if ((i.op[dest].regs->reg_flags & RegVRex) != 0)
6360 	    i.vrex |= REX_B;
6361 	  if ((i.op[source].regs->reg_flags & RegRex) != 0)
6362 	    i.rex |= REX_R;
6363 	  if ((i.op[source].regs->reg_flags & RegVRex) != 0)
6364 	    i.vrex |= REX_R;
6365 	}
6366       if (flag_code != CODE_64BIT && (i.rex & (REX_R | REX_B)))
6367 	{
6368 	  if (!i.types[0].bitfield.control
6369 	      && !i.types[1].bitfield.control)
6370 	    abort ();
6371 	  i.rex &= ~(REX_R | REX_B);
6372 	  add_prefix (LOCK_PREFIX_OPCODE);
6373 	}
6374     }
6375   else
6376     {			/* If it's not 2 reg operands...  */
6377       unsigned int mem;
6378 
6379       if (i.mem_operands)
6380 	{
6381 	  unsigned int fake_zero_displacement = 0;
6382 	  unsigned int op;
6383 
6384 	  for (op = 0; op < i.operands; op++)
6385 	    if (operand_type_check (i.types[op], anymem))
6386 	      break;
6387 	  gas_assert (op < i.operands);
6388 
6389 	  if (i.tm.opcode_modifier.vecsib)
6390 	    {
6391 	      if (i.index_reg->reg_num == RegEiz
6392 		  || i.index_reg->reg_num == RegRiz)
6393 		abort ();
6394 
6395 	      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6396 	      if (!i.base_reg)
6397 		{
6398 		  i.sib.base = NO_BASE_REGISTER;
6399 		  i.sib.scale = i.log2_scale_factor;
6400 		  /* No Vec_Disp8 if there is no base.  */
6401 		  i.types[op].bitfield.vec_disp8 = 0;
6402 		  i.types[op].bitfield.disp8 = 0;
6403 		  i.types[op].bitfield.disp16 = 0;
6404 		  i.types[op].bitfield.disp64 = 0;
6405 		  if (flag_code != CODE_64BIT)
6406 		    {
6407 		      /* Must be 32 bit */
6408 		      i.types[op].bitfield.disp32 = 1;
6409 		      i.types[op].bitfield.disp32s = 0;
6410 		    }
6411 		  else
6412 		    {
6413 		      i.types[op].bitfield.disp32 = 0;
6414 		      i.types[op].bitfield.disp32s = 1;
6415 		    }
6416 		}
6417 	      i.sib.index = i.index_reg->reg_num;
6418 	      if ((i.index_reg->reg_flags & RegRex) != 0)
6419 		i.rex |= REX_X;
6420 	      if ((i.index_reg->reg_flags & RegVRex) != 0)
6421 		i.vrex |= REX_X;
6422 	    }
6423 
6424 	  default_seg = &ds;
6425 
6426 	  if (i.base_reg == 0)
6427 	    {
6428 	      i.rm.mode = 0;
6429 	      if (!i.disp_operands)
6430 		{
6431 		  fake_zero_displacement = 1;
6432 		  /* Instructions with VSIB byte need 32bit displacement
6433 		     if there is no base register.  */
6434 		  if (i.tm.opcode_modifier.vecsib)
6435 		    i.types[op].bitfield.disp32 = 1;
6436 		}
6437 	      if (i.index_reg == 0)
6438 		{
6439 		  gas_assert (!i.tm.opcode_modifier.vecsib);
6440 		  /* Operand is just <disp>  */
6441 		  if (flag_code == CODE_64BIT)
6442 		    {
6443 		      /* In 64-bit mode the plain 32-bit absolute
6444 			 addressing form is taken over by RIP-relative
6445 			 addressing, so absolute addressing has to be
6446 			 encoded via one of the redundant SIB forms.  */
6447 		      i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6448 		      i.sib.base = NO_BASE_REGISTER;
6449 		      i.sib.index = NO_INDEX_REGISTER;
6450 		      i.types[op] = ((i.prefix[ADDR_PREFIX] == 0)
6451 				     ? disp32s : disp32);
6452 		    }
6453 		  else if ((flag_code == CODE_16BIT)
6454 			   ^ (i.prefix[ADDR_PREFIX] != 0))
6455 		    {
6456 		      i.rm.regmem = NO_BASE_REGISTER_16;
6457 		      i.types[op] = disp16;
6458 		    }
6459 		  else
6460 		    {
6461 		      i.rm.regmem = NO_BASE_REGISTER;
6462 		      i.types[op] = disp32;
6463 		    }
6464 		}
6465 	      else if (!i.tm.opcode_modifier.vecsib)
6466 		{
6467 		  /* !i.base_reg && i.index_reg  */
6468 		  if (i.index_reg->reg_num == RegEiz
6469 		      || i.index_reg->reg_num == RegRiz)
6470 		    i.sib.index = NO_INDEX_REGISTER;
6471 		  else
6472 		    i.sib.index = i.index_reg->reg_num;
6473 		  i.sib.base = NO_BASE_REGISTER;
6474 		  i.sib.scale = i.log2_scale_factor;
6475 		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6476 		  /* No Vec_Disp8 if there is no base.  */
6477 		  i.types[op].bitfield.vec_disp8 = 0;
6478 		  i.types[op].bitfield.disp8 = 0;
6479 		  i.types[op].bitfield.disp16 = 0;
6480 		  i.types[op].bitfield.disp64 = 0;
6481 		  if (flag_code != CODE_64BIT)
6482 		    {
6483 		      /* Must be 32 bit */
6484 		      i.types[op].bitfield.disp32 = 1;
6485 		      i.types[op].bitfield.disp32s = 0;
6486 		    }
6487 		  else
6488 		    {
6489 		      i.types[op].bitfield.disp32 = 0;
6490 		      i.types[op].bitfield.disp32s = 1;
6491 		    }
6492 		  if ((i.index_reg->reg_flags & RegRex) != 0)
6493 		    i.rex |= REX_X;
6494 		}
6495 	    }
6496 	  /* RIP addressing for 64bit mode.  */
6497 	  else if (i.base_reg->reg_num == RegRip ||
6498 		   i.base_reg->reg_num == RegEip)
6499 	    {
6500 	      gas_assert (!i.tm.opcode_modifier.vecsib);
6501 	      i.rm.regmem = NO_BASE_REGISTER;
6502 	      i.types[op].bitfield.disp8 = 0;
6503 	      i.types[op].bitfield.disp16 = 0;
6504 	      i.types[op].bitfield.disp32 = 0;
6505 	      i.types[op].bitfield.disp32s = 1;
6506 	      i.types[op].bitfield.disp64 = 0;
6507 	      i.types[op].bitfield.vec_disp8 = 0;
6508 	      i.flags[op] |= Operand_PCrel;
6509 	      if (! i.disp_operands)
6510 		fake_zero_displacement = 1;
6511 	    }
6512 	  else if (i.base_reg->reg_type.bitfield.reg16)
6513 	    {
6514 	      gas_assert (!i.tm.opcode_modifier.vecsib);
6515 	      switch (i.base_reg->reg_num)
6516 		{
6517 		case 3: /* (%bx)  */
6518 		  if (i.index_reg == 0)
6519 		    i.rm.regmem = 7;
6520 		  else /* (%bx,%si) -> 0, or (%bx,%di) -> 1  */
6521 		    i.rm.regmem = i.index_reg->reg_num - 6;
6522 		  break;
6523 		case 5: /* (%bp)  */
6524 		  default_seg = &ss;
6525 		  if (i.index_reg == 0)
6526 		    {
6527 		      i.rm.regmem = 6;
6528 		      if (operand_type_check (i.types[op], disp) == 0)
6529 			{
6530 			  /* fake (%bp) into 0(%bp)  */
6531 			  if (i.tm.operand_types[op].bitfield.vec_disp8)
6532 			    i.types[op].bitfield.vec_disp8 = 1;
6533 			  else
6534 			    i.types[op].bitfield.disp8 = 1;
6535 			  fake_zero_displacement = 1;
6536 			}
6537 		    }
6538 		  else /* (%bp,%si) -> 2, or (%bp,%di) -> 3  */
6539 		    i.rm.regmem = i.index_reg->reg_num - 6 + 2;
6540 		  break;
6541 		default: /* (%si) -> 4 or (%di) -> 5  */
6542 		  i.rm.regmem = i.base_reg->reg_num - 6 + 4;
6543 		}
6544 	      i.rm.mode = mode_from_disp_size (i.types[op]);
6545 	    }
6546 	  else /* i.base_reg and 32/64 bit mode  */
6547 	    {
6548 	      if (flag_code == CODE_64BIT
6549 		  && operand_type_check (i.types[op], disp))
6550 		{
6551 		  i386_operand_type temp;
6552 		  operand_type_set (&temp, 0);
6553 		  temp.bitfield.disp8 = i.types[op].bitfield.disp8;
6554 		  temp.bitfield.vec_disp8
6555 		    = i.types[op].bitfield.vec_disp8;
6556 		  i.types[op] = temp;
6557 		  if (i.prefix[ADDR_PREFIX] == 0)
6558 		    i.types[op].bitfield.disp32s = 1;
6559 		  else
6560 		    i.types[op].bitfield.disp32 = 1;
6561 		}
6562 
6563 	      if (!i.tm.opcode_modifier.vecsib)
6564 		i.rm.regmem = i.base_reg->reg_num;
6565 	      if ((i.base_reg->reg_flags & RegRex) != 0)
6566 		i.rex |= REX_B;
6567 	      i.sib.base = i.base_reg->reg_num;
6568 	      /* x86-64 ignores REX prefix bit here to avoid decoder
6569 		 complications.  */
6570 	      if (!(i.base_reg->reg_flags & RegRex)
6571 		  && (i.base_reg->reg_num == EBP_REG_NUM
6572 		   || i.base_reg->reg_num == ESP_REG_NUM))
6573 		  default_seg = &ss;
6574 	      if (i.base_reg->reg_num == 5 && i.disp_operands == 0)
6575 		{
6576 		  fake_zero_displacement = 1;
6577 		  if (i.tm.operand_types [op].bitfield.vec_disp8)
6578 		    i.types[op].bitfield.vec_disp8 = 1;
6579 		  else
6580 		    i.types[op].bitfield.disp8 = 1;
6581 		}
6582 	      i.sib.scale = i.log2_scale_factor;
6583 	      if (i.index_reg == 0)
6584 		{
6585 		  gas_assert (!i.tm.opcode_modifier.vecsib);
6586 		  /* <disp>(%esp) becomes two byte modrm with no index
6587 		     register.  We've already stored the code for esp
6588 		     in i.rm.regmem, i.e. ESCAPE_TO_TWO_BYTE_ADDRESSING.
6589 		     Any base register besides %esp will not use the
6590 		     extra modrm byte.  */
6591 		  i.sib.index = NO_INDEX_REGISTER;
6592 		}
6593 	      else if (!i.tm.opcode_modifier.vecsib)
6594 		{
6595 		  if (i.index_reg->reg_num == RegEiz
6596 		      || i.index_reg->reg_num == RegRiz)
6597 		    i.sib.index = NO_INDEX_REGISTER;
6598 		  else
6599 		    i.sib.index = i.index_reg->reg_num;
6600 		  i.rm.regmem = ESCAPE_TO_TWO_BYTE_ADDRESSING;
6601 		  if ((i.index_reg->reg_flags & RegRex) != 0)
6602 		    i.rex |= REX_X;
6603 		}
6604 
6605 	      if (i.disp_operands
6606 		  && (i.reloc[op] == BFD_RELOC_386_TLS_DESC_CALL
6607 		      || i.reloc[op] == BFD_RELOC_X86_64_TLSDESC_CALL))
6608 		i.rm.mode = 0;
6609 	      else
6610 		{
6611 		  if (!fake_zero_displacement
6612 		      && !i.disp_operands
6613 		      && i.disp_encoding)
6614 		    {
6615 		      fake_zero_displacement = 1;
6616 		      if (i.disp_encoding == disp_encoding_8bit)
6617 			i.types[op].bitfield.disp8 = 1;
6618 		      else
6619 			i.types[op].bitfield.disp32 = 1;
6620 		    }
6621 		  i.rm.mode = mode_from_disp_size (i.types[op]);
6622 		}
6623 	    }
6624 
6625 	  if (fake_zero_displacement)
6626 	    {
6627 	      /* Fakes a zero displacement assuming that i.types[op]
6628 		 holds the correct displacement size.  */
6629 	      expressionS *exp;
6630 
6631 	      gas_assert (i.op[op].disps == 0);
6632 	      exp = &disp_expressions[i.disp_operands++];
6633 	      i.op[op].disps = exp;
6634 	      exp->X_op = O_constant;
6635 	      exp->X_add_number = 0;
6636 	      exp->X_add_symbol = (symbolS *) 0;
6637 	      exp->X_op_symbol = (symbolS *) 0;
6638 	    }
6639 
6640 	  mem = op;
6641 	}
6642       else
6643 	mem = ~0;
6644 
6645       if (i.tm.opcode_modifier.vexsources == XOP2SOURCES)
6646 	{
6647 	  if (operand_type_check (i.types[0], imm))
6648 	    i.vex.register_specifier = NULL;
6649 	  else
6650 	    {
6651 	      /* VEX.vvvv encodes one of the sources when the first
6652 		 operand is not an immediate.  */
6653 	      if (i.tm.opcode_modifier.vexw == VEXW0)
6654 		i.vex.register_specifier = i.op[0].regs;
6655 	      else
6656 		i.vex.register_specifier = i.op[1].regs;
6657 	    }
6658 
6659 	  /* Destination is a XMM register encoded in the ModRM.reg
6660 	     and VEX.R bit.  */
6661 	  i.rm.reg = i.op[2].regs->reg_num;
6662 	  if ((i.op[2].regs->reg_flags & RegRex) != 0)
6663 	    i.rex |= REX_R;
6664 
6665 	  /* ModRM.rm and VEX.B encodes the other source.  */
6666 	  if (!i.mem_operands)
6667 	    {
6668 	      i.rm.mode = 3;
6669 
6670 	      if (i.tm.opcode_modifier.vexw == VEXW0)
6671 		i.rm.regmem = i.op[1].regs->reg_num;
6672 	      else
6673 		i.rm.regmem = i.op[0].regs->reg_num;
6674 
6675 	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
6676 		i.rex |= REX_B;
6677 	    }
6678 	}
6679       else if (i.tm.opcode_modifier.vexvvvv == VEXLWP)
6680 	{
6681 	  i.vex.register_specifier = i.op[2].regs;
6682 	  if (!i.mem_operands)
6683 	    {
6684 	      i.rm.mode = 3;
6685 	      i.rm.regmem = i.op[1].regs->reg_num;
6686 	      if ((i.op[1].regs->reg_flags & RegRex) != 0)
6687 		i.rex |= REX_B;
6688 	    }
6689 	}
6690       /* Fill in i.rm.reg or i.rm.regmem field with register operand
6691 	 (if any) based on i.tm.extension_opcode.  Again, we must be
6692 	 careful to make sure that segment/control/debug/test/MMX
6693 	 registers are coded into the i.rm.reg field.  */
6694       else if (i.reg_operands)
6695 	{
6696 	  unsigned int op;
6697 	  unsigned int vex_reg = ~0;
6698 
6699 	  for (op = 0; op < i.operands; op++)
6700 	    if (i.types[op].bitfield.reg8
6701 		|| i.types[op].bitfield.reg16
6702 		|| i.types[op].bitfield.reg32
6703 		|| i.types[op].bitfield.reg64
6704 		|| i.types[op].bitfield.regmmx
6705 		|| i.types[op].bitfield.regxmm
6706 		|| i.types[op].bitfield.regymm
6707 		|| i.types[op].bitfield.regbnd
6708 		|| i.types[op].bitfield.regzmm
6709 		|| i.types[op].bitfield.regmask
6710 		|| i.types[op].bitfield.sreg2
6711 		|| i.types[op].bitfield.sreg3
6712 		|| i.types[op].bitfield.control
6713 		|| i.types[op].bitfield.debug
6714 		|| i.types[op].bitfield.test)
6715 	      break;
6716 
6717 	  if (vex_3_sources)
6718 	    op = dest;
6719 	  else if (i.tm.opcode_modifier.vexvvvv == VEXXDS)
6720 	    {
6721 	      /* For instructions with VexNDS, the register-only
6722 		 source operand is encoded in VEX prefix. */
6723 	      gas_assert (mem != (unsigned int) ~0);
6724 
6725 	      if (op > mem)
6726 		{
6727 		  vex_reg = op++;
6728 		  gas_assert (op < i.operands);
6729 		}
6730 	      else
6731 		{
6732 		  /* Check register-only source operand when two source
6733 		     operands are swapped.  */
6734 		  if (!i.tm.operand_types[op].bitfield.baseindex
6735 		      && i.tm.operand_types[op + 1].bitfield.baseindex)
6736 		    {
6737 		      vex_reg = op;
6738 		      op += 2;
6739 		      gas_assert (mem == (vex_reg + 1)
6740 				  && op < i.operands);
6741 		    }
6742 		  else
6743 		    {
6744 		      vex_reg = op + 1;
6745 		      gas_assert (vex_reg < i.operands);
6746 		    }
6747 		}
6748 	    }
6749 	  else if (i.tm.opcode_modifier.vexvvvv == VEXNDD)
6750 	    {
6751 	      /* For instructions with VexNDD, the register destination
6752 		 is encoded in VEX prefix.  */
6753 	      if (i.mem_operands == 0)
6754 		{
6755 		  /* There is no memory operand.  */
6756 		  gas_assert ((op + 2) == i.operands);
6757 		  vex_reg = op + 1;
6758 		}
6759 	      else
6760 		{
6761 		  /* There are only 2 operands.  */
6762 		  gas_assert (op < 2 && i.operands == 2);
6763 		  vex_reg = 1;
6764 		}
6765 	    }
6766 	  else
6767 	    gas_assert (op < i.operands);
6768 
6769 	  if (vex_reg != (unsigned int) ~0)
6770 	    {
6771 	      i386_operand_type *type = &i.tm.operand_types[vex_reg];
6772 
6773 	      if (type->bitfield.reg32 != 1
6774 		  && type->bitfield.reg64 != 1
6775 		  && !operand_type_equal (type, &regxmm)
6776 		  && !operand_type_equal (type, &regymm)
6777 		  && !operand_type_equal (type, &regzmm)
6778 		  && !operand_type_equal (type, &regmask))
6779 		abort ();
6780 
6781 	      i.vex.register_specifier = i.op[vex_reg].regs;
6782 	    }
6783 
6784 	  /* Don't set OP operand twice.  */
6785 	  if (vex_reg != op)
6786 	    {
6787 	      /* If there is an extension opcode to put here, the
6788 		 register number must be put into the regmem field.  */
6789 	      if (i.tm.extension_opcode != None)
6790 		{
6791 		  i.rm.regmem = i.op[op].regs->reg_num;
6792 		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
6793 		    i.rex |= REX_B;
6794 		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6795 		    i.vrex |= REX_B;
6796 		}
6797 	      else
6798 		{
6799 		  i.rm.reg = i.op[op].regs->reg_num;
6800 		  if ((i.op[op].regs->reg_flags & RegRex) != 0)
6801 		    i.rex |= REX_R;
6802 		  if ((i.op[op].regs->reg_flags & RegVRex) != 0)
6803 		    i.vrex |= REX_R;
6804 		}
6805 	    }
6806 
6807 	  /* Now, if no memory operand has set i.rm.mode = 0, 1, 2 we
6808 	     must set it to 3 to indicate this is a register operand
6809 	     in the regmem field.  */
6810 	  if (!i.mem_operands)
6811 	    i.rm.mode = 3;
6812 	}
6813 
6814       /* Fill in i.rm.reg field with extension opcode (if any).  */
6815       if (i.tm.extension_opcode != None)
6816 	i.rm.reg = i.tm.extension_opcode;
6817     }
6818   return default_seg;
6819 }
6820 
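/* Emit a relaxable branch to a symbol.  The branch starts out in its
   short form inside a machine dependent frag; md_convert_frag later
   widens it if the target turns out to be out of range.  */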
6821 static void
6822 output_branch (void)
6823 {
6824   char *p;
6825   int size;
6826   int code16;
6827   int prefix;
6828   relax_substateT subtype;
6829   symbolS *sym;
6830   offsetT off;
6831 
6832   code16 = flag_code == CODE_16BIT ? CODE16 : 0;
6833   size = i.disp_encoding == disp_encoding_32bit ? BIG : SMALL;
6834 
6835   prefix = 0;
6836   if (i.prefix[DATA_PREFIX] != 0)
6837     {
6838       prefix = 1;
6839       i.prefixes -= 1;
6840       code16 ^= CODE16;
6841     }
6842   /* Pentium4 branch hints.  */
6843   if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6844       || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6845     {
6846       prefix++;
6847       i.prefixes--;
6848     }
6849   if (i.prefix[REX_PREFIX] != 0)
6850     {
6851       prefix++;
6852       i.prefixes--;
6853     }
6854 
6855   /* BND prefixed jump.  */
6856   if (i.prefix[BND_PREFIX] != 0)
6857     {
6858       FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6859       i.prefixes -= 1;
6860     }
6861 
6862   if (i.prefixes != 0 && !intel_syntax)
6863     as_warn (_("skipping prefixes on this instruction"));
6864 
6865   /* It's always a symbol; end the frag and set up for relaxation.
6866      Make sure there is enough room in this frag for the largest
6867      instruction we may generate in md_convert_frag.  This is 2
6868      bytes for the opcode and room for the prefix and largest
6869      displacement.  */
6870   frag_grow (prefix + 2 + 4);
6871   /* Prefix and 1 opcode byte go in fr_fix.  */
6872   p = frag_more (prefix + 1);
6873   if (i.prefix[DATA_PREFIX] != 0)
6874     *p++ = DATA_PREFIX_OPCODE;
6875   if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE
6876       || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE)
6877     *p++ = i.prefix[SEG_PREFIX];
6878   if (i.prefix[REX_PREFIX] != 0)
6879     *p++ = i.prefix[REX_PREFIX];
6880   *p = i.tm.base_opcode;
6881 
6882   if ((unsigned char) *p == JUMP_PC_RELATIVE)
6883     subtype = ENCODE_RELAX_STATE (UNCOND_JUMP, size);
6884   else if (cpu_arch_flags.bitfield.cpui386)
6885     subtype = ENCODE_RELAX_STATE (COND_JUMP, size);
6886   else
6887     subtype = ENCODE_RELAX_STATE (COND_JUMP86, size);
6888   subtype |= code16;
6889 
6890   sym = i.op[0].disps->X_add_symbol;
6891   off = i.op[0].disps->X_add_number;
6892 
6893   if (i.op[0].disps->X_op != O_constant
6894       && i.op[0].disps->X_op != O_symbol)
6895     {
6896       /* Handle complex expressions.  */
6897       sym = make_expr_symbol (i.op[0].disps);
6898       off = 0;
6899     }
6900 
6901   /* 1 possible extra opcode + 4 byte displacement go in var part.
6902      Pass reloc in fr_var.  */
6903   frag_var (rs_machine_dependent, 5, i.reloc[0], subtype, sym, off, p);
6904 }
6905 
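/* Emit a jump with a fixed-size displacement: a 1-byte displacement for
   jecxz/loop style instructions, otherwise 2 or 4 bytes depending on
   the operand size, with a fix-up recorded for the target.  */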
6906 static void
6907 output_jump (void)
6908 {
6909   char *p;
6910   int size;
6911   fixS *fixP;
6912 
6913   if (i.tm.opcode_modifier.jumpbyte)
6914     {
6915       /* This is a loop or jecxz type instruction.  */
6916       size = 1;
6917       if (i.prefix[ADDR_PREFIX] != 0)
6918 	{
6919 	  FRAG_APPEND_1_CHAR (ADDR_PREFIX_OPCODE);
6920 	  i.prefixes -= 1;
6921 	}
6922       /* Pentium4 branch hints.  */
6923       if (i.prefix[SEG_PREFIX] == CS_PREFIX_OPCODE /* not taken */
6924 	  || i.prefix[SEG_PREFIX] == DS_PREFIX_OPCODE /* taken */)
6925 	{
6926 	  FRAG_APPEND_1_CHAR (i.prefix[SEG_PREFIX]);
6927 	  i.prefixes--;
6928 	}
6929     }
6930   else
6931     {
6932       int code16;
6933 
6934       code16 = 0;
6935       if (flag_code == CODE_16BIT)
6936 	code16 = CODE16;
6937 
6938       if (i.prefix[DATA_PREFIX] != 0)
6939 	{
6940 	  FRAG_APPEND_1_CHAR (DATA_PREFIX_OPCODE);
6941 	  i.prefixes -= 1;
6942 	  code16 ^= CODE16;
6943 	}
6944 
6945       size = 4;
6946       if (code16)
6947 	size = 2;
6948     }
6949 
6950   if (i.prefix[REX_PREFIX] != 0)
6951     {
6952       FRAG_APPEND_1_CHAR (i.prefix[REX_PREFIX]);
6953       i.prefixes -= 1;
6954     }
6955 
6956   /* BND prefixed jump.  */
6957   if (i.prefix[BND_PREFIX] != 0)
6958     {
6959       FRAG_APPEND_1_CHAR (i.prefix[BND_PREFIX]);
6960       i.prefixes -= 1;
6961     }
6962 
6963   if (i.prefixes != 0 && !intel_syntax)
6964     as_warn (_("skipping prefixes on this instruction"));
6965 
6966   p = frag_more (i.tm.opcode_length + size);
6967   switch (i.tm.opcode_length)
6968     {
6969     case 2:
6970       *p++ = i.tm.base_opcode >> 8;
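      /* Fall through.  */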
6971     case 1:
6972       *p++ = i.tm.base_opcode;
6973       break;
6974     default:
6975       abort ();
6976     }
6977 
6978   fixP = fix_new_exp (frag_now, p - frag_now->fr_literal, size,
6979 		      i.op[0].disps, 1, reloc (size, 1, 1, i.reloc[0]));
6980 
6981   /* All jumps handled here are signed, but don't use a signed limit
6982      check for 32 and 16 bit jumps as we want to allow wrap around at
6983      4G and 64k respectively.  */
6984   if (size == 1)
6985     fixP->fx_signed = 1;
6986 }
6987 
6988 static void
6989 output_interseg_jump (void)
6990 {
6991   char *p;
6992   int size;
6993   int prefix;
6994   int code16;
6995 
6996   code16 = 0;
6997   if (flag_code == CODE_16BIT)
6998     code16 = CODE16;
6999 
7000   prefix = 0;
7001   if (i.prefix[DATA_PREFIX] != 0)
7002     {
7003       prefix = 1;
7004       i.prefixes -= 1;
7005       code16 ^= CODE16;
7006     }
7007   if (i.prefix[REX_PREFIX] != 0)
7008     {
7009       prefix++;
7010       i.prefixes -= 1;
7011     }
7012 
7013   size = 4;
7014   if (code16)
7015     size = 2;
7016 
7017   if (i.prefixes != 0 && !intel_syntax)
7018     as_warn (_("skipping prefixes on this instruction"));
7019 
7020   /* Prefixes, 1 opcode byte, the 2 or 4 byte offset, then the 2 byte segment.  */
7021   p = frag_more (prefix + 1 + 2 + size);
7022 
7023   if (i.prefix[DATA_PREFIX] != 0)
7024     *p++ = DATA_PREFIX_OPCODE;
7025 
7026   if (i.prefix[REX_PREFIX] != 0)
7027     *p++ = i.prefix[REX_PREFIX];
7028 
7029   *p++ = i.tm.base_opcode;
7030   if (i.op[1].imms->X_op == O_constant)
7031     {
7032       offsetT n = i.op[1].imms->X_add_number;
7033 
7034       if (size == 2
7035 	  && !fits_in_unsigned_word (n)
7036 	  && !fits_in_signed_word (n))
7037 	{
7038 	  as_bad (_("16-bit jump out of range"));
7039 	  return;
7040 	}
7041       md_number_to_chars (p, n, size);
7042     }
7043   else
7044     fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7045 		 i.op[1].imms, 0, reloc (size, 0, 0, i.reloc[1]));
7046   if (i.op[0].imms->X_op != O_constant)
7047     as_bad (_("can't handle non absolute segment in `%s'"),
7048 	    i.tm.name);
7049   md_number_to_chars (p + size, (valueT) i.op[0].imms->X_add_number, 2);
7050 }
7051 
7052 static void
7053 output_insn (void)
7054 {
7055   fragS *insn_start_frag;
7056   offsetT insn_start_off;
7057 
7058   /* Tie dwarf2 debug info to the address at the start of the insn.
7059      We can't do this after the insn has been output as the current
7060      frag may have been closed off.  eg. by frag_var.  */
7061   dwarf2_emit_insn (0);
7062 
7063   insn_start_frag = frag_now;
7064   insn_start_off = frag_now_fix ();
7065 
7066   /* Output jumps.  */
7067   if (i.tm.opcode_modifier.jump)
7068     output_branch ();
7069   else if (i.tm.opcode_modifier.jumpbyte
7070 	   || i.tm.opcode_modifier.jumpdword)
7071     output_jump ();
7072   else if (i.tm.opcode_modifier.jumpintersegment)
7073     output_interseg_jump ();
7074   else
7075     {
7076       /* Output normal instructions here.  */
7077       char *p;
7078       unsigned char *q;
7079       unsigned int j;
7080       unsigned int prefix;
7081 
7082       if (avoid_fence
7083          && i.tm.base_opcode == 0xfae
7084          && i.operands == 1
7085          && i.imm_operands == 1
7086          && (i.op[0].imms->X_add_number == 0xe8
7087              || i.op[0].imms->X_add_number == 0xf0
7088              || i.op[0].imms->X_add_number == 0xf8))
7089         {
7090           /* Encode lfence, mfence, and sfence as
7091              f0 83 04 24 00   lock addl $0x0, (%{re}sp).  */
7092           offsetT val = 0x240483f0ULL;
7093           p = frag_more (5);
7094           md_number_to_chars (p, val, 5);
7095           return;
7096         }
7097 
7098       /* Some processors fail on the LOCK prefix.  This option makes the
7099 	 assembler ignore the LOCK prefix and serves as a workaround.  */
7100       if (omit_lock_prefix)
7101 	{
7102 	  if (i.tm.base_opcode == LOCK_PREFIX_OPCODE)
7103 	    return;
7104 	  i.prefix[LOCK_PREFIX] = 0;
7105 	}
7106 
7107       /* Since the VEX/EVEX prefix contains the implicit prefix, we
7108 	 don't need the explicit prefix.  */
7109       if (!i.tm.opcode_modifier.vex && !i.tm.opcode_modifier.evex)
7110 	{
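	  /* For legacy-encoded templates any mandatory prefix (e.g. 0x66,
	     0xf2 or 0xf3) is stored in the byte of base_opcode above the
	     opcode proper; pull it out here and emit it via add_prefix.  */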
7111 	  switch (i.tm.opcode_length)
7112 	    {
7113 	    case 3:
7114 	      if (i.tm.base_opcode & 0xff000000)
7115 		{
7116 		  prefix = (i.tm.base_opcode >> 24) & 0xff;
7117 		  goto check_prefix;
7118 		}
7119 	      break;
7120 	    case 2:
7121 	      if ((i.tm.base_opcode & 0xff0000) != 0)
7122 		{
7123 		  prefix = (i.tm.base_opcode >> 16) & 0xff;
7124 		  if (i.tm.cpu_flags.bitfield.cpupadlock)
7125 		    {
7126 check_prefix:
7127 		      if (prefix != REPE_PREFIX_OPCODE
7128 			  || (i.prefix[REP_PREFIX]
7129 			      != REPE_PREFIX_OPCODE))
7130 			add_prefix (prefix);
7131 		    }
7132 		  else
7133 		    add_prefix (prefix);
7134 		}
7135 	      break;
7136 	    case 1:
7137 	      break;
7138 	    default:
7139 	      abort ();
7140 	    }
7141 
7142 #if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7143 	  /* For x32, add a dummy REX_OPCODE prefix for mov/add with an
7144 	     R_X86_64_GOTTPOFF relocation so that the linker can safely
7145 	     perform IE->LE optimization.  */
7146 	  if (x86_elf_abi == X86_64_X32_ABI
7147 	      && i.operands == 2
7148 	      && i.reloc[0] == BFD_RELOC_X86_64_GOTTPOFF
7149 	      && i.prefix[REX_PREFIX] == 0)
7150 	    add_prefix (REX_OPCODE);
7151 #endif
7152 
7153 	  /* The prefix bytes.  */
7154 	  for (j = ARRAY_SIZE (i.prefix), q = i.prefix; j > 0; j--, q++)
7155 	    if (*q)
7156 	      FRAG_APPEND_1_CHAR (*q);
7157 	}
7158       else
7159 	{
7160 	  for (j = 0, q = i.prefix; j < ARRAY_SIZE (i.prefix); j++, q++)
7161 	    if (*q)
7162 	      switch (j)
7163 		{
7164 		case REX_PREFIX:
7165 		  /* REX byte is encoded in VEX prefix.  */
7166 		  break;
7167 		case SEG_PREFIX:
7168 		case ADDR_PREFIX:
7169 		  FRAG_APPEND_1_CHAR (*q);
7170 		  break;
7171 		default:
7172 		  /* There should be no other prefixes for instructions
7173 		     with VEX prefix.  */
7174 		  abort ();
7175 		}
7176 
7177 	  /* For EVEX instructions i.vrex should become 0 after
7178 	     build_evex_prefix.  For VEX instructions upper 16 registers
7179 	     aren't available, so VREX should be 0.  */
7180 	  if (i.vrex)
7181 	    abort ();
7182 	  /* Now the VEX prefix.  */
7183 	  p = frag_more (i.vex.length);
7184 	  for (j = 0; j < i.vex.length; j++)
7185 	    p[j] = i.vex.bytes[j];
7186 	}
7187 
7188       /* Now the opcode; be careful about word order here!  */
7189       if (i.tm.opcode_length == 1)
7190 	{
7191 	  FRAG_APPEND_1_CHAR (i.tm.base_opcode);
7192 	}
7193       else
7194 	{
7195 	  switch (i.tm.opcode_length)
7196 	    {
7197 	    case 4:
7198 	      p = frag_more (4);
7199 	      *p++ = (i.tm.base_opcode >> 24) & 0xff;
7200 	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
7201 	      break;
7202 	    case 3:
7203 	      p = frag_more (3);
7204 	      *p++ = (i.tm.base_opcode >> 16) & 0xff;
7205 	      break;
7206 	    case 2:
7207 	      p = frag_more (2);
7208 	      break;
7209 	    default:
7210 	      abort ();
7211 	      break;
7212 	    }
7213 
7214 	  /* Put out high byte first: can't use md_number_to_chars!  */
7215 	  *p++ = (i.tm.base_opcode >> 8) & 0xff;
7216 	  *p = i.tm.base_opcode & 0xff;
7217 	}
7218 
7219       /* Now the modrm byte and sib byte (if present).  */
7220       if (i.tm.opcode_modifier.modrm)
7221 	{
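	  /* ModRM byte layout: r/m in bits 0-2, reg (or the extension
	     opcode) in bits 3-5, mode in bits 6-7.  */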
7222 	  FRAG_APPEND_1_CHAR ((i.rm.regmem << 0
7223 			       | i.rm.reg << 3
7224 			       | i.rm.mode << 6));
7225 	  /* If i.rm.regmem == ESP (4)
7226 	     && i.rm.mode != (Register mode)
7227 	     && not 16 bit
7228 	     ==> need a SIB byte.  */
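	  /* SIB byte layout: base in bits 0-2, index in bits 3-5, scale
	     in bits 6-7.  */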
7229 	  if (i.rm.regmem == ESCAPE_TO_TWO_BYTE_ADDRESSING
7230 	      && i.rm.mode != 3
7231 	      && !(i.base_reg && i.base_reg->reg_type.bitfield.reg16))
7232 	    FRAG_APPEND_1_CHAR ((i.sib.base << 0
7233 				 | i.sib.index << 3
7234 				 | i.sib.scale << 6));
7235 	}
7236 
7237       if (i.disp_operands)
7238 	output_disp (insn_start_frag, insn_start_off);
7239 
7240       if (i.imm_operands)
7241 	output_imm (insn_start_frag, insn_start_off);
7242     }
7243 
7244 #ifdef DEBUG386
7245   if (flag_debug)
7246     {
7247       pi ("" /*line*/, &i);
7248     }
7249 #endif /* DEBUG386  */
7250 }
7251 
7252 /* Return the size of the displacement operand N.  */
7253 
7254 static int
7255 disp_size (unsigned int n)
7256 {
7257   int size = 4;
7258 
7259   /* Vec_Disp8 has to be 8bit.  */
7260   if (i.types[n].bitfield.vec_disp8)
7261     size = 1;
7262   else if (i.types[n].bitfield.disp64)
7263     size = 8;
7264   else if (i.types[n].bitfield.disp8)
7265     size = 1;
7266   else if (i.types[n].bitfield.disp16)
7267     size = 2;
7268   return size;
7269 }
7270 
7271 /* Return the size of the immediate operand N.  */
7272 
7273 static int
7274 imm_size (unsigned int n)
7275 {
7276   int size = 4;
7277   if (i.types[n].bitfield.imm64)
7278     size = 8;
7279   else if (i.types[n].bitfield.imm8 || i.types[n].bitfield.imm8s)
7280     size = 1;
7281   else if (i.types[n].bitfield.imm16)
7282     size = 2;
7283   return size;
7284 }
7285 
7286 static void
7287 output_disp (fragS *insn_start_frag, offsetT insn_start_off)
7288 {
7289   char *p;
7290   unsigned int n;
7291 
7292   for (n = 0; n < i.operands; n++)
7293     {
7294       if (i.types[n].bitfield.vec_disp8
7295 	  || operand_type_check (i.types[n], disp))
7296 	{
7297 	  if (i.op[n].disps->X_op == O_constant)
7298 	    {
7299 	      int size = disp_size (n);
7300 	      offsetT val = i.op[n].disps->X_add_number;
7301 
7302 	      if (i.types[n].bitfield.vec_disp8)
7303 		val >>= i.memshift;
7304 	      val = offset_in_range (val, size);
7305 	      p = frag_more (size);
7306 	      md_number_to_chars (p, val, size);
7307 	    }
7308 	  else
7309 	    {
7310 	      enum bfd_reloc_code_real reloc_type;
7311 	      int size = disp_size (n);
7312 	      int sign = i.types[n].bitfield.disp32s;
7313 	      int pcrel = (i.flags[n] & Operand_PCrel) != 0;
7314 	      fixS *fixP;
7315 
7316 	      /* We can't have 8 bit displacement here.  */
7317 	      gas_assert (!i.types[n].bitfield.disp8);
7318 
7319 	      /* The PC-relative address is computed relative to the
7320 		 instruction boundary, so if immediate fields follow, we
7321 		 need to adjust the value.  */
7322 	      if (pcrel && i.imm_operands)
7323 		{
7324 		  unsigned int n1;
7325 		  int sz = 0;
7326 
7327 		  for (n1 = 0; n1 < i.operands; n1++)
7328 		    if (operand_type_check (i.types[n1], imm))
7329 		      {
7330 			/* Only one immediate is allowed for PC
7331 			   relative address.  */
7332 			gas_assert (sz == 0);
7333 			sz = imm_size (n1);
7334 			i.op[n].disps->X_add_number -= sz;
7335 		      }
7336 		  /* We should find the immediate.  */
7337 		  gas_assert (sz != 0);
7338 		}
7339 
7340 	      p = frag_more (size);
7341 	      reloc_type = reloc (size, pcrel, sign, i.reloc[n]);
7342 	      if (GOT_symbol
7343 		  && GOT_symbol == i.op[n].disps->X_add_symbol
7344 		  && (((reloc_type == BFD_RELOC_32
7345 			|| reloc_type == BFD_RELOC_X86_64_32S
7346 			|| (reloc_type == BFD_RELOC_64
7347 			    && object_64bit))
7348 		       && (i.op[n].disps->X_op == O_symbol
7349 			   || (i.op[n].disps->X_op == O_add
7350 			       && ((symbol_get_value_expression
7351 				    (i.op[n].disps->X_op_symbol)->X_op)
7352 				   == O_subtract))))
7353 		      || reloc_type == BFD_RELOC_32_PCREL))
7354 		{
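		  /* "add" is the number of bytes of this instruction that
		     precede the displacement field; folding it into the
		     addend makes the GOTPC value relative to the field
		     itself rather than the start of the insn (see the long
		     explanation in output_imm below).  */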
7355 		  offsetT add;
7356 
7357 		  if (insn_start_frag == frag_now)
7358 		    add = (p - frag_now->fr_literal) - insn_start_off;
7359 		  else
7360 		    {
7361 		      fragS *fr;
7362 
7363 		      add = insn_start_frag->fr_fix - insn_start_off;
7364 		      for (fr = insn_start_frag->fr_next;
7365 			   fr && fr != frag_now; fr = fr->fr_next)
7366 			add += fr->fr_fix;
7367 		      add += p - frag_now->fr_literal;
7368 		    }
7369 
7370 		  if (!object_64bit)
7371 		    {
7372 		      reloc_type = BFD_RELOC_386_GOTPC;
7373 		      i.op[n].imms->X_add_number += add;
7374 		    }
7375 		  else if (reloc_type == BFD_RELOC_64)
7376 		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
7377 		  else
7378 		    /* Don't do the adjustment for x86-64, as there
7379 		       the pcrel addressing is relative to the _next_
7380 		       insn, and that is taken care of in other code.  */
7381 		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
7382 		}
7383 	      fixP = fix_new_exp (frag_now, p - frag_now->fr_literal,
7384 				  size, i.op[n].disps, pcrel,
7385 				  reloc_type);
7386 	      /* Check for "call/jmp *mem", "mov mem, %reg",
7387 		 "test %reg, mem" and "binop mem, %reg" where binop
7388 		 is one of adc, add, and, cmp, or, sbb, sub, xor
7389 		 instructions.  Always generate R_386_GOT32X for
7390 		 "sym*GOT" operand in 32-bit mode.  */
7391 	      if ((generate_relax_relocations
7392 		   || (!object_64bit
7393 		       && i.rm.mode == 0
7394 		       && i.rm.regmem == 5))
7395 		  && (i.rm.mode == 2
7396 		      || (i.rm.mode == 0 && i.rm.regmem == 5))
7397 		  && ((i.operands == 1
7398 		       && i.tm.base_opcode == 0xff
7399 		       && (i.rm.reg == 2 || i.rm.reg == 4))
7400 		      || (i.operands == 2
7401 			  && (i.tm.base_opcode == 0x8b
7402 			      || i.tm.base_opcode == 0x85
7403 			      || (i.tm.base_opcode & 0xc7) == 0x03))))
7404 		{
7405 		  if (object_64bit)
7406 		    {
7407 		      fixP->fx_tcbit = i.rex != 0;
7408 		      if (i.base_reg
7409 			  && (i.base_reg->reg_num == RegRip
7410 			      || i.base_reg->reg_num == RegEip))
7411 			fixP->fx_tcbit2 = 1;
7412 		    }
7413 		  else
7414 		    fixP->fx_tcbit2 = 1;
7415 		}
7416 	    }
7417 	}
7418     }
7419 }
7420 
7421 static void
7422 output_imm (fragS *insn_start_frag, offsetT insn_start_off)
7423 {
7424   char *p;
7425   unsigned int n;
7426 
7427   for (n = 0; n < i.operands; n++)
7428     {
7429       /* Skip SAE/RC Imm operand in EVEX.  They are already handled.  */
7430       if (i.rounding && (int) n == i.rounding->operand)
7431 	continue;
7432 
7433       if (operand_type_check (i.types[n], imm))
7434 	{
7435 	  if (i.op[n].imms->X_op == O_constant)
7436 	    {
7437 	      int size = imm_size (n);
7438 	      offsetT val;
7439 
7440 	      val = offset_in_range (i.op[n].imms->X_add_number,
7441 				     size);
7442 	      p = frag_more (size);
7443 	      md_number_to_chars (p, val, size);
7444 	    }
7445 	  else
7446 	    {
7447 	      /* Not absolute_section.
7448 		 Need a 32-bit fixup (don't support 8bit
7449 		 non-absolute imms).  Try to support other
7450 		 sizes ...  */
7451 	      enum bfd_reloc_code_real reloc_type;
7452 	      int size = imm_size (n);
7453 	      int sign;
7454 
7455 	      if (i.types[n].bitfield.imm32s
7456 		  && (i.suffix == QWORD_MNEM_SUFFIX
7457 		      || (!i.suffix && i.tm.opcode_modifier.no_lsuf)))
7458 		sign = 1;
7459 	      else
7460 		sign = 0;
7461 
7462 	      p = frag_more (size);
7463 	      reloc_type = reloc (size, 0, sign, i.reloc[n]);
7464 
7465 	      /*   This is tough to explain.  We end up with this one if we
7466 	       * have operands that look like
7467 	       * "_GLOBAL_OFFSET_TABLE_+[.-.L284]".  The goal here is to
7468 	       * obtain the absolute address of the GOT, and it is strongly
7469 	       * preferable from a performance point of view to avoid using
7470 	       * a runtime relocation for this.  The actual sequence of
7471 	       * instructions often looks something like:
7472 	       *
7473 	       *	call	.L66
7474 	       * .L66:
7475 	       *	popl	%ebx
7476 	       *	addl	$_GLOBAL_OFFSET_TABLE_+[.-.L66],%ebx
7477 	       *
7478 	       *   The call and pop essentially return the absolute address
7479 	       * of the label .L66 and store it in %ebx.  The linker itself
7480 	       * will ultimately change the first operand of the addl so
7481 	       * that %ebx points to the GOT, but to keep things simple, the
7482 	       * .o file must have this operand set so that it generates not
7483 	       * the absolute address of .L66, but the absolute address of
7484 	       * itself.  This allows the linker itself to simply treat a GOTPC
7485 	       * relocation as asking for a pcrel offset to the GOT to be
7486 	       * added in, and the addend of the relocation is stored in the
7487 	       * operand field for the instruction itself.
7488 	       *
7489 	       *   Our job here is to fix the operand so that it would add
7490 	       * the correct offset so that %ebx would point to itself.  The
7491 	       * thing that is tricky is that .-.L66 will point to the
7492 	       * beginning of the instruction, so we need to further modify
7493 	       * the operand so that it will point to itself.  There are
7494 	       * other cases where you have something like:
7495 	       *
7496 	       *	.long	$_GLOBAL_OFFSET_TABLE_+[.-.L66]
7497 	       *
7498 	       * and here no correction would be required.  Internally in
7499 	       * the assembler we treat operands of this form as not being
7500 	       * pcrel since the '.' is explicitly mentioned, and I wonder
7501 	       * whether it would simplify matters to do it this way.  Who
7502 	       * knows.  In earlier versions of the PIC patches, the
7503 	       * pcrel_adjust field was used to store the correction, but
7504 	       * since the expression is not pcrel, I felt it would be
7505 	       * confusing to do it this way.  */
7506 
7507 	      if ((reloc_type == BFD_RELOC_32
7508 		   || reloc_type == BFD_RELOC_X86_64_32S
7509 		   || reloc_type == BFD_RELOC_64)
7510 		  && GOT_symbol
7511 		  && GOT_symbol == i.op[n].imms->X_add_symbol
7512 		  && (i.op[n].imms->X_op == O_symbol
7513 		      || (i.op[n].imms->X_op == O_add
7514 			  && ((symbol_get_value_expression
7515 			       (i.op[n].imms->X_op_symbol)->X_op)
7516 			      == O_subtract))))
7517 		{
7518 		  offsetT add;
7519 
7520 		  if (insn_start_frag == frag_now)
7521 		    add = (p - frag_now->fr_literal) - insn_start_off;
7522 		  else
7523 		    {
7524 		      fragS *fr;
7525 
7526 		      add = insn_start_frag->fr_fix - insn_start_off;
7527 		      for (fr = insn_start_frag->fr_next;
7528 			   fr && fr != frag_now; fr = fr->fr_next)
7529 			add += fr->fr_fix;
7530 		      add += p - frag_now->fr_literal;
7531 		    }
7532 
7533 		  if (!object_64bit)
7534 		    reloc_type = BFD_RELOC_386_GOTPC;
7535 		  else if (size == 4)
7536 		    reloc_type = BFD_RELOC_X86_64_GOTPC32;
7537 		  else if (size == 8)
7538 		    reloc_type = BFD_RELOC_X86_64_GOTPC64;
7539 		  i.op[n].imms->X_add_number += add;
7540 		}
7541 	      fix_new_exp (frag_now, p - frag_now->fr_literal, size,
7542 			   i.op[n].imms, 0, reloc_type);
7543 	    }
7544 	}
7545     }
7546 }
7547 
7548 /* x86_cons_fix_new is called via the expression parsing code when a
7549    reloc is needed.  We use this hook to get the correct .got reloc.  */
7550 static int cons_sign = -1;
7551 
7552 void
7553 x86_cons_fix_new (fragS *frag, unsigned int off, unsigned int len,
7554 		  expressionS *exp, bfd_reloc_code_real_type r)
7555 {
7556   r = reloc (len, 0, cons_sign, r);
7557 
7558 #ifdef TE_PE
7559   if (exp->X_op == O_secrel)
7560     {
7561       exp->X_op = O_symbol;
7562       r = BFD_RELOC_32_SECREL;
7563     }
7564 #endif
7565 
7566   fix_new_exp (frag, off, len, exp, 0, r);
7567 }
7568 
7569 /* Export the ABI address size for use by TC_ADDRESS_BYTES for the
7570    purpose of the `.dc.a' internal pseudo-op.  */
7571 
7572 int
7573 x86_address_bytes (void)
7574 {
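  /* The x32 ABI runs x86-64 code with 32-bit pointers, so addresses are
     4 bytes even though bits_per_address for the arch is 64.  */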
7575   if ((stdoutput->arch_info->mach & bfd_mach_x64_32))
7576     return 4;
7577   return stdoutput->arch_info->bits_per_address / 8;
7578 }
7579 
7580 #if !(defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) || defined (OBJ_MACH_O)) \
7581     || defined (LEX_AT)
7582 # define lex_got(reloc, adjust, types) NULL
7583 #else
7584 /* Parse operands of the form
7585    <symbol>@GOTOFF+<nnn>
7586    and similar .plt or .got references.
7587 
7588    If we find one, set up the correct relocation in RELOC and copy the
7589    input string, minus the `@GOTOFF' into a malloc'd buffer for
7590    parsing by the calling routine.  Return this buffer, and if ADJUST
7591    is non-null set it to the length of the string we removed from the
7592    input line.  Otherwise return NULL.  */
7593 static char *
7594 lex_got (enum bfd_reloc_code_real *rel,
7595 	 int *adjust,
7596 	 i386_operand_type *types)
7597 {
7598   /* Some of the relocations depend on the size of the field that is to
7599      be relocated.  But in our callers i386_immediate and i386_displacement
7600      we don't yet know the operand size (this will be set by insn
7601      matching).  Hence we record the word32 relocation here,
7602      and adjust the reloc according to the real size in reloc().  */
7603   static const struct {
7604     const char *str;
7605     int len;
7606     const enum bfd_reloc_code_real rel[2];
7607     const i386_operand_type types64;
7608   } gotrel[] = {
7609 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
7610     { STRING_COMMA_LEN ("SIZE"),      { BFD_RELOC_SIZE32,
7611 					BFD_RELOC_SIZE32 },
7612       OPERAND_TYPE_IMM32_64 },
7613 #endif
7614     { STRING_COMMA_LEN ("PLTOFF"),   { _dummy_first_bfd_reloc_code_real,
7615 				       BFD_RELOC_X86_64_PLTOFF64 },
7616       OPERAND_TYPE_IMM64 },
7617     { STRING_COMMA_LEN ("PLT"),      { BFD_RELOC_386_PLT32,
7618 				       BFD_RELOC_X86_64_PLT32    },
7619       OPERAND_TYPE_IMM32_32S_DISP32 },
7620     { STRING_COMMA_LEN ("GOTPLT"),   { _dummy_first_bfd_reloc_code_real,
7621 				       BFD_RELOC_X86_64_GOTPLT64 },
7622       OPERAND_TYPE_IMM64_DISP64 },
7623     { STRING_COMMA_LEN ("GOTOFF"),   { BFD_RELOC_386_GOTOFF,
7624 				       BFD_RELOC_X86_64_GOTOFF64 },
7625       OPERAND_TYPE_IMM64_DISP64 },
7626     { STRING_COMMA_LEN ("GOTPCREL"), { _dummy_first_bfd_reloc_code_real,
7627 				       BFD_RELOC_X86_64_GOTPCREL },
7628       OPERAND_TYPE_IMM32_32S_DISP32 },
7629     { STRING_COMMA_LEN ("TLSGD"),    { BFD_RELOC_386_TLS_GD,
7630 				       BFD_RELOC_X86_64_TLSGD    },
7631       OPERAND_TYPE_IMM32_32S_DISP32 },
7632     { STRING_COMMA_LEN ("TLSLDM"),   { BFD_RELOC_386_TLS_LDM,
7633 				       _dummy_first_bfd_reloc_code_real },
7634       OPERAND_TYPE_NONE },
7635     { STRING_COMMA_LEN ("TLSLD"),    { _dummy_first_bfd_reloc_code_real,
7636 				       BFD_RELOC_X86_64_TLSLD    },
7637       OPERAND_TYPE_IMM32_32S_DISP32 },
7638     { STRING_COMMA_LEN ("GOTTPOFF"), { BFD_RELOC_386_TLS_IE_32,
7639 				       BFD_RELOC_X86_64_GOTTPOFF },
7640       OPERAND_TYPE_IMM32_32S_DISP32 },
7641     { STRING_COMMA_LEN ("TPOFF"),    { BFD_RELOC_386_TLS_LE_32,
7642 				       BFD_RELOC_X86_64_TPOFF32  },
7643       OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7644     { STRING_COMMA_LEN ("NTPOFF"),   { BFD_RELOC_386_TLS_LE,
7645 				       _dummy_first_bfd_reloc_code_real },
7646       OPERAND_TYPE_NONE },
7647     { STRING_COMMA_LEN ("DTPOFF"),   { BFD_RELOC_386_TLS_LDO_32,
7648 				       BFD_RELOC_X86_64_DTPOFF32 },
7649       OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7650     { STRING_COMMA_LEN ("GOTNTPOFF"),{ BFD_RELOC_386_TLS_GOTIE,
7651 				       _dummy_first_bfd_reloc_code_real },
7652       OPERAND_TYPE_NONE },
7653     { STRING_COMMA_LEN ("INDNTPOFF"),{ BFD_RELOC_386_TLS_IE,
7654 				       _dummy_first_bfd_reloc_code_real },
7655       OPERAND_TYPE_NONE },
7656     { STRING_COMMA_LEN ("GOT"),      { BFD_RELOC_386_GOT32,
7657 				       BFD_RELOC_X86_64_GOT32    },
7658       OPERAND_TYPE_IMM32_32S_64_DISP32 },
7659     { STRING_COMMA_LEN ("TLSDESC"),  { BFD_RELOC_386_TLS_GOTDESC,
7660 				       BFD_RELOC_X86_64_GOTPC32_TLSDESC },
7661       OPERAND_TYPE_IMM32_32S_DISP32 },
7662     { STRING_COMMA_LEN ("TLSCALL"),  { BFD_RELOC_386_TLS_DESC_CALL,
7663 				       BFD_RELOC_X86_64_TLSDESC_CALL },
7664       OPERAND_TYPE_IMM32_32S_DISP32 },
7665   };
7666   char *cp;
7667   unsigned int j;
7668 
7669 #if defined (OBJ_MAYBE_ELF)
7670   if (!IS_ELF)
7671     return NULL;
7672 #endif
7673 
7674   for (cp = input_line_pointer; *cp != '@'; cp++)
7675     if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7676       return NULL;
7677 
7678   for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7679     {
7680       int len = gotrel[j].len;
7681       if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7682 	{
7683 	  if (gotrel[j].rel[object_64bit] != 0)
7684 	    {
7685 	      int first, second;
7686 	      char *tmpbuf, *past_reloc;
7687 
7688 	      *rel = gotrel[j].rel[object_64bit];
7689 
7690 	      if (types)
7691 		{
7692 		  if (flag_code != CODE_64BIT)
7693 		    {
7694 		      types->bitfield.imm32 = 1;
7695 		      types->bitfield.disp32 = 1;
7696 		    }
7697 		  else
7698 		    *types = gotrel[j].types64;
7699 		}
7700 
7701 	      if (j != 0 && GOT_symbol == NULL)
7702 		GOT_symbol = symbol_find_or_make (GLOBAL_OFFSET_TABLE_NAME);
7703 
7704 	      /* The length of the first part of our input line.  */
7705 	      first = cp - input_line_pointer;
7706 
7707 	      /* The second part goes from after the reloc token until
7708 		 (and including) an end_of_line char or comma.  */
7709 	      past_reloc = cp + 1 + len;
7710 	      cp = past_reloc;
7711 	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7712 		++cp;
7713 	      second = cp + 1 - past_reloc;
7714 
7715 	      /* Allocate and copy string.  The trailing NUL shouldn't
7716 		 be necessary, but be safe.  */
7717 	      tmpbuf = XNEWVEC (char, first + second + 2);
7718 	      memcpy (tmpbuf, input_line_pointer, first);
7719 	      if (second != 0 && *past_reloc != ' ')
7720 		/* Replace the relocation token with ' ', so that
7721 		   errors like foo@GOTOFF1 will be detected.  */
7722 		tmpbuf[first++] = ' ';
7723 	      else
7724 		/* Increment length by 1 if the relocation token is
7725 		   removed.  */
7726 		len++;
7727 	      if (adjust)
7728 		*adjust = len;
7729 	      memcpy (tmpbuf + first, past_reloc, second);
7730 	      tmpbuf[first + second] = '\0';
7731 	      return tmpbuf;
7732 	    }
7733 
7734 	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
7735 		  gotrel[j].str, 1 << (5 + object_64bit));
7736 	  return NULL;
7737 	}
7738     }
7739 
7740   /* Might be a symbol version string.  Don't as_bad here.  */
7741   return NULL;
7742 }
7743 #endif
7744 
7745 #ifdef TE_PE
7746 #ifdef lex_got
7747 #undef lex_got
7748 #endif
7749 /* Parse operands of the form
7750    <symbol>@SECREL32+<nnn>
7751 
7752    If we find one, set up the correct relocation in RELOC and copy the
7753    input string, minus the `@SECREL32' into a malloc'd buffer for
7754    parsing by the calling routine.  Return this buffer, and if ADJUST
7755    is non-null set it to the length of the string we removed from the
7756    input line.  Otherwise return NULL.
7757 
7758    This function is copied from the ELF version above, adjusted for PE targets.  */
7759 
7760 static char *
7761 lex_got (enum bfd_reloc_code_real *rel ATTRIBUTE_UNUSED,
7762 	 int *adjust ATTRIBUTE_UNUSED,
7763 	 i386_operand_type *types)
7764 {
7765   static const struct
7766   {
7767     const char *str;
7768     int len;
7769     const enum bfd_reloc_code_real rel[2];
7770     const i386_operand_type types64;
7771   }
7772   gotrel[] =
7773   {
7774     { STRING_COMMA_LEN ("SECREL32"),    { BFD_RELOC_32_SECREL,
7775 					  BFD_RELOC_32_SECREL },
7776       OPERAND_TYPE_IMM32_32S_64_DISP32_64 },
7777   };
7778 
7779   char *cp;
7780   unsigned j;
7781 
7782   for (cp = input_line_pointer; *cp != '@'; cp++)
7783     if (is_end_of_line[(unsigned char) *cp] || *cp == ',')
7784       return NULL;
7785 
7786   for (j = 0; j < ARRAY_SIZE (gotrel); j++)
7787     {
7788       int len = gotrel[j].len;
7789 
7790       if (strncasecmp (cp + 1, gotrel[j].str, len) == 0)
7791 	{
7792 	  if (gotrel[j].rel[object_64bit] != 0)
7793 	    {
7794 	      int first, second;
7795 	      char *tmpbuf, *past_reloc;
7796 
7797 	      *rel = gotrel[j].rel[object_64bit];
7798 	      if (adjust)
7799 		*adjust = len;
7800 
7801 	      if (types)
7802 		{
7803 		  if (flag_code != CODE_64BIT)
7804 		    {
7805 		      types->bitfield.imm32 = 1;
7806 		      types->bitfield.disp32 = 1;
7807 		    }
7808 		  else
7809 		    *types = gotrel[j].types64;
7810 		}
7811 
7812 	      /* The length of the first part of our input line.  */
7813 	      first = cp - input_line_pointer;
7814 
7815 	      /* The second part goes from after the reloc token until
7816 		 (and including) an end_of_line char or comma.  */
7817 	      past_reloc = cp + 1 + len;
7818 	      cp = past_reloc;
7819 	      while (!is_end_of_line[(unsigned char) *cp] && *cp != ',')
7820 		++cp;
7821 	      second = cp + 1 - past_reloc;
7822 
7823 	      /* Allocate and copy string.  The trailing NUL shouldn't
7824 		 be necessary, but be safe.  */
7825 	      tmpbuf = XNEWVEC (char, first + second + 2);
7826 	      memcpy (tmpbuf, input_line_pointer, first);
7827 	      if (second != 0 && *past_reloc != ' ')
7828 		/* Replace the relocation token with ' ', so that
7829 		   errors like foo@SECREL321 will be detected.  */
7830 		tmpbuf[first++] = ' ';
7831 	      memcpy (tmpbuf + first, past_reloc, second);
7832 	      tmpbuf[first + second] = '\0';
7833 	      return tmpbuf;
7834 	    }
7835 
7836 	  as_bad (_("@%s reloc is not supported with %d-bit output format"),
7837 		  gotrel[j].str, 1 << (5 + object_64bit));
7838 	  return NULL;
7839 	}
7840     }
7841 
7842   /* Might be a symbol version string.  Don't as_bad here.  */
7843   return NULL;
7844 }
7845 
7846 #endif /* TE_PE */
7847 
7848 bfd_reloc_code_real_type
7849 x86_cons (expressionS *exp, int size)
7850 {
7851   bfd_reloc_code_real_type got_reloc = NO_RELOC;
7852 
7853   intel_syntax = -intel_syntax;
7854 
7855   exp->X_md = 0;
7856   if (size == 4 || (object_64bit && size == 8))
7857     {
7858       /* Handle @GOTOFF and the like in an expression.  */
7859       char *save;
7860       char *gotfree_input_line;
7861       int adjust = 0;
7862 
7863       save = input_line_pointer;
7864       gotfree_input_line = lex_got (&got_reloc, &adjust, NULL);
7865       if (gotfree_input_line)
7866 	input_line_pointer = gotfree_input_line;
7867 
7868       expression (exp);
7869 
7870       if (gotfree_input_line)
7871 	{
7872 	  /* expression () has merrily parsed up to the end of line,
7873 	     or a comma - in the wrong buffer.  Transfer how far
7874 	     input_line_pointer has moved to the right buffer.  */
7875 	  input_line_pointer = (save
7876 				+ (input_line_pointer - gotfree_input_line)
7877 				+ adjust);
7878 	  free (gotfree_input_line);
7879 	  if (exp->X_op == O_constant
7880 	      || exp->X_op == O_absent
7881 	      || exp->X_op == O_illegal
7882 	      || exp->X_op == O_register
7883 	      || exp->X_op == O_big)
7884 	    {
7885 	      char c = *input_line_pointer;
7886 	      *input_line_pointer = 0;
7887 	      as_bad (_("missing or invalid expression `%s'"), save);
7888 	      *input_line_pointer = c;
7889 	    }
7890 	}
7891     }
7892   else
7893     expression (exp);
7894 
7895   intel_syntax = -intel_syntax;
7896 
7897   if (intel_syntax)
7898     i386_intel_simplify (exp);
7899 
7900   return got_reloc;
7901 }
7902 
7903 static void
7904 signed_cons (int size)
7905 {
7906   if (flag_code == CODE_64BIT)
7907     cons_sign = 1;
7908   cons (size);
7909   cons_sign = -1;
7910 }
7911 
7912 #ifdef TE_PE
7913 static void
7914 pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
7915 {
7916   expressionS exp;
7917 
7918   do
7919     {
7920       expression (&exp);
7921       if (exp.X_op == O_symbol)
7922 	exp.X_op = O_secrel;
7923 
7924       emit_expr (&exp, 4);
7925     }
7926   while (*input_line_pointer++ == ',');
7927 
7928   input_line_pointer--;
7929   demand_empty_rest_of_line ();
7930 }
7931 #endif
7932 
7933 /* Handle Vector operations.  */
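/* These are the AVX512 operand decorations that may trail a register or
   memory operand, e.g. a mask "{%k1}", zeroing "{z}" or a broadcast such
   as "{1to8}", as in AT&T syntax "vaddps (%rax){1to16}, %zmm1, %zmm2".  */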
7934 
7935 static char *
7936 check_VecOperations (char *op_string, char *op_end)
7937 {
7938   const reg_entry *mask;
7939   const char *saved;
7940   char *end_op;
7941 
7942   while (*op_string
7943 	 && (op_end == NULL || op_string < op_end))
7944     {
7945       saved = op_string;
7946       if (*op_string == '{')
7947 	{
7948 	  op_string++;
7949 
7950 	  /* Check broadcasts.  */
7951 	  if (strncmp (op_string, "1to", 3) == 0)
7952 	    {
7953 	      int bcst_type;
7954 
7955 	      if (i.broadcast)
7956 		goto duplicated_vec_op;
7957 
7958 	      op_string += 3;
7959 	      if (*op_string == '8')
7960 		bcst_type = BROADCAST_1TO8;
7961 	      else if (*op_string == '4')
7962 		bcst_type = BROADCAST_1TO4;
7963 	      else if (*op_string == '2')
7964 		bcst_type = BROADCAST_1TO2;
7965 	      else if (*op_string == '1'
7966 		       && *(op_string+1) == '6')
7967 		{
7968 		  bcst_type = BROADCAST_1TO16;
7969 		  op_string++;
7970 		}
7971 	      else
7972 		{
7973 		  as_bad (_("Unsupported broadcast: `%s'"), saved);
7974 		  return NULL;
7975 		}
7976 	      op_string++;
7977 
7978 	      broadcast_op.type = bcst_type;
7979 	      broadcast_op.operand = this_operand;
7980 	      i.broadcast = &broadcast_op;
7981 	    }
7982 	  /* Check masking operation.  */
7983 	  else if ((mask = parse_register (op_string, &end_op)) != NULL)
7984 	    {
7985 	      /* k0 can't be used for write mask.  */
7986 	      if (mask->reg_num == 0)
7987 		{
7988 		  as_bad (_("`%s' can't be used for write mask"),
7989 			  op_string);
7990 		  return NULL;
7991 		}
7992 
7993 	      if (!i.mask)
7994 		{
7995 		  mask_op.mask = mask;
7996 		  mask_op.zeroing = 0;
7997 		  mask_op.operand = this_operand;
7998 		  i.mask = &mask_op;
7999 		}
8000 	      else
8001 		{
8002 		  if (i.mask->mask)
8003 		    goto duplicated_vec_op;
8004 
8005 		  i.mask->mask = mask;
8006 
8007 		  /* Only "{z}" is allowed here.  No need to check
8008 		     zeroing mask explicitly.  */
8009 		  if (i.mask->operand != this_operand)
8010 		    {
8011 		      as_bad (_("invalid write mask `%s'"), saved);
8012 		      return NULL;
8013 		    }
8014 		}
8015 
8016 	      op_string = end_op;
8017 	    }
8018 	  /* Check zeroing-flag for masking operation.  */
8019 	  else if (*op_string == 'z')
8020 	    {
8021 	      if (!i.mask)
8022 		{
8023 		  mask_op.mask = NULL;
8024 		  mask_op.zeroing = 1;
8025 		  mask_op.operand = this_operand;
8026 		  i.mask = &mask_op;
8027 		}
8028 	      else
8029 		{
8030 		  if (i.mask->zeroing)
8031 		    {
8032 		    duplicated_vec_op:
8033 		      as_bad (_("duplicated `%s'"), saved);
8034 		      return NULL;
8035 		    }
8036 
8037 		  i.mask->zeroing = 1;
8038 
8039 		  /* Only "{%k}" is allowed here.  No need to check mask
8040 		     register explicitly.  */
8041 		  if (i.mask->operand != this_operand)
8042 		    {
8043 		      as_bad (_("invalid zeroing-masking `%s'"),
8044 			      saved);
8045 		      return NULL;
8046 		    }
8047 		}
8048 
8049 	      op_string++;
8050 	    }
8051 	  else
8052 	    goto unknown_vec_op;
8053 
8054 	  if (*op_string != '}')
8055 	    {
8056 	      as_bad (_("missing `}' in `%s'"), saved);
8057 	      return NULL;
8058 	    }
8059 	  op_string++;
8060 	  continue;
8061 	}
8062     unknown_vec_op:
8063       /* We don't know this one.  */
8064       as_bad (_("unknown vector operation: `%s'"), saved);
8065       return NULL;
8066     }
8067 
8068   return op_string;
8069 }
8070 
8071 static int
8072 i386_immediate (char *imm_start)
8073 {
8074   char *save_input_line_pointer;
8075   char *gotfree_input_line;
8076   segT exp_seg = 0;
8077   expressionS *exp;
8078   i386_operand_type types;
8079 
8080   operand_type_set (&types, ~0);
8081 
8082   if (i.imm_operands == MAX_IMMEDIATE_OPERANDS)
8083     {
8084       as_bad (_("at most %d immediate operands are allowed"),
8085 	      MAX_IMMEDIATE_OPERANDS);
8086       return 0;
8087     }
8088 
8089   exp = &im_expressions[i.imm_operands++];
8090   i.op[this_operand].imms = exp;
8091 
8092   if (is_space_char (*imm_start))
8093     ++imm_start;
8094 
8095   save_input_line_pointer = input_line_pointer;
8096   input_line_pointer = imm_start;
8097 
8098   gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
8099   if (gotfree_input_line)
8100     input_line_pointer = gotfree_input_line;
8101 
8102   exp_seg = expression (exp);
8103 
8104   SKIP_WHITESPACE ();
8105 
8106   /* Handle vector operations.  */
8107   if (*input_line_pointer == '{')
8108     {
8109       input_line_pointer = check_VecOperations (input_line_pointer,
8110 						NULL);
8111       if (input_line_pointer == NULL)
8112 	return 0;
8113     }
8114 
8115   if (*input_line_pointer)
8116     as_bad (_("junk `%s' after expression"), input_line_pointer);
8117 
8118   input_line_pointer = save_input_line_pointer;
8119   if (gotfree_input_line)
8120     {
8121       free (gotfree_input_line);
8122 
8123       if (exp->X_op == O_constant || exp->X_op == O_register)
8124 	exp->X_op = O_illegal;
8125     }
8126 
8127   return i386_finalize_immediate (exp_seg, exp, types, imm_start);
8128 }
8129 
8130 static int
8131 i386_finalize_immediate (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8132 			 i386_operand_type types, const char *imm_start)
8133 {
8134   if (exp->X_op == O_absent || exp->X_op == O_illegal || exp->X_op == O_big)
8135     {
8136       if (imm_start)
8137 	as_bad (_("missing or invalid immediate expression `%s'"),
8138 		imm_start);
8139       return 0;
8140     }
8141   else if (exp->X_op == O_constant)
8142     {
8143       /* Size it properly later.  */
8144       i.types[this_operand].bitfield.imm64 = 1;
8145       /* If not 64bit, sign extend val.  */
8146       if (flag_code != CODE_64BIT
8147 	  && (exp->X_add_number & ~(((addressT) 2 << 31) - 1)) == 0)
8148 	exp->X_add_number
8149 	  = (exp->X_add_number ^ ((addressT) 1 << 31)) - ((addressT) 1 << 31);
8150     }
8151 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8152   else if (OUTPUT_FLAVOR == bfd_target_aout_flavour
8153 	   && exp_seg != absolute_section
8154 	   && exp_seg != text_section
8155 	   && exp_seg != data_section
8156 	   && exp_seg != bss_section
8157 	   && exp_seg != undefined_section
8158 	   && !bfd_is_com_section (exp_seg))
8159     {
8160       as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8161       return 0;
8162     }
8163 #endif
8164   else if (!intel_syntax && exp_seg == reg_section)
8165     {
8166       if (imm_start)
8167 	as_bad (_("illegal immediate register operand %s"), imm_start);
8168       return 0;
8169     }
8170   else
8171     {
8172       /* This is an address.  The size of the address will be
8173 	 determined later, depending on destination register,
8174 	 suffix, or the default for the section.  */
8175       i.types[this_operand].bitfield.imm8 = 1;
8176       i.types[this_operand].bitfield.imm16 = 1;
8177       i.types[this_operand].bitfield.imm32 = 1;
8178       i.types[this_operand].bitfield.imm32s = 1;
8179       i.types[this_operand].bitfield.imm64 = 1;
8180       i.types[this_operand] = operand_type_and (i.types[this_operand],
8181 						types);
8182     }
8183 
8184   return 1;
8185 }
8186 
8187 static char *
8188 i386_scale (char *scale)
8189 {
8190   offsetT val;
8191   char *save = input_line_pointer;
8192 
8193   input_line_pointer = scale;
8194   val = get_absolute_expression ();
8195 
8196   switch (val)
8197     {
8198     case 1:
8199       i.log2_scale_factor = 0;
8200       break;
8201     case 2:
8202       i.log2_scale_factor = 1;
8203       break;
8204     case 4:
8205       i.log2_scale_factor = 2;
8206       break;
8207     case 8:
8208       i.log2_scale_factor = 3;
8209       break;
8210     default:
8211       {
8212 	char sep = *input_line_pointer;
8213 
8214 	*input_line_pointer = '\0';
8215 	as_bad (_("expecting scale factor of 1, 2, 4, or 8: got `%s'"),
8216 		scale);
8217 	*input_line_pointer = sep;
8218 	input_line_pointer = save;
8219 	return NULL;
8220       }
8221     }
8222   if (i.log2_scale_factor != 0 && i.index_reg == 0)
8223     {
8224       as_warn (_("scale factor of %d without an index register"),
8225 	       1 << i.log2_scale_factor);
8226       i.log2_scale_factor = 0;
8227     }
8228   scale = input_line_pointer;
8229   input_line_pointer = save;
8230   return scale;
8231 }
8232 
8233 static int
8234 i386_displacement (char *disp_start, char *disp_end)
8235 {
8236   expressionS *exp;
8237   segT exp_seg = 0;
8238   char *save_input_line_pointer;
8239   char *gotfree_input_line;
8240   int override;
8241   i386_operand_type bigdisp, types = anydisp;
8242   int ret;
8243 
8244   if (i.disp_operands == MAX_MEMORY_OPERANDS)
8245     {
8246       as_bad (_("at most %d displacement operands are allowed"),
8247 	      MAX_MEMORY_OPERANDS);
8248       return 0;
8249     }
8250 
8251   operand_type_set (&bigdisp, 0);
8252   if ((i.types[this_operand].bitfield.jumpabsolute)
8253       || (!current_templates->start->opcode_modifier.jump
8254 	  && !current_templates->start->opcode_modifier.jumpdword))
8255     {
8256       bigdisp.bitfield.disp32 = 1;
8257       override = (i.prefix[ADDR_PREFIX] != 0);
8258       if (flag_code == CODE_64BIT)
8259 	{
8260 	  if (!override)
8261 	    {
8262 	      bigdisp.bitfield.disp32s = 1;
8263 	      bigdisp.bitfield.disp64 = 1;
8264 	    }
8265 	}
8266       else if ((flag_code == CODE_16BIT) ^ override)
8267 	{
8268 	  bigdisp.bitfield.disp32 = 0;
8269 	  bigdisp.bitfield.disp16 = 1;
8270 	}
8271     }
8272   else
8273     {
8274       /* For PC-relative branches, the width of the displacement
8275 	 is dependent upon data size, not address size.  */
8276       override = (i.prefix[DATA_PREFIX] != 0);
8277       if (flag_code == CODE_64BIT)
8278 	{
8279 	  if (override || i.suffix == WORD_MNEM_SUFFIX)
8280 	    bigdisp.bitfield.disp16 = 1;
8281 	  else
8282 	    {
8283 	      bigdisp.bitfield.disp32 = 1;
8284 	      bigdisp.bitfield.disp32s = 1;
8285 	    }
8286 	}
8287       else
8288 	{
8289 	  if (!override)
8290 	    override = (i.suffix == (flag_code != CODE_16BIT
8291 				     ? WORD_MNEM_SUFFIX
8292 				     : LONG_MNEM_SUFFIX));
8293 	  bigdisp.bitfield.disp32 = 1;
8294 	  if ((flag_code == CODE_16BIT) ^ override)
8295 	    {
8296 	      bigdisp.bitfield.disp32 = 0;
8297 	      bigdisp.bitfield.disp16 = 1;
8298 	    }
8299 	}
8300     }
8301   i.types[this_operand] = operand_type_or (i.types[this_operand],
8302 					   bigdisp);
8303 
8304   exp = &disp_expressions[i.disp_operands];
8305   i.op[this_operand].disps = exp;
8306   i.disp_operands++;
8307   save_input_line_pointer = input_line_pointer;
8308   input_line_pointer = disp_start;
8309   END_STRING_AND_SAVE (disp_end);
8310 
8311 #ifndef GCC_ASM_O_HACK
8312 #define GCC_ASM_O_HACK 0
8313 #endif
8314 #if GCC_ASM_O_HACK
8315   END_STRING_AND_SAVE (disp_end + 1);
8316   if (i.types[this_operand].bitfield.baseIndex
8317       && displacement_string_end[-1] == '+')
8318     {
8319       /* This hack is to avoid a warning when using the "o"
8320 	 constraint within gcc asm statements.
8321 	 For instance:
8322 
8323 	 #define _set_tssldt_desc(n,addr,limit,type) \
8324 	 __asm__ __volatile__ ( \
8325 	 "movw %w2,%0\n\t" \
8326 	 "movw %w1,2+%0\n\t" \
8327 	 "rorl $16,%1\n\t" \
8328 	 "movb %b1,4+%0\n\t" \
8329 	 "movb %4,5+%0\n\t" \
8330 	 "movb $0,6+%0\n\t" \
8331 	 "movb %h1,7+%0\n\t" \
8332 	 "rorl $16,%1" \
8333 	 : "=o"(*(n)) : "q" (addr), "ri"(limit), "i"(type))
8334 
8335 	 This works great except that the output assembler ends
8336 	 up looking a bit weird if it turns out that there is
8337 	 no offset.  You end up producing code that looks like:
8338 
8339 	 #APP
8340 	 movw $235,(%eax)
8341 	 movw %dx,2+(%eax)
8342 	 rorl $16,%edx
8343 	 movb %dl,4+(%eax)
8344 	 movb $137,5+(%eax)
8345 	 movb $0,6+(%eax)
8346 	 movb %dh,7+(%eax)
8347 	 rorl $16,%edx
8348 	 #NO_APP
8349 
8350 	 So here we provide the missing zero.  */
8351 
8352       *displacement_string_end = '0';
8353     }
8354 #endif
8355   gotfree_input_line = lex_got (&i.reloc[this_operand], NULL, &types);
8356   if (gotfree_input_line)
8357     input_line_pointer = gotfree_input_line;
8358 
8359   exp_seg = expression (exp);
8360 
8361   SKIP_WHITESPACE ();
8362   if (*input_line_pointer)
8363     as_bad (_("junk `%s' after expression"), input_line_pointer);
8364 #if GCC_ASM_O_HACK
8365   RESTORE_END_STRING (disp_end + 1);
8366 #endif
8367   input_line_pointer = save_input_line_pointer;
8368   if (gotfree_input_line)
8369     {
8370       free (gotfree_input_line);
8371 
8372       if (exp->X_op == O_constant || exp->X_op == O_register)
8373 	exp->X_op = O_illegal;
8374     }
8375 
8376   ret = i386_finalize_displacement (exp_seg, exp, types, disp_start);
8377 
8378   RESTORE_END_STRING (disp_end);
8379 
8380   return ret;
8381 }
8382 
8383 static int
8384 i386_finalize_displacement (segT exp_seg ATTRIBUTE_UNUSED, expressionS *exp,
8385 			    i386_operand_type types, const char *disp_start)
8386 {
8387   i386_operand_type bigdisp;
8388   int ret = 1;
8389 
8390   /* We do this to make sure that the section symbol is in
8391      the symbol table.  We will ultimately change the relocation
8392      to be relative to the beginning of the section.  */
8393   if (i.reloc[this_operand] == BFD_RELOC_386_GOTOFF
8394       || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL
8395       || i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8396     {
8397       if (exp->X_op != O_symbol)
8398 	goto inv_disp;
8399 
8400       if (S_IS_LOCAL (exp->X_add_symbol)
8401 	  && S_GET_SEGMENT (exp->X_add_symbol) != undefined_section
8402 	  && S_GET_SEGMENT (exp->X_add_symbol) != expr_section)
8403 	section_symbol (S_GET_SEGMENT (exp->X_add_symbol));
8404       exp->X_op = O_subtract;
8405       exp->X_op_symbol = GOT_symbol;
8406       if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTPCREL)
8407 	i.reloc[this_operand] = BFD_RELOC_32_PCREL;
8408       else if (i.reloc[this_operand] == BFD_RELOC_X86_64_GOTOFF64)
8409 	i.reloc[this_operand] = BFD_RELOC_64;
8410       else
8411 	i.reloc[this_operand] = BFD_RELOC_32;
8412     }
8413 
8414   else if (exp->X_op == O_absent
8415 	   || exp->X_op == O_illegal
8416 	   || exp->X_op == O_big)
8417     {
8418     inv_disp:
8419       as_bad (_("missing or invalid displacement expression `%s'"),
8420 	      disp_start);
8421       ret = 0;
8422     }
8423 
8424   else if (flag_code == CODE_64BIT
8425 	   && !i.prefix[ADDR_PREFIX]
8426 	   && exp->X_op == O_constant)
8427     {
8428       /* Since the displacement is sign extended to 64bit, don't allow
8429 	 disp32, and turn off disp32s if it is out of range.  */
8430       i.types[this_operand].bitfield.disp32 = 0;
8431       if (!fits_in_signed_long (exp->X_add_number))
8432 	{
8433 	  i.types[this_operand].bitfield.disp32s = 0;
8434 	  if (i.types[this_operand].bitfield.baseindex)
8435 	    {
8436 	      as_bad (_("0x%lx out of range of signed 32bit displacement"),
8437 		      (long) exp->X_add_number);
8438 	      ret = 0;
8439 	    }
8440 	}
8441     }
8442 
8443 #if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
8444   else if (exp->X_op != O_constant
8445 	   && OUTPUT_FLAVOR == bfd_target_aout_flavour
8446 	   && exp_seg != absolute_section
8447 	   && exp_seg != text_section
8448 	   && exp_seg != data_section
8449 	   && exp_seg != bss_section
8450 	   && exp_seg != undefined_section
8451 	   && !bfd_is_com_section (exp_seg))
8452     {
8453       as_bad (_("unimplemented segment %s in operand"), exp_seg->name);
8454       ret = 0;
8455     }
8456 #endif
8457 
8458   /* Check if this is a displacement only operand.  */
8459   bigdisp = i.types[this_operand];
8460   bigdisp.bitfield.disp8 = 0;
8461   bigdisp.bitfield.disp16 = 0;
8462   bigdisp.bitfield.disp32 = 0;
8463   bigdisp.bitfield.disp32s = 0;
8464   bigdisp.bitfield.disp64 = 0;
8465   if (operand_type_all_zero (&bigdisp))
8466     i.types[this_operand] = operand_type_and (i.types[this_operand],
8467 					      types);
8468 
8469   return ret;
8470 }
8471 
8472 /* Make sure the memory operand we've been dealt is valid.
8473    Return 1 on success, 0 on a failure.  */
8474 
8475 static int
8476 i386_index_check (const char *operand_string)
8477 {
8478   const char *kind = "base/index";
8479   enum flag_code addr_mode;
8480 
8481   if (i.prefix[ADDR_PREFIX])
8482     addr_mode = flag_code == CODE_32BIT ? CODE_16BIT : CODE_32BIT;
8483   else
8484     {
8485       addr_mode = flag_code;
8486 
8487 #if INFER_ADDR_PREFIX
8488       if (i.mem_operands == 0)
8489 	{
8490 	  /* Infer address prefix from the first memory operand.  */
8491 	  const reg_entry *addr_reg = i.base_reg;
8492 
8493 	  if (addr_reg == NULL)
8494 	    addr_reg = i.index_reg;
8495 
8496 	  if (addr_reg)
8497 	    {
8498 	      if (addr_reg->reg_num == RegEip
8499 		  || addr_reg->reg_num == RegEiz
8500 		  || addr_reg->reg_type.bitfield.reg32)
8501 		addr_mode = CODE_32BIT;
8502 	      else if (flag_code != CODE_64BIT
8503 		       && addr_reg->reg_type.bitfield.reg16)
8504 		addr_mode = CODE_16BIT;
8505 
8506 	      if (addr_mode != flag_code)
8507 		{
8508 		  i.prefix[ADDR_PREFIX] = ADDR_PREFIX_OPCODE;
8509 		  i.prefixes += 1;
8510 		  /* Change the size of any displacement too.  At most one
8511 		     of Disp16 or Disp32 is set.
8512 		     FIXME.  There doesn't seem to be any real need for
8513 		     separate Disp16 and Disp32 flags.  The same goes for
8514 		     Imm16 and Imm32.  Removing them would probably clean
8515 		     up the code quite a lot.  */
8516 		  if (flag_code != CODE_64BIT
8517 		      && (i.types[this_operand].bitfield.disp16
8518 			  || i.types[this_operand].bitfield.disp32))
8519 		    i.types[this_operand]
8520 		      = operand_type_xor (i.types[this_operand], disp16_32);
8521 		}
8522 	    }
8523 	}
8524 #endif
8525     }
8526 
8527   if (current_templates->start->opcode_modifier.isstring
8528       && !current_templates->start->opcode_modifier.immext
8529       && (current_templates->end[-1].opcode_modifier.isstring
8530 	  || i.mem_operands))
8531     {
8532       /* Memory operands of string insns are special in that they only allow
8533 	 a single register (rDI, rSI, or rBX) as their memory address.  */
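      /* For example, "movs" only accepts (%esi)/(%si)/(%rsi) as its source
	 and (%edi)/(%di)/(%rdi) as its destination, while "xlat" only
	 accepts (%ebx)/(%bx)/(%rbx), depending on the address mode.  */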
8534       const reg_entry *expected_reg;
8535       static const char *di_si[][2] =
8536 	{
8537 	  { "esi", "edi" },
8538 	  { "si", "di" },
8539 	  { "rsi", "rdi" }
8540 	};
8541       static const char *bx[] = { "ebx", "bx", "rbx" };
8542 
8543       kind = "string address";
8544 
8545       if (current_templates->start->opcode_modifier.repprefixok)
8546 	{
8547 	  i386_operand_type type = current_templates->end[-1].operand_types[0];
8548 
8549 	  if (!type.bitfield.baseindex
8550 	      || ((!i.mem_operands != !intel_syntax)
8551 		  && current_templates->end[-1].operand_types[1]
8552 		     .bitfield.baseindex))
8553 	    type = current_templates->end[-1].operand_types[1];
8554 	  expected_reg = hash_find (reg_hash,
8555 				    di_si[addr_mode][type.bitfield.esseg]);
8556 
8557 	}
8558       else
8559 	expected_reg = hash_find (reg_hash, bx[addr_mode]);
8560 
8561       if (i.base_reg != expected_reg
8562 	  || i.index_reg
8563 	  || operand_type_check (i.types[this_operand], disp))
8564 	{
8565 	  /* The second memory operand must have the same size as
8566 	     the first one.  */
8567 	  if (i.mem_operands
8568 	      && i.base_reg
8569 	      && !((addr_mode == CODE_64BIT
8570 		    && i.base_reg->reg_type.bitfield.reg64)
8571 		   || (addr_mode == CODE_32BIT
8572 		       ? i.base_reg->reg_type.bitfield.reg32
8573 		       : i.base_reg->reg_type.bitfield.reg16)))
8574 	    goto bad_address;
8575 
8576 	  as_warn (_("`%s' is not valid here (expected `%c%s%s%c')"),
8577 		   operand_string,
8578 		   intel_syntax ? '[' : '(',
8579 		   register_prefix,
8580 		   expected_reg->reg_name,
8581 		   intel_syntax ? ']' : ')');
8582 	  return 1;
8583 	}
8584       else
8585 	return 1;
8586 
8587 bad_address:
8588       as_bad (_("`%s' is not a valid %s expression"),
8589 	      operand_string, kind);
8590       return 0;
8591     }
8592   else
8593     {
8594       if (addr_mode != CODE_16BIT)
8595 	{
8596 	  /* 32-bit/64-bit checks.  */
8597 	  if ((i.base_reg
8598 	       && (addr_mode == CODE_64BIT
8599 		   ? !i.base_reg->reg_type.bitfield.reg64
8600 		   : !i.base_reg->reg_type.bitfield.reg32)
8601 	       && (i.index_reg
8602 		   || (i.base_reg->reg_num
8603 		       != (addr_mode == CODE_64BIT ? RegRip : RegEip))))
8604 	      || (i.index_reg
8605 		  && !i.index_reg->reg_type.bitfield.regxmm
8606 		  && !i.index_reg->reg_type.bitfield.regymm
8607 		  && !i.index_reg->reg_type.bitfield.regzmm
8608 		  && ((addr_mode == CODE_64BIT
8609 		       ? !(i.index_reg->reg_type.bitfield.reg64
8610 			   || i.index_reg->reg_num == RegRiz)
8611 		       : !(i.index_reg->reg_type.bitfield.reg32
8612 			   || i.index_reg->reg_num == RegEiz))
8613 		      || !i.index_reg->reg_type.bitfield.baseindex)))
8614 	    goto bad_address;
8615 
8616 	  /* bndmk, bndldx, and bndstx have special restrictions. */
8617 	  if (current_templates->start->base_opcode == 0xf30f1b
8618 	      || (current_templates->start->base_opcode & ~1) == 0x0f1a)
8619 	    {
8620 	      /* They cannot use RIP-relative addressing. */
8621 	      if (i.base_reg && i.base_reg->reg_num == RegRip)
8622 		{
8623 		  as_bad (_("`%s' cannot be used here"), operand_string);
8624 		  return 0;
8625 		}
8626 
8627 	      /* bndldx and bndstx ignore their scale factor. */
8628 	      if (current_templates->start->base_opcode != 0xf30f1b
8629 		  && i.log2_scale_factor)
8630 		as_warn (_("register scaling is being ignored here"));
8631 	    }
8632 	}
8633       else
8634 	{
8635 	  /* 16-bit checks.  */
8636 	  if ((i.base_reg
8637 	       && (!i.base_reg->reg_type.bitfield.reg16
8638 		   || !i.base_reg->reg_type.bitfield.baseindex))
8639 	      || (i.index_reg
8640 		  && (!i.index_reg->reg_type.bitfield.reg16
8641 		      || !i.index_reg->reg_type.bitfield.baseindex
8642 		      || !(i.base_reg
8643 			   && i.base_reg->reg_num < 6
8644 			   && i.index_reg->reg_num >= 6
8645 			   && i.log2_scale_factor == 0))))
8646 	    goto bad_address;
8647 	}
8648     }
8649   return 1;
8650 }
8651 
8652 /* Handle vector immediates.  */
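/* These are the EVEX rounding-control / suppress-all-exceptions
   pseudo-operands, e.g. "{rn-sae}", "{rd-sae}", "{ru-sae}", "{rz-sae}"
   or plain "{sae}".  */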
8653 
8654 static int
8655 RC_SAE_immediate (const char *imm_start)
8656 {
8657   unsigned int match_found, j;
8658   const char *pstr = imm_start;
8659   expressionS *exp;
8660 
8661   if (*pstr != '{')
8662     return 0;
8663 
8664   pstr++;
8665   match_found = 0;
8666   for (j = 0; j < ARRAY_SIZE (RC_NamesTable); j++)
8667     {
8668       if (!strncmp (pstr, RC_NamesTable[j].name, RC_NamesTable[j].len))
8669 	{
8670 	  if (!i.rounding)
8671 	    {
8672 	      rc_op.type = RC_NamesTable[j].type;
8673 	      rc_op.operand = this_operand;
8674 	      i.rounding = &rc_op;
8675 	    }
8676 	  else
8677 	    {
8678 	      as_bad (_("duplicated `%s'"), imm_start);
8679 	      return 0;
8680 	    }
8681 	  pstr += RC_NamesTable[j].len;
8682 	  match_found = 1;
8683 	  break;
8684 	}
8685     }
8686   if (!match_found)
8687     return 0;
8688 
8689   if (*pstr++ != '}')
8690     {
8691       as_bad (_("Missing '}': '%s'"), imm_start);
8692       return 0;
8693     }
8694   /* The RC/SAE immediate string should contain nothing more.  */
8695   if (*pstr != 0)
8696     {
8697       as_bad (_("Junk after '}': '%s'"), imm_start);
8698       return 0;
8699     }
8700 
8701   exp = &im_expressions[i.imm_operands++];
8702   i.op[this_operand].imms = exp;
8703 
8704   exp->X_op = O_constant;
8705   exp->X_add_number = 0;
8706   exp->X_add_symbol = (symbolS *) 0;
8707   exp->X_op_symbol = (symbolS *) 0;
8708 
8709   i.types[this_operand].bitfield.imm8 = 1;
8710   return 1;
8711 }
8712 
8713 /* Only string instructions can have a second memory operand, so
8714    reduce current_templates to just those if it contains any.  */
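/* E.g. "movsd" names both the string move and an SSE2 scalar move, and
   only the former may take two memory operands such as
   "%ds:(%esi), %es:(%edi)".  */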
8715 static int
8716 maybe_adjust_templates (void)
8717 {
8718   const insn_template *t;
8719 
8720   gas_assert (i.mem_operands == 1);
8721 
8722   for (t = current_templates->start; t < current_templates->end; ++t)
8723     if (t->opcode_modifier.isstring)
8724       break;
8725 
8726   if (t < current_templates->end)
8727     {
8728       static templates aux_templates;
8729       bfd_boolean recheck;
8730 
8731       aux_templates.start = t;
8732       for (; t < current_templates->end; ++t)
8733 	if (!t->opcode_modifier.isstring)
8734 	  break;
8735       aux_templates.end = t;
8736 
8737       /* Determine whether to re-check the first memory operand.  */
8738       recheck = (aux_templates.start != current_templates->start
8739 		 || t != current_templates->end);
8740 
8741       current_templates = &aux_templates;
8742 
8743       if (recheck)
8744 	{
8745 	  i.mem_operands = 0;
8746 	  if (i.memop1_string != NULL
8747 	      && i386_index_check (i.memop1_string) == 0)
8748 	    return 0;
8749 	  i.mem_operands = 1;
8750 	}
8751     }
8752 
8753   return 1;
8754 }
8755 
8756 /* Parse OPERAND_STRING into the i386_insn structure I.  Returns zero
8757    on error.  */
8758 
8759 static int
8760 i386_att_operand (char *operand_string)
8761 {
8762   const reg_entry *r;
8763   char *end_op;
8764   char *op_string = operand_string;
8765 
8766   if (is_space_char (*op_string))
8767     ++op_string;
8768 
8769   /* We check for an absolute prefix (differentiating, for example,
8770      'jmp pc_relative_label' from 'jmp *absolute_label').  */
8771   if (*op_string == ABSOLUTE_PREFIX)
8772     {
8773       ++op_string;
8774       if (is_space_char (*op_string))
8775 	++op_string;
8776       i.types[this_operand].bitfield.jumpabsolute = 1;
8777     }
8778 
8779   /* Check if operand is a register.  */
8780   if ((r = parse_register (op_string, &end_op)) != NULL)
8781     {
8782       i386_operand_type temp;
8783 
8784       /* Check for a segment override by searching for ':' after a
8785 	 segment register.  */
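      /* E.g. in "%gs:12(%eax)" the "%gs" just parsed is followed by ':',
	 so it is a segment override and the rest of the operand is
	 handled as a memory reference.  */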
8786       op_string = end_op;
8787       if (is_space_char (*op_string))
8788 	++op_string;
8789       if (*op_string == ':'
8790 	  && (r->reg_type.bitfield.sreg2
8791 	      || r->reg_type.bitfield.sreg3))
8792 	{
8793 	  switch (r->reg_num)
8794 	    {
8795 	    case 0:
8796 	      i.seg[i.mem_operands] = &es;
8797 	      break;
8798 	    case 1:
8799 	      i.seg[i.mem_operands] = &cs;
8800 	      break;
8801 	    case 2:
8802 	      i.seg[i.mem_operands] = &ss;
8803 	      break;
8804 	    case 3:
8805 	      i.seg[i.mem_operands] = &ds;
8806 	      break;
8807 	    case 4:
8808 	      i.seg[i.mem_operands] = &fs;
8809 	      break;
8810 	    case 5:
8811 	      i.seg[i.mem_operands] = &gs;
8812 	      break;
8813 	    }
8814 
8815 	  /* Skip the ':' and whitespace.  */
8816 	  ++op_string;
8817 	  if (is_space_char (*op_string))
8818 	    ++op_string;
8819 
8820 	  if (!is_digit_char (*op_string)
8821 	      && !is_identifier_char (*op_string)
8822 	      && *op_string != '('
8823 	      && *op_string != ABSOLUTE_PREFIX)
8824 	    {
8825 	      as_bad (_("bad memory operand `%s'"), op_string);
8826 	      return 0;
8827 	    }
8828 	  /* Handle case of %es:*foo.  */
8829 	  if (*op_string == ABSOLUTE_PREFIX)
8830 	    {
8831 	      ++op_string;
8832 	      if (is_space_char (*op_string))
8833 		++op_string;
8834 	      i.types[this_operand].bitfield.jumpabsolute = 1;
8835 	    }
8836 	  goto do_memory_reference;
8837 	}
8838 
8839       /* Handle vector operations.  */
8840       if (*op_string == '{')
8841 	{
8842 	  op_string = check_VecOperations (op_string, NULL);
8843 	  if (op_string == NULL)
8844 	    return 0;
8845 	}
8846 
8847       if (*op_string)
8848 	{
8849 	  as_bad (_("junk `%s' after register"), op_string);
8850 	  return 0;
8851 	}
8852       temp = r->reg_type;
8853       temp.bitfield.baseindex = 0;
8854       i.types[this_operand] = operand_type_or (i.types[this_operand],
8855 					       temp);
8856       i.types[this_operand].bitfield.unspecified = 0;
8857       i.op[this_operand].regs = r;
8858       i.reg_operands++;
8859     }
8860   else if (*op_string == REGISTER_PREFIX)
8861     {
8862       as_bad (_("bad register name `%s'"), op_string);
8863       return 0;
8864     }
8865   else if (*op_string == IMMEDIATE_PREFIX)
8866     {
8867       ++op_string;
8868       if (i.types[this_operand].bitfield.jumpabsolute)
8869 	{
8870 	  as_bad (_("immediate operand illegal with absolute jump"));
8871 	  return 0;
8872 	}
8873       if (!i386_immediate (op_string))
8874 	return 0;
8875     }
8876   else if (RC_SAE_immediate (operand_string))
8877     {
8878       /* If it is a RC or SAE immediate, do nothing.  */
8879       ;
8880     }
8881   else if (is_digit_char (*op_string)
8882 	   || is_identifier_char (*op_string)
8883 	   || *op_string == '"'
8884 	   || *op_string == '(')
8885     {
8886       /* This is a memory reference of some sort.  */
8887       char *base_string;
8888 
8889       /* Start and end of displacement string expression (if found).  */
8890       char *displacement_string_start;
8891       char *displacement_string_end;
8892       char *vop_start;
8893 
8894     do_memory_reference:
8895       if (i.mem_operands == 1 && !maybe_adjust_templates ())
8896 	return 0;
8897       if ((i.mem_operands == 1
8898 	   && !current_templates->start->opcode_modifier.isstring)
8899 	  || i.mem_operands == 2)
8900 	{
8901 	  as_bad (_("too many memory references for `%s'"),
8902 		  current_templates->start->name);
8903 	  return 0;
8904 	}
8905 
8906       /* Check for base index form.  We detect the base index form by
8907 	 looking for an ')' at the end of the operand, searching
8908 	 for the '(' matching it, and finding a REGISTER_PREFIX or ','
8909 	 after the '('.  */
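      /* E.g. "-8(%ebp,%ecx,4)" yields displacement -8, base %ebp,
	 index %ecx and scale factor 4.  */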
8910       base_string = op_string + strlen (op_string);
8911 
8912       /* Handle vector operations.  */
8913       vop_start = strchr (op_string, '{');
8914       if (vop_start && vop_start < base_string)
8915 	{
8916 	  if (check_VecOperations (vop_start, base_string) == NULL)
8917 	    return 0;
8918 	  base_string = vop_start;
8919 	}
8920 
8921       --base_string;
8922       if (is_space_char (*base_string))
8923 	--base_string;
8924 
8925       /* If we only have a displacement, set it up to be parsed later.  */
8926       displacement_string_start = op_string;
8927       displacement_string_end = base_string + 1;
8928 
8929       if (*base_string == ')')
8930 	{
8931 	  char *temp_string;
8932 	  unsigned int parens_balanced = 1;
8933 	  /* We've already checked that the numbers of left and right ()'s
8934 	     are equal, so this loop will not be infinite.  */
8935 	  do
8936 	    {
8937 	      base_string--;
8938 	      if (*base_string == ')')
8939 		parens_balanced++;
8940 	      if (*base_string == '(')
8941 		parens_balanced--;
8942 	    }
8943 	  while (parens_balanced);
8944 
8945 	  temp_string = base_string;
8946 
8947 	  /* Skip past '(' and whitespace.  */
8948 	  ++base_string;
8949 	  if (is_space_char (*base_string))
8950 	    ++base_string;
8951 
8952 	  if (*base_string == ','
8953 	      || ((i.base_reg = parse_register (base_string, &end_op))
8954 		  != NULL))
8955 	    {
8956 	      displacement_string_end = temp_string;
8957 
8958 	      i.types[this_operand].bitfield.baseindex = 1;
8959 
8960 	      if (i.base_reg)
8961 		{
8962 		  base_string = end_op;
8963 		  if (is_space_char (*base_string))
8964 		    ++base_string;
8965 		}
8966 
8967 	      /* There may be an index reg or scale factor here.  */
8968 	      if (*base_string == ',')
8969 		{
8970 		  ++base_string;
8971 		  if (is_space_char (*base_string))
8972 		    ++base_string;
8973 
8974 		  if ((i.index_reg = parse_register (base_string, &end_op))
8975 		      != NULL)
8976 		    {
8977 		      base_string = end_op;
8978 		      if (is_space_char (*base_string))
8979 			++base_string;
8980 		      if (*base_string == ',')
8981 			{
8982 			  ++base_string;
8983 			  if (is_space_char (*base_string))
8984 			    ++base_string;
8985 			}
8986 		      else if (*base_string != ')')
8987 			{
8988 			  as_bad (_("expecting `,' or `)' "
8989 				    "after index register in `%s'"),
8990 				  operand_string);
8991 			  return 0;
8992 			}
8993 		    }
8994 		  else if (*base_string == REGISTER_PREFIX)
8995 		    {
8996 		      end_op = strchr (base_string, ',');
8997 		      if (end_op)
8998 			*end_op = '\0';
8999 		      as_bad (_("bad register name `%s'"), base_string);
9000 		      return 0;
9001 		    }
9002 
9003 		  /* Check for scale factor.  */
9004 		  if (*base_string != ')')
9005 		    {
9006 		      char *end_scale = i386_scale (base_string);
9007 
9008 		      if (!end_scale)
9009 			return 0;
9010 
9011 		      base_string = end_scale;
9012 		      if (is_space_char (*base_string))
9013 			++base_string;
9014 		      if (*base_string != ')')
9015 			{
9016 			  as_bad (_("expecting `)' "
9017 				    "after scale factor in `%s'"),
9018 				  operand_string);
9019 			  return 0;
9020 			}
9021 		    }
9022 		  else if (!i.index_reg)
9023 		    {
9024 		      as_bad (_("expecting index register or scale factor "
9025 				"after `,'; got '%c'"),
9026 			      *base_string);
9027 		      return 0;
9028 		    }
9029 		}
9030 	      else if (*base_string != ')')
9031 		{
9032 		  as_bad (_("expecting `,' or `)' "
9033 			    "after base register in `%s'"),
9034 			  operand_string);
9035 		  return 0;
9036 		}
9037 	    }
9038 	  else if (*base_string == REGISTER_PREFIX)
9039 	    {
9040 	      end_op = strchr (base_string, ',');
9041 	      if (end_op)
9042 		*end_op = '\0';
9043 	      as_bad (_("bad register name `%s'"), base_string);
9044 	      return 0;
9045 	    }
9046 	}
9047 
9048       /* If there's an expression beginning the operand, parse it,
9049 	 assuming displacement_string_start and
9050 	 displacement_string_end are meaningful.  */
9051       if (displacement_string_start != displacement_string_end)
9052 	{
9053 	  if (!i386_displacement (displacement_string_start,
9054 				  displacement_string_end))
9055 	    return 0;
9056 	}
9057 
9058       /* Special case for (%dx) while doing input/output op.  */
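      /* E.g. "inb (%dx), %al": the (%dx) operand names the port register
	 rather than a real memory reference, so give it the dedicated
	 operand type instead of treating it as memory.  */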
9059       if (i.base_reg
9060 	  && operand_type_equal (&i.base_reg->reg_type,
9061 				 &reg16_inoutportreg)
9062 	  && i.index_reg == 0
9063 	  && i.log2_scale_factor == 0
9064 	  && i.seg[i.mem_operands] == 0
9065 	  && !operand_type_check (i.types[this_operand], disp))
9066 	{
9067 	  i.types[this_operand] = inoutportreg;
9068 	  return 1;
9069 	}
9070 
9071       if (i386_index_check (operand_string) == 0)
9072 	return 0;
9073       i.types[this_operand].bitfield.mem = 1;
9074       if (i.mem_operands == 0)
9075 	i.memop1_string = xstrdup (operand_string);
9076       i.mem_operands++;
9077     }
9078   else
9079     {
9080       /* It's not a memory operand; argh!  */
9081       as_bad (_("invalid char %s beginning operand %d `%s'"),
9082 	      output_invalid (*op_string),
9083 	      this_operand + 1,
9084 	      op_string);
9085       return 0;
9086     }
9087   return 1;			/* Normal return.  */
9088 }
9089 
9090 /* Calculate the maximum variable size (i.e., excluding fr_fix)
9091    that an rs_machine_dependent frag may reach.  */
9092 
9093 unsigned int
9094 i386_frag_max_var (fragS *frag)
9095 {
9096   /* The only relaxable frags are for jumps.
9097      Unconditional jumps can grow by 4 bytes and others by 5 bytes.  */
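  /* That is, the variable part is at most the 4-byte displacement for an
     unconditional jump, plus one extra opcode byte (the 0x0f escape) for
     conditional jumps.  */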
9098   gas_assert (frag->fr_type == rs_machine_dependent);
9099   return TYPE_FROM_RELAX_STATE (frag->fr_subtype) == UNCOND_JUMP ? 4 : 5;
9100 }
9101 
9102 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9103 static int
9104 elf_symbol_resolved_in_segment_p (symbolS *fr_symbol, offsetT fr_var)
9105 {
9106   /* STT_GNU_IFUNC symbol must go through PLT.  */
9107   if ((symbol_get_bfdsym (fr_symbol)->flags
9108        & BSF_GNU_INDIRECT_FUNCTION) != 0)
9109     return 0;
9110 
9111   if (!S_IS_EXTERNAL (fr_symbol))
9112     /* Symbol may be weak or local.  */
9113     return !S_IS_WEAK (fr_symbol);
9114 
9115   /* Global symbols with non-default visibility can't be preempted. */
9116   if (ELF_ST_VISIBILITY (S_GET_OTHER (fr_symbol)) != STV_DEFAULT)
9117     return 1;
9118 
9119   if (fr_var != NO_RELOC)
9120     switch ((enum bfd_reloc_code_real) fr_var)
9121       {
9122       case BFD_RELOC_386_PLT32:
9123       case BFD_RELOC_X86_64_PLT32:
9124 	/* A symbol with a PLT relocation may be preempted. */
9125 	return 0;
9126       default:
9127 	abort ();
9128       }
9129 
9130   /* Global symbols with default visibility in a shared library may be
9131      preempted by another definition.  */
9132   return !shared;
9133 }
9134 #endif
9135 
9136 /* md_estimate_size_before_relax()
9137 
9138    Called just before relax() for rs_machine_dependent frags.  The x86
9139    assembler uses these frags to handle variable size jump
9140    instructions.
9141 
9142    Any symbol that is now undefined will not become defined.
9143    Return the correct fr_subtype in the frag.
9144    Return the initial "guess for variable size of frag" to caller.
9145    The guess is actually the growth beyond the fixed part.  Whatever
9146    we do to grow the fixed or variable part contributes to our
9147    returned value.  */
9148 
9149 int
9150 md_estimate_size_before_relax (fragS *fragP, segT segment)
9151 {
9152   /* We've already got fragP->fr_subtype right;  all we have to do is
9153      check for un-relaxable symbols.  On an ELF system, we can't relax
9154      an externally visible symbol, because it may be overridden by a
9155      shared library.  */
9156   if (S_GET_SEGMENT (fragP->fr_symbol) != segment
9157 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9158       || (IS_ELF
9159 	  && !elf_symbol_resolved_in_segment_p (fragP->fr_symbol,
9160 						fragP->fr_var))
9161 #endif
9162 #if defined (OBJ_COFF) && defined (TE_PE)
9163       || (OUTPUT_FLAVOR == bfd_target_coff_flavour
9164 	  && S_IS_WEAK (fragP->fr_symbol))
9165 #endif
9166       )
9167     {
9168       /* Symbol is undefined in this segment, or we need to keep a
9169 	 reloc so that weak symbols can be overridden.  */
9170       int size = (fragP->fr_subtype & CODE16) ? 2 : 4;
9171       enum bfd_reloc_code_real reloc_type;
9172       unsigned char *opcode;
9173       int old_fr_fix;
9174 
9175       if (fragP->fr_var != NO_RELOC)
9176 	reloc_type = (enum bfd_reloc_code_real) fragP->fr_var;
9177       else if (size == 2)
9178 	reloc_type = BFD_RELOC_16_PCREL;
9179       else
9180 	reloc_type = BFD_RELOC_32_PCREL;
9181 
9182       old_fr_fix = fragP->fr_fix;
9183       opcode = (unsigned char *) fragP->fr_opcode;
9184 
9185       switch (TYPE_FROM_RELAX_STATE (fragP->fr_subtype))
9186 	{
9187 	case UNCOND_JUMP:
9188 	  /* Make jmp (0xeb) a (d)word displacement jump.  */
9189 	  opcode[0] = 0xe9;
9190 	  fragP->fr_fix += size;
9191 	  fix_new (fragP, old_fr_fix, size,
9192 		   fragP->fr_symbol,
9193 		   fragP->fr_offset, 1,
9194 		   reloc_type);
9195 	  break;
9196 
9197 	case COND_JUMP86:
9198 	  if (size == 2
9199 	      && (!no_cond_jump_promotion || fragP->fr_var != NO_RELOC))
9200 	    {
9201 	      /* Negate the condition, and branch past an
9202 		 unconditional jump.  */
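	      /* E.g. a 16-bit "je L" becomes a "jne" over the following
		 3-byte "jmp L" (0xe9 plus a 2-byte displacement).  */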
9203 	      opcode[0] ^= 1;
9204 	      opcode[1] = 3;
9205 	      /* Insert an unconditional jump.  */
9206 	      opcode[2] = 0xe9;
9207 	      /* We added two extra opcode bytes, and have a two byte
9208 		 offset.  */
9209 	      fragP->fr_fix += 2 + 2;
9210 	      fix_new (fragP, old_fr_fix + 2, 2,
9211 		       fragP->fr_symbol,
9212 		       fragP->fr_offset, 1,
9213 		       reloc_type);
9214 	      break;
9215 	    }
9216 	  /* Fall through.  */
9217 
9218 	case COND_JUMP:
9219 	  if (no_cond_jump_promotion && fragP->fr_var == NO_RELOC)
9220 	    {
9221 	      fixS *fixP;
9222 
9223 	      fragP->fr_fix += 1;
9224 	      fixP = fix_new (fragP, old_fr_fix, 1,
9225 			      fragP->fr_symbol,
9226 			      fragP->fr_offset, 1,
9227 			      BFD_RELOC_8_PCREL);
9228 	      fixP->fx_signed = 1;
9229 	      break;
9230 	    }
9231 
9232 	  /* This changes the byte-displacement jump 0x7N
9233 	     to the (d)word-displacement jump 0x0f,0x8N.  */
9234 	  opcode[1] = opcode[0] + 0x10;
9235 	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9236 	  /* We've added an opcode byte.  */
9237 	  fragP->fr_fix += 1 + size;
9238 	  fix_new (fragP, old_fr_fix + 1, size,
9239 		   fragP->fr_symbol,
9240 		   fragP->fr_offset, 1,
9241 		   reloc_type);
9242 	  break;
9243 
9244 	default:
9245 	  BAD_CASE (fragP->fr_subtype);
9246 	  break;
9247 	}
9248       frag_wane (fragP);
9249       return fragP->fr_fix - old_fr_fix;
9250     }
9251 
9252   /* Guess size depending on current relax state.  Initially the relax
9253      state will correspond to a short jump and we return 1, because
9254      the variable part of the frag (the branch offset) is one byte
9255      long.  However, we can relax a section more than once and in that
9256      case we must either set fr_subtype back to the unrelaxed state,
9257      or return the value for the appropriate branch.  */
9258   return md_relax_table[fragP->fr_subtype].rlx_length;
9259 }
9260 
9261 /* Called after relax() is finished.
9262 
9263    In:	Address of frag.
9264 	fr_type == rs_machine_dependent.
9265 	fr_subtype is what the address relaxed to.
9266 
9267    Out:	Any fixSs and constants are set up.
9268 	Caller will turn frag into a ".space 0".  */
9269 
9270 void
9271 md_convert_frag (bfd *abfd ATTRIBUTE_UNUSED, segT sec ATTRIBUTE_UNUSED,
9272                  fragS *fragP)
9273 {
9274   unsigned char *opcode;
9275   unsigned char *where_to_put_displacement = NULL;
9276   offsetT target_address;
9277   offsetT opcode_address;
9278   unsigned int extension = 0;
9279   offsetT displacement_from_opcode_start;
9280 
9281   opcode = (unsigned char *) fragP->fr_opcode;
9282 
9283   /* Address we want to reach in file space.  */
9284   target_address = S_GET_VALUE (fragP->fr_symbol) + fragP->fr_offset;
9285 
9286   /* Address opcode resides at in file space.  */
9287   opcode_address = fragP->fr_address + fragP->fr_fix;
9288 
9289   /* Displacement from opcode start to fill into instruction.  */
9290   displacement_from_opcode_start = target_address - opcode_address;
9291 
9292   if ((fragP->fr_subtype & BIG) == 0)
9293     {
9294       /* Don't have to change opcode.  */
9295       extension = 1;		/* 1 opcode + 1 displacement  */
9296       where_to_put_displacement = &opcode[1];
9297     }
9298   else
9299     {
9300       if (no_cond_jump_promotion
9301 	  && TYPE_FROM_RELAX_STATE (fragP->fr_subtype) != UNCOND_JUMP)
9302 	as_warn_where (fragP->fr_file, fragP->fr_line,
9303 		       _("long jump required"));
9304 
9305       switch (fragP->fr_subtype)
9306 	{
9307 	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG):
9308 	  extension = 4;		/* 1 opcode + 4 displacement  */
9309 	  opcode[0] = 0xe9;
9310 	  where_to_put_displacement = &opcode[1];
9311 	  break;
9312 
9313 	case ENCODE_RELAX_STATE (UNCOND_JUMP, BIG16):
9314 	  extension = 2;		/* 1 opcode + 2 displacement  */
9315 	  opcode[0] = 0xe9;
9316 	  where_to_put_displacement = &opcode[1];
9317 	  break;
9318 
9319 	case ENCODE_RELAX_STATE (COND_JUMP, BIG):
9320 	case ENCODE_RELAX_STATE (COND_JUMP86, BIG):
9321 	  extension = 5;		/* 2 opcode + 4 displacement  */
9322 	  opcode[1] = opcode[0] + 0x10;
9323 	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9324 	  where_to_put_displacement = &opcode[2];
9325 	  break;
9326 
9327 	case ENCODE_RELAX_STATE (COND_JUMP, BIG16):
9328 	  extension = 3;		/* 2 opcode + 2 displacement  */
9329 	  opcode[1] = opcode[0] + 0x10;
9330 	  opcode[0] = TWO_BYTE_OPCODE_ESCAPE;
9331 	  where_to_put_displacement = &opcode[2];
9332 	  break;
9333 
9334 	case ENCODE_RELAX_STATE (COND_JUMP86, BIG16):
9335 	  extension = 4;
9336 	  opcode[0] ^= 1;
9337 	  opcode[1] = 3;
9338 	  opcode[2] = 0xe9;
9339 	  where_to_put_displacement = &opcode[3];
9340 	  break;
9341 
9342 	default:
9343 	  BAD_CASE (fragP->fr_subtype);
9344 	  break;
9345 	}
9346     }
9347 
9348   /* If the size is less than four we are sure that the operand fits,
9349      but if it's 4, then the displacement could be larger than
9350      +/- 2GB.  */
9351   if (DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype) == 4
9352       && object_64bit
9353       && ((addressT) (displacement_from_opcode_start - extension
9354 		      + ((addressT) 1 << 31))
9355 	  > (((addressT) 2 << 31) - 1)))
9356     {
9357       as_bad_where (fragP->fr_file, fragP->fr_line,
9358 		    _("jump target out of range"));
9359       /* Make us emit 0.  */
9360       displacement_from_opcode_start = extension;
9361     }
9362   /* Now put displacement after opcode.  */
9363   md_number_to_chars ((char *) where_to_put_displacement,
9364 		      (valueT) (displacement_from_opcode_start - extension),
9365 		      DISP_SIZE_FROM_RELAX_STATE (fragP->fr_subtype));
9366   fragP->fr_fix += extension;
9367 }
9368 
9369 /* Apply a fixup (fixP) to segment data, once it has been determined
9370    by our caller that we have all the info we need to fix it up.
9371 
9372    Parameter valP is the pointer to the value of the bits.
9373 
9374    On the 386, immediates, displacements, and data pointers are all in
9375    the same (little-endian) format, so we don't need to care about which
9376    we are handling.  */
9377 
9378 void
9379 md_apply_fix (fixS *fixP, valueT *valP, segT seg ATTRIBUTE_UNUSED)
9380 {
9381   char *p = fixP->fx_where + fixP->fx_frag->fr_literal;
9382   valueT value = *valP;
9383 
9384 #if !defined (TE_Mach)
9385   if (fixP->fx_pcrel)
9386     {
9387       switch (fixP->fx_r_type)
9388 	{
9389 	default:
9390 	  break;
9391 
9392 	case BFD_RELOC_64:
9393 	  fixP->fx_r_type = BFD_RELOC_64_PCREL;
9394 	  break;
9395 	case BFD_RELOC_32:
9396 	case BFD_RELOC_X86_64_32S:
9397 	  fixP->fx_r_type = BFD_RELOC_32_PCREL;
9398 	  break;
9399 	case BFD_RELOC_16:
9400 	  fixP->fx_r_type = BFD_RELOC_16_PCREL;
9401 	  break;
9402 	case BFD_RELOC_8:
9403 	  fixP->fx_r_type = BFD_RELOC_8_PCREL;
9404 	  break;
9405 	}
9406     }
9407 
9408   if (fixP->fx_addsy != NULL
9409       && (fixP->fx_r_type == BFD_RELOC_32_PCREL
9410 	  || fixP->fx_r_type == BFD_RELOC_64_PCREL
9411 	  || fixP->fx_r_type == BFD_RELOC_16_PCREL
9412 	  || fixP->fx_r_type == BFD_RELOC_8_PCREL)
9413       && !use_rela_relocations)
9414     {
9415       /* This is a hack.  There should be a better way to handle this.
9416 	 This covers for the fact that bfd_install_relocation will
9417 	 subtract the current location (for partial_inplace, PC relative
9418 	 relocations); see more below.  */
9419 #ifndef OBJ_AOUT
9420       if (IS_ELF
9421 #ifdef TE_PE
9422 	  || OUTPUT_FLAVOR == bfd_target_coff_flavour
9423 #endif
9424 	  )
9425 	value += fixP->fx_where + fixP->fx_frag->fr_address;
9426 #endif
9427 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9428       if (IS_ELF)
9429 	{
9430 	  segT sym_seg = S_GET_SEGMENT (fixP->fx_addsy);
9431 
9432 	  if ((sym_seg == seg
9433 	       || (symbol_section_p (fixP->fx_addsy)
9434 		   && sym_seg != absolute_section))
9435 	      && !generic_force_reloc (fixP))
9436 	    {
9437 	      /* Yes, we add the values in twice.  This is because
9438 		 bfd_install_relocation subtracts them out again.  I think
9439 		 bfd_install_relocation is broken, but I don't dare change
9440 		 it.  FIXME.  */
9441 	      value += fixP->fx_where + fixP->fx_frag->fr_address;
9442 	    }
9443 	}
9444 #endif
9445 #if defined (OBJ_COFF) && defined (TE_PE)
9446       /* For some reason, the PE format does not store a
9447 	 section address offset for a PC relative symbol.  */
9448       if (S_GET_SEGMENT (fixP->fx_addsy) != seg
9449 	  || S_IS_WEAK (fixP->fx_addsy))
9450 	value += md_pcrel_from (fixP);
9451 #endif
9452     }
9453 #if defined (OBJ_COFF) && defined (TE_PE)
9454   if (fixP->fx_addsy != NULL
9455       && S_IS_WEAK (fixP->fx_addsy)
9456       /* PR 16858: Do not modify weak function references.  */
9457       && ! fixP->fx_pcrel)
9458     {
9459 #if !defined (TE_PEP)
9460       /* For x86 PE weak function symbols are neither PC-relative
9461 	 nor do they set S_IS_FUNCTION.  So the only reliable way
9462 	 to detect them is to check the flags of their containing
9463 	 section.  */
9464       if (S_GET_SEGMENT (fixP->fx_addsy) != NULL
9465 	  && S_GET_SEGMENT (fixP->fx_addsy)->flags & SEC_CODE)
9466 	;
9467       else
9468 #endif
9469       value -= S_GET_VALUE (fixP->fx_addsy);
9470     }
9471 #endif
9472 
9473   /* Fix a few things - the dynamic linker expects certain values here,
9474      and we must not disappoint it.  */
9475 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9476   if (IS_ELF && fixP->fx_addsy)
9477     switch (fixP->fx_r_type)
9478       {
9479       case BFD_RELOC_386_PLT32:
9480       case BFD_RELOC_X86_64_PLT32:
9481 	/* Make the jump instruction point to the address of the operand.  At
9482 	   runtime we merely add the offset to the actual PLT entry.  */
9483 	value = -4;
9484 	break;
9485 
9486       case BFD_RELOC_386_TLS_GD:
9487       case BFD_RELOC_386_TLS_LDM:
9488       case BFD_RELOC_386_TLS_IE_32:
9489       case BFD_RELOC_386_TLS_IE:
9490       case BFD_RELOC_386_TLS_GOTIE:
9491       case BFD_RELOC_386_TLS_GOTDESC:
9492       case BFD_RELOC_X86_64_TLSGD:
9493       case BFD_RELOC_X86_64_TLSLD:
9494       case BFD_RELOC_X86_64_GOTTPOFF:
9495       case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
9496 	value = 0; /* Fully resolved at runtime.  No addend.  */
9497 	/* Fallthrough */
9498       case BFD_RELOC_386_TLS_LE:
9499       case BFD_RELOC_386_TLS_LDO_32:
9500       case BFD_RELOC_386_TLS_LE_32:
9501       case BFD_RELOC_X86_64_DTPOFF32:
9502       case BFD_RELOC_X86_64_DTPOFF64:
9503       case BFD_RELOC_X86_64_TPOFF32:
9504       case BFD_RELOC_X86_64_TPOFF64:
9505 	S_SET_THREAD_LOCAL (fixP->fx_addsy);
9506 	break;
9507 
9508       case BFD_RELOC_386_TLS_DESC_CALL:
9509       case BFD_RELOC_X86_64_TLSDESC_CALL:
9510 	value = 0; /* Fully resolved at runtime.  No addend.  */
9511 	S_SET_THREAD_LOCAL (fixP->fx_addsy);
9512 	fixP->fx_done = 0;
9513 	return;
9514 
9515       case BFD_RELOC_VTABLE_INHERIT:
9516       case BFD_RELOC_VTABLE_ENTRY:
9517 	fixP->fx_done = 0;
9518 	return;
9519 
9520       default:
9521 	break;
9522       }
9523 #endif /* defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)  */
9524   *valP = value;
9525 #endif /* !defined (TE_Mach)  */
9526 
9527   /* Are we finished with this relocation now?  */
9528   if (fixP->fx_addsy == NULL)
9529     fixP->fx_done = 1;
9530 #if defined (OBJ_COFF) && defined (TE_PE)
9531   else if (fixP->fx_addsy != NULL && S_IS_WEAK (fixP->fx_addsy))
9532     {
9533       fixP->fx_done = 0;
9534       /* Remember value for tc_gen_reloc.  */
9535       fixP->fx_addnumber = value;
9536       /* Clear out the frag for now.  */
9537       value = 0;
9538     }
9539 #endif
9540   else if (use_rela_relocations)
9541     {
9542       fixP->fx_no_overflow = 1;
9543       /* Remember value for tc_gen_reloc.  */
9544       fixP->fx_addnumber = value;
9545       value = 0;
9546     }
9547 
9548   md_number_to_chars (p, value, fixP->fx_size);
9549 }
9550 
9551 const char *
9552 md_atof (int type, char *litP, int *sizeP)
9553 {
9554   /* This outputs the LITTLENUMs in REVERSE order,
9555      in accord with the little-endian 386.  */
9556   return ieee_md_atof (type, litP, sizeP, FALSE);
9557 }
9558 
9559 static char output_invalid_buf[sizeof (unsigned char) * 2 + 6];
9560 
9561 static char *
9562 output_invalid (int c)
9563 {
9564   if (ISPRINT (c))
9565     snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9566 	      "'%c'", c);
9567   else
9568     snprintf (output_invalid_buf, sizeof (output_invalid_buf),
9569 	      "(0x%x)", (unsigned char) c);
9570   return output_invalid_buf;
9571 }
9572 
9573 /* REG_STRING starts *before* REGISTER_PREFIX.  */
9574 
9575 static const reg_entry *
9576 parse_real_register (char *reg_string, char **end_op)
9577 {
9578   char *s = reg_string;
9579   char *p;
9580   char reg_name_given[MAX_REG_NAME_SIZE + 1];
9581   const reg_entry *r;
9582 
9583   /* Skip possible REGISTER_PREFIX and possible whitespace.  */
9584   if (*s == REGISTER_PREFIX)
9585     ++s;
9586 
9587   if (is_space_char (*s))
9588     ++s;
9589 
9590   p = reg_name_given;
9591   while ((*p++ = register_chars[(unsigned char) *s]) != '\0')
9592     {
9593       if (p >= reg_name_given + MAX_REG_NAME_SIZE)
9594 	return (const reg_entry *) NULL;
9595       s++;
9596     }
9597 
9598   /* For naked regs, make sure that we are not dealing with an identifier.
9599      This prevents confusing an identifier like `eax_var' with register
9600      `eax'.  */
9601   if (allow_naked_reg && identifier_chars[(unsigned char) *s])
9602     return (const reg_entry *) NULL;
9603 
9604   *end_op = s;
9605 
9606   r = (const reg_entry *) hash_find (reg_hash, reg_name_given);
9607 
9608   /* Handle floating point regs, allowing spaces in the (i) part.  */
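  /* E.g. "%st(3)" and "%st ( 3 )" both resolve to the fourth
     floating point register.  */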
9609   if (r == i386_regtab /* %st is first entry of table  */)
9610     {
9611       if (is_space_char (*s))
9612 	++s;
9613       if (*s == '(')
9614 	{
9615 	  ++s;
9616 	  if (is_space_char (*s))
9617 	    ++s;
9618 	  if (*s >= '0' && *s <= '7')
9619 	    {
9620 	      int fpr = *s - '0';
9621 	      ++s;
9622 	      if (is_space_char (*s))
9623 		++s;
9624 	      if (*s == ')')
9625 		{
9626 		  *end_op = s + 1;
9627 		  r = (const reg_entry *) hash_find (reg_hash, "st(0)");
9628 		  know (r);
9629 		  return r + fpr;
9630 		}
9631 	    }
9632 	  /* We have "%st(" then garbage.  */
9633 	  return (const reg_entry *) NULL;
9634 	}
9635     }
9636 
9637   if (r == NULL || allow_pseudo_reg)
9638     return r;
9639 
9640   if (operand_type_all_zero (&r->reg_type))
9641     return (const reg_entry *) NULL;
9642 
9643   if ((r->reg_type.bitfield.reg32
9644        || r->reg_type.bitfield.sreg3
9645        || r->reg_type.bitfield.control
9646        || r->reg_type.bitfield.debug
9647        || r->reg_type.bitfield.test)
9648       && !cpu_arch_flags.bitfield.cpui386)
9649     return (const reg_entry *) NULL;
9650 
9651   if (r->reg_type.bitfield.floatreg
9652       && !cpu_arch_flags.bitfield.cpu8087
9653       && !cpu_arch_flags.bitfield.cpu287
9654       && !cpu_arch_flags.bitfield.cpu387)
9655     return (const reg_entry *) NULL;
9656 
9657   if (r->reg_type.bitfield.regmmx && !cpu_arch_flags.bitfield.cpuregmmx)
9658     return (const reg_entry *) NULL;
9659 
9660   if (r->reg_type.bitfield.regxmm && !cpu_arch_flags.bitfield.cpuregxmm)
9661     return (const reg_entry *) NULL;
9662 
9663   if (r->reg_type.bitfield.regymm && !cpu_arch_flags.bitfield.cpuregymm)
9664     return (const reg_entry *) NULL;
9665 
9666   if (r->reg_type.bitfield.regzmm && !cpu_arch_flags.bitfield.cpuregzmm)
9667     return (const reg_entry *) NULL;
9668 
9669   if (r->reg_type.bitfield.regmask
9670       && !cpu_arch_flags.bitfield.cpuregmask)
9671     return (const reg_entry *) NULL;
9672 
9673   /* Don't allow the fake index registers unless allow_index_reg is
9674      non-zero.  */
9674   if (!allow_index_reg
9675       && (r->reg_num == RegEiz || r->reg_num == RegRiz))
9676     return (const reg_entry *) NULL;
9677 
9678   /* The upper 16 vector registers are only available with VREX in
9679      64-bit mode.  */
9680   if ((r->reg_flags & RegVRex))
9681     {
9682       if (!cpu_arch_flags.bitfield.cpuvrex
9683 	  || flag_code != CODE_64BIT)
9684 	return (const reg_entry *) NULL;
9685 
9686       i.need_vrex = 1;
9687     }
9688 
9689   if (((r->reg_flags & (RegRex64 | RegRex))
9690        || r->reg_type.bitfield.reg64)
9691       && (!cpu_arch_flags.bitfield.cpulm
9692 	  || !operand_type_equal (&r->reg_type, &control))
9693       && flag_code != CODE_64BIT)
9694     return (const reg_entry *) NULL;
9695 
9696   if (r->reg_type.bitfield.sreg3 && r->reg_num == RegFlat && !intel_syntax)
9697     return (const reg_entry *) NULL;
9698 
9699   return r;
9700 }
9701 
9702 /* REG_STRING starts *before* REGISTER_PREFIX.  */
9703 
9704 static const reg_entry *
9705 parse_register (char *reg_string, char **end_op)
9706 {
9707   const reg_entry *r;
9708 
9709   if (*reg_string == REGISTER_PREFIX || allow_naked_reg)
9710     r = parse_real_register (reg_string, end_op);
9711   else
9712     r = NULL;
9713   if (!r)
9714     {
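      /* Not a literal register name; it may still be a symbol that was
	 equated to a register (and thus lives in reg_section),
	 e.g. ".set ptr, %esi".  */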
9715       char *save = input_line_pointer;
9716       char c;
9717       symbolS *symbolP;
9718 
9719       input_line_pointer = reg_string;
9720       c = get_symbol_name (&reg_string);
9721       symbolP = symbol_find (reg_string);
9722       if (symbolP && S_GET_SEGMENT (symbolP) == reg_section)
9723 	{
9724 	  const expressionS *e = symbol_get_value_expression (symbolP);
9725 
9726 	  know (e->X_op == O_register);
9727 	  know (e->X_add_number >= 0
9728 		&& (valueT) e->X_add_number < i386_regtab_size);
9729 	  r = i386_regtab + e->X_add_number;
9730 	  if ((r->reg_flags & RegVRex))
9731 	    i.need_vrex = 1;
9732 	  *end_op = input_line_pointer;
9733 	}
9734       *input_line_pointer = c;
9735       input_line_pointer = save;
9736     }
9737   return r;
9738 }
9739 
9740 int
9741 i386_parse_name (char *name, expressionS *e, char *nextcharP)
9742 {
9743   const reg_entry *r;
9744   char *end = input_line_pointer;
9745 
9746   *end = *nextcharP;
9747   r = parse_register (name, &input_line_pointer);
9748   if (r && end <= input_line_pointer)
9749     {
9750       *nextcharP = *input_line_pointer;
9751       *input_line_pointer = 0;
9752       e->X_op = O_register;
9753       e->X_add_number = r - i386_regtab;
9754       return 1;
9755     }
9756   input_line_pointer = end;
9757   *end = 0;
9758   return intel_syntax ? i386_intel_parse_name (name, e) : 0;
9759 }
9760 
9761 void
9762 md_operand (expressionS *e)
9763 {
9764   char *end;
9765   const reg_entry *r;
9766 
9767   switch (*input_line_pointer)
9768     {
9769     case REGISTER_PREFIX:
9770       r = parse_real_register (input_line_pointer, &end);
9771       if (r)
9772 	{
9773 	  e->X_op = O_register;
9774 	  e->X_add_number = r - i386_regtab;
9775 	  input_line_pointer = end;
9776 	}
9777       break;
9778 
9779     case '[':
9780       gas_assert (intel_syntax);
9781       end = input_line_pointer++;
9782       expression (e);
9783       if (*input_line_pointer == ']')
9784 	{
9785 	  ++input_line_pointer;
9786 	  e->X_op_symbol = make_expr_symbol (e);
9787 	  e->X_add_symbol = NULL;
9788 	  e->X_add_number = 0;
9789 	  e->X_op = O_index;
9790 	}
9791       else
9792 	{
9793 	  e->X_op = O_absent;
9794 	  input_line_pointer = end;
9795 	}
9796       break;
9797     }
9798 }
9799 
9800 
9801 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9802 const char *md_shortopts = "kVQ:sqn";
9803 #else
9804 const char *md_shortopts = "qn";
9805 #endif
9806 
9807 #define OPTION_32 (OPTION_MD_BASE + 0)
9808 #define OPTION_64 (OPTION_MD_BASE + 1)
9809 #define OPTION_DIVIDE (OPTION_MD_BASE + 2)
9810 #define OPTION_MARCH (OPTION_MD_BASE + 3)
9811 #define OPTION_MTUNE (OPTION_MD_BASE + 4)
9812 #define OPTION_MMNEMONIC (OPTION_MD_BASE + 5)
9813 #define OPTION_MSYNTAX (OPTION_MD_BASE + 6)
9814 #define OPTION_MINDEX_REG (OPTION_MD_BASE + 7)
9815 #define OPTION_MNAKED_REG (OPTION_MD_BASE + 8)
9816 #define OPTION_MOLD_GCC (OPTION_MD_BASE + 9)
9817 #define OPTION_MSSE2AVX (OPTION_MD_BASE + 10)
9818 #define OPTION_MSSE_CHECK (OPTION_MD_BASE + 11)
9819 #define OPTION_MOPERAND_CHECK (OPTION_MD_BASE + 12)
9820 #define OPTION_MAVXSCALAR (OPTION_MD_BASE + 13)
9821 #define OPTION_X32 (OPTION_MD_BASE + 14)
9822 #define OPTION_MADD_BND_PREFIX (OPTION_MD_BASE + 15)
9823 #define OPTION_MEVEXLIG (OPTION_MD_BASE + 16)
9824 #define OPTION_MEVEXWIG (OPTION_MD_BASE + 17)
9825 #define OPTION_MBIG_OBJ (OPTION_MD_BASE + 18)
9826 #define OPTION_MOMIT_LOCK_PREFIX (OPTION_MD_BASE + 19)
9827 #define OPTION_MEVEXRCIG (OPTION_MD_BASE + 20)
9828 #define OPTION_MSHARED (OPTION_MD_BASE + 21)
9829 #define OPTION_MAMD64 (OPTION_MD_BASE + 22)
9830 #define OPTION_MINTEL64 (OPTION_MD_BASE + 23)
9831 #define OPTION_MFENCE_AS_LOCK_ADD (OPTION_MD_BASE + 24)
9832 #define OPTION_MRELAX_RELOCATIONS (OPTION_MD_BASE + 25)
9833 
9834 struct option md_longopts[] =
9835 {
9836   {"32", no_argument, NULL, OPTION_32},
9837 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9838      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9839   {"64", no_argument, NULL, OPTION_64},
9840 #endif
9841 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9842   {"x32", no_argument, NULL, OPTION_X32},
9843   {"mshared", no_argument, NULL, OPTION_MSHARED},
9844 #endif
9845   {"divide", no_argument, NULL, OPTION_DIVIDE},
9846   {"march", required_argument, NULL, OPTION_MARCH},
9847   {"mtune", required_argument, NULL, OPTION_MTUNE},
9848   {"mmnemonic", required_argument, NULL, OPTION_MMNEMONIC},
9849   {"msyntax", required_argument, NULL, OPTION_MSYNTAX},
9850   {"mindex-reg", no_argument, NULL, OPTION_MINDEX_REG},
9851   {"mnaked-reg", no_argument, NULL, OPTION_MNAKED_REG},
9852   {"mold-gcc", no_argument, NULL, OPTION_MOLD_GCC},
9853   {"msse2avx", no_argument, NULL, OPTION_MSSE2AVX},
9854   {"msse-check", required_argument, NULL, OPTION_MSSE_CHECK},
9855   {"moperand-check", required_argument, NULL, OPTION_MOPERAND_CHECK},
9856   {"mavxscalar", required_argument, NULL, OPTION_MAVXSCALAR},
9857   {"madd-bnd-prefix", no_argument, NULL, OPTION_MADD_BND_PREFIX},
9858   {"mevexlig", required_argument, NULL, OPTION_MEVEXLIG},
9859   {"mevexwig", required_argument, NULL, OPTION_MEVEXWIG},
9860 # if defined (TE_PE) || defined (TE_PEP)
9861   {"mbig-obj", no_argument, NULL, OPTION_MBIG_OBJ},
9862 #endif
9863   {"momit-lock-prefix", required_argument, NULL, OPTION_MOMIT_LOCK_PREFIX},
9864   {"mfence-as-lock-add", required_argument, NULL, OPTION_MFENCE_AS_LOCK_ADD},
9865   {"mrelax-relocations", required_argument, NULL, OPTION_MRELAX_RELOCATIONS},
9866   {"mevexrcig", required_argument, NULL, OPTION_MEVEXRCIG},
9867   {"mamd64", no_argument, NULL, OPTION_MAMD64},
9868   {"mintel64", no_argument, NULL, OPTION_MINTEL64},
9869   {NULL, no_argument, NULL, 0}
9870 };
9871 size_t md_longopts_size = sizeof (md_longopts);
9872 
9873 int
9874 md_parse_option (int c, const char *arg)
9875 {
9876   unsigned int j;
9877   char *arch, *next, *saved;
9878 
9879   switch (c)
9880     {
9881     case 'n':
9882       optimize_align_code = 0;
9883       break;
9884 
9885     case 'q':
9886       quiet_warnings = 1;
9887       break;
9888 
9889 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9890       /* -Qy, -Qn: SVR4 arguments controlling whether a .comment section
9891 	 should be emitted or not.  FIXME: Not implemented.  */
9892     case 'Q':
9893       break;
9894 
9895       /* -V: SVR4 argument to print version ID.  */
9896     case 'V':
9897       print_version_id ();
9898       break;
9899 
9900       /* -k: Ignore for FreeBSD compatibility.  */
9901     case 'k':
9902       break;
9903 
9904     case 's':
9905       /* -s: On i386 Solaris, this tells the native assembler to use
9906 	 .stab instead of .stab.excl.  We always use .stab anyhow.  */
9907       break;
9908 
9909     case OPTION_MSHARED:
9910       shared = 1;
9911       break;
9912 #endif
9913 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
9914      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
9915     case OPTION_64:
9916       {
9917 	const char **list, **l;
9918 
9919 	list = bfd_target_list ();
9920 	for (l = list; *l != NULL; l++)
9921 	  if (CONST_STRNEQ (*l, "elf64-x86-64")
9922 	      || strcmp (*l, "coff-x86-64") == 0
9923 	      || strcmp (*l, "pe-x86-64") == 0
9924 	      || strcmp (*l, "pei-x86-64") == 0
9925 	      || strcmp (*l, "mach-o-x86-64") == 0)
9926 	    {
9927 	      default_arch = "x86_64";
9928 	      break;
9929 	    }
9930 	if (*l == NULL)
9931 	  as_fatal (_("no compiled in support for x86_64"));
9932 	free (list);
9933       }
9934       break;
9935 #endif
9936 
9937 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
9938     case OPTION_X32:
9939       if (IS_ELF)
9940 	{
9941 	  const char **list, **l;
9942 
9943 	  list = bfd_target_list ();
9944 	  for (l = list; *l != NULL; l++)
9945 	    if (CONST_STRNEQ (*l, "elf32-x86-64"))
9946 	      {
9947 		default_arch = "x86_64:32";
9948 		break;
9949 	      }
9950 	  if (*l == NULL)
9951 	    as_fatal (_("no compiled in support for 32bit x86_64"));
9952 	  free (list);
9953 	}
9954       else
9955 	as_fatal (_("32bit x86_64 is only supported for ELF"));
9956       break;
9957 #endif
9958 
9959     case OPTION_32:
9960       default_arch = "i386";
9961       break;
9962 
9963     case OPTION_DIVIDE:
9964 #ifdef SVR4_COMMENT_CHARS
9965       {
9966 	char *n, *t;
9967 	const char *s;
9968 
9969 	n = XNEWVEC (char, strlen (i386_comment_chars) + 1);
9970 	t = n;
9971 	for (s = i386_comment_chars; *s != '\0'; s++)
9972 	  if (*s != '/')
9973 	    *t++ = *s;
9974 	*t = '\0';
9975 	i386_comment_chars = n;
9976       }
9977 #endif
9978       break;
9979 
9980     case OPTION_MARCH:
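      /* The argument is a CPU optionally followed by "+" separated
	 extensions, e.g. "-march=corei7+avx"; a "no" prefixed name such
	 as "+nosse" disables an extension instead.  */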
9981       saved = xstrdup (arg);
9982       arch = saved;
9983       /* Allow -march=+nosse.  */
9984       if (*arch == '+')
9985 	arch++;
9986       do
9987 	{
9988 	  if (*arch == '.')
9989 	    as_fatal (_("invalid -march= option: `%s'"), arg);
9990 	  next = strchr (arch, '+');
9991 	  if (next)
9992 	    *next++ = '\0';
9993 	  for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
9994 	    {
9995 	      if (strcmp (arch, cpu_arch [j].name) == 0)
9996 		{
9997 		  /* Processor.  */
9998 		  if (! cpu_arch[j].flags.bitfield.cpui386)
9999 		    continue;
10000 
10001 		  cpu_arch_name = cpu_arch[j].name;
10002 		  cpu_sub_arch_name = NULL;
10003 		  cpu_arch_flags = cpu_arch[j].flags;
10004 		  cpu_arch_isa = cpu_arch[j].type;
10005 		  cpu_arch_isa_flags = cpu_arch[j].flags;
10006 		  if (!cpu_arch_tune_set)
10007 		    {
10008 		      cpu_arch_tune = cpu_arch_isa;
10009 		      cpu_arch_tune_flags = cpu_arch_isa_flags;
10010 		    }
10011 		  break;
10012 		}
10013 	      else if (*cpu_arch [j].name == '.'
10014 		       && strcmp (arch, cpu_arch [j].name + 1) == 0)
10015 		{
10016 		  /* ISA extension.  */
10017 		  i386_cpu_flags flags;
10018 
10019 		  flags = cpu_flags_or (cpu_arch_flags,
10020 					cpu_arch[j].flags);
10021 
10022 		  if (!valid_iamcu_cpu_flags (&flags))
10023 		    as_fatal (_("`%s' isn't valid for Intel MCU"), arch);
10024 		  else if (!cpu_flags_equal (&flags, &cpu_arch_flags))
10025 		    {
10026 		      if (cpu_sub_arch_name)
10027 			{
10028 			  char *name = cpu_sub_arch_name;
10029 			  cpu_sub_arch_name = concat (name,
10030 						      cpu_arch[j].name,
10031 						      (const char *) NULL);
10032 			  free (name);
10033 			}
10034 		      else
10035 			cpu_sub_arch_name = xstrdup (cpu_arch[j].name);
10036 		      cpu_arch_flags = flags;
10037 		      cpu_arch_isa_flags = flags;
10038 		    }
10039 		  break;
10040 		}
10041 	    }
10042 
10043 	  if (j >= ARRAY_SIZE (cpu_arch))
10044 	    {
10045 	      /* Disable an ISA extension.  */
10046 	      for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
10047 		if (strcmp (arch, cpu_noarch [j].name) == 0)
10048 		  {
10049 		    i386_cpu_flags flags;
10050 
10051 		    flags = cpu_flags_and_not (cpu_arch_flags,
10052 					       cpu_noarch[j].flags);
10053 		    if (!cpu_flags_equal (&flags, &cpu_arch_flags))
10054 		      {
10055 			if (cpu_sub_arch_name)
10056 			  {
10057 			    char *name = cpu_sub_arch_name;
10058 			    cpu_sub_arch_name = concat (arch,
10059 							(const char *) NULL);
10060 			    free (name);
10061 			  }
10062 			else
10063 			  cpu_sub_arch_name = xstrdup (arch);
10064 			cpu_arch_flags = flags;
10065 			cpu_arch_isa_flags = flags;
10066 		      }
10067 		    break;
10068 		  }
10069 
10070 	      if (j >= ARRAY_SIZE (cpu_noarch))
10071 		j = ARRAY_SIZE (cpu_arch);
10072 	    }
10073 
10074 	  if (j >= ARRAY_SIZE (cpu_arch))
10075 	    as_fatal (_("invalid -march= option: `%s'"), arg);
10076 
10077 	  arch = next;
10078 	}
10079       while (next != NULL);
10080       free (saved);
10081       break;
10082 
10083     case OPTION_MTUNE:
10084       if (*arg == '.')
10085 	as_fatal (_("invalid -mtune= option: `%s'"), arg);
10086       for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10087 	{
10088 	  if (strcmp (arg, cpu_arch [j].name) == 0)
10089 	    {
10090 	      cpu_arch_tune_set = 1;
10091 	      cpu_arch_tune = cpu_arch [j].type;
10092 	      cpu_arch_tune_flags = cpu_arch[j].flags;
10093 	      break;
10094 	    }
10095 	}
10096       if (j >= ARRAY_SIZE (cpu_arch))
10097 	as_fatal (_("invalid -mtune= option: `%s'"), arg);
10098       break;
10099 
10100     case OPTION_MMNEMONIC:
10101       if (strcasecmp (arg, "att") == 0)
10102 	intel_mnemonic = 0;
10103       else if (strcasecmp (arg, "intel") == 0)
10104 	intel_mnemonic = 1;
10105       else
10106 	as_fatal (_("invalid -mmnemonic= option: `%s'"), arg);
10107       break;
10108 
10109     case OPTION_MSYNTAX:
10110       if (strcasecmp (arg, "att") == 0)
10111 	intel_syntax = 0;
10112       else if (strcasecmp (arg, "intel") == 0)
10113 	intel_syntax = 1;
10114       else
10115 	as_fatal (_("invalid -msyntax= option: `%s'"), arg);
10116       break;
10117 
10118     case OPTION_MINDEX_REG:
10119       allow_index_reg = 1;
10120       break;
10121 
10122     case OPTION_MNAKED_REG:
10123       allow_naked_reg = 1;
10124       break;
10125 
10126     case OPTION_MOLD_GCC:
10127       old_gcc = 1;
10128       break;
10129 
10130     case OPTION_MSSE2AVX:
10131       sse2avx = 1;
10132       break;
10133 
10134     case OPTION_MSSE_CHECK:
10135       if (strcasecmp (arg, "error") == 0)
10136 	sse_check = check_error;
10137       else if (strcasecmp (arg, "warning") == 0)
10138 	sse_check = check_warning;
10139       else if (strcasecmp (arg, "none") == 0)
10140 	sse_check = check_none;
10141       else
10142 	as_fatal (_("invalid -msse-check= option: `%s'"), arg);
10143       break;
10144 
10145     case OPTION_MOPERAND_CHECK:
10146       if (strcasecmp (arg, "error") == 0)
10147 	operand_check = check_error;
10148       else if (strcasecmp (arg, "warning") == 0)
10149 	operand_check = check_warning;
10150       else if (strcasecmp (arg, "none") == 0)
10151 	operand_check = check_none;
10152       else
10153 	as_fatal (_("invalid -moperand-check= option: `%s'"), arg);
10154       break;
10155 
10156     case OPTION_MAVXSCALAR:
10157       if (strcasecmp (arg, "128") == 0)
10158 	avxscalar = vex128;
10159       else if (strcasecmp (arg, "256") == 0)
10160 	avxscalar = vex256;
10161       else
10162 	as_fatal (_("invalid -mavxscalar= option: `%s'"), arg);
10163       break;
10164 
10165     case OPTION_MADD_BND_PREFIX:
10166       add_bnd_prefix = 1;
10167       break;
10168 
10169     case OPTION_MEVEXLIG:
10170       if (strcmp (arg, "128") == 0)
10171 	evexlig = evexl128;
10172       else if (strcmp (arg, "256") == 0)
10173 	evexlig = evexl256;
10174       else  if (strcmp (arg, "512") == 0)
10175 	evexlig = evexl512;
10176       else
10177 	as_fatal (_("invalid -mevexlig= option: `%s'"), arg);
10178       break;
10179 
10180     case OPTION_MEVEXRCIG:
10181       if (strcmp (arg, "rne") == 0)
10182 	evexrcig = rne;
10183       else if (strcmp (arg, "rd") == 0)
10184 	evexrcig = rd;
10185       else if (strcmp (arg, "ru") == 0)
10186 	evexrcig = ru;
10187       else if (strcmp (arg, "rz") == 0)
10188 	evexrcig = rz;
10189       else
10190 	as_fatal (_("invalid -mevexrcig= option: `%s'"), arg);
10191       break;
10192 
10193     case OPTION_MEVEXWIG:
10194       if (strcmp (arg, "0") == 0)
10195 	evexwig = evexw0;
10196       else if (strcmp (arg, "1") == 0)
10197 	evexwig = evexw1;
10198       else
10199 	as_fatal (_("invalid -mevexwig= option: `%s'"), arg);
10200       break;
10201 
10202 # if defined (TE_PE) || defined (TE_PEP)
10203     case OPTION_MBIG_OBJ:
10204       use_big_obj = 1;
10205       break;
10206 #endif
10207 
10208     case OPTION_MOMIT_LOCK_PREFIX:
10209       if (strcasecmp (arg, "yes") == 0)
10210         omit_lock_prefix = 1;
10211       else if (strcasecmp (arg, "no") == 0)
10212         omit_lock_prefix = 0;
10213       else
10214         as_fatal (_("invalid -momit-lock-prefix= option: `%s'"), arg);
10215       break;
10216 
10217     case OPTION_MFENCE_AS_LOCK_ADD:
10218       if (strcasecmp (arg, "yes") == 0)
10219         avoid_fence = 1;
10220       else if (strcasecmp (arg, "no") == 0)
10221         avoid_fence = 0;
10222       else
10223         as_fatal (_("invalid -mfence-as-lock-add= option: `%s'"), arg);
10224       break;
10225 
10226     case OPTION_MRELAX_RELOCATIONS:
10227       if (strcasecmp (arg, "yes") == 0)
10228         generate_relax_relocations = 1;
10229       else if (strcasecmp (arg, "no") == 0)
10230         generate_relax_relocations = 0;
10231       else
10232         as_fatal (_("invalid -mrelax-relocations= option: `%s'"), arg);
10233       break;
10234 
10235     case OPTION_MAMD64:
10236       intel64 = 0;
10237       break;
10238 
10239     case OPTION_MINTEL64:
10240       intel64 = 1;
10241       break;
10242 
10243     default:
10244       return 0;
10245     }
10246   return 1;
10247 }
10248 
10249 #define MESSAGE_TEMPLATE \
10250 "                                                                                "
10251 
10252 static char *
10253 output_message (FILE *stream, char *p, char *message, char *start,
10254 		int *left_p, const char *name, int len)
10255 {
10256   int size = sizeof (MESSAGE_TEMPLATE);
10257   int left = *left_p;
10258 
10259   /* Reserve 2 spaces for ", " or ",\0" */
10260   left -= len + 2;
10261 
10262   /* Check if there is any room.  */
10263   if (left >= 0)
10264     {
10265       if (p != start)
10266 	{
10267 	  *p++ = ',';
10268 	  *p++ = ' ';
10269 	}
10270       p = mempcpy (p, name, len);
10271     }
10272   else
10273     {
10274       /* Output the current message now and start a new one.  */
10275       *p++ = ',';
10276       *p = '\0';
10277       fprintf (stream, "%s\n", message);
10278       p = start;
10279       left = size - (start - message) - len - 2;
10280 
10281       gas_assert (left >= 0);
10282 
10283       p = mempcpy (p, name, len);
10284     }
10285 
10286   *left_p = left;
10287   return p;
10288 }
10289 
10290 static void
10291 show_arch (FILE *stream, int ext, int check)
10292 {
10293   static char message[] = MESSAGE_TEMPLATE;
10294   char *start = message + 27;
10295   char *p;
10296   int size = sizeof (MESSAGE_TEMPLATE);
10297   int left;
10298   const char *name;
10299   int len;
10300   unsigned int j;
10301 
10302   p = start;
10303   left = size - (start - message);
10304   for (j = 0; j < ARRAY_SIZE (cpu_arch); j++)
10305     {
10306       /* Should it be skipped?  */
10307       if (cpu_arch [j].skip)
10308 	continue;
10309 
10310       name = cpu_arch [j].name;
10311       len = cpu_arch [j].len;
10312       if (*name == '.')
10313 	{
10314 	  /* It is an extension.  Skip if we aren't asked to show it.  */
10315 	  if (ext)
10316 	    {
10317 	      name++;
10318 	      len--;
10319 	    }
10320 	  else
10321 	    continue;
10322 	}
10323       else if (ext)
10324 	{
10325 	  /* It is a processor.  Skip if we only show extensions.  */
10326 	  continue;
10327 	}
10328       else if (check && ! cpu_arch[j].flags.bitfield.cpui386)
10329 	{
10330 	  /* It is an impossible processor - skip.  */
10331 	  continue;
10332 	}
10333 
10334       p = output_message (stream, p, message, start, &left, name, len);
10335     }
10336 
10337   /* Display disabled extensions.  */
10338   if (ext)
10339     for (j = 0; j < ARRAY_SIZE (cpu_noarch); j++)
10340       {
10341 	name = cpu_noarch [j].name;
10342 	len = cpu_noarch [j].len;
10343 	p = output_message (stream, p, message, start, &left, name,
10344 			    len);
10345       }
10346 
10347   *p = '\0';
10348   fprintf (stream, "%s\n", message);
10349 }
10350 
10351 void
10352 md_show_usage (FILE *stream)
10353 {
10354 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10355   fprintf (stream, _("\
10356   -Q                      ignored\n\
10357   -V                      print assembler version number\n\
10358   -k                      ignored\n"));
10359 #endif
10360   fprintf (stream, _("\
10361   -n                      Do not optimize code alignment\n\
10362   -q                      quieten some warnings\n"));
10363 #if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
10364   fprintf (stream, _("\
10365   -s                      ignored\n"));
10366 #endif
10367 #if (defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10368      || defined (TE_PE) || defined (TE_PEP))
10369   fprintf (stream, _("\
10370   --32/--64/--x32         generate 32bit/64bit/x32 code\n"));
10371 #endif
10372 #ifdef SVR4_COMMENT_CHARS
10373   fprintf (stream, _("\
10374   --divide                do not treat `/' as a comment character\n"));
10375 #else
10376   fprintf (stream, _("\
10377   --divide                ignored\n"));
10378 #endif
10379   fprintf (stream, _("\
10380   -march=CPU[,+EXTENSION...]\n\
10381                           generate code for CPU and EXTENSION, CPU is one of:\n"));
10382   show_arch (stream, 0, 1);
10383   fprintf (stream, _("\
10384                           EXTENSION is combination of:\n"));
10385   show_arch (stream, 1, 0);
10386   fprintf (stream, _("\
10387   -mtune=CPU              optimize for CPU, CPU is one of:\n"));
10388   show_arch (stream, 0, 0);
10389   fprintf (stream, _("\
10390   -msse2avx               encode SSE instructions with VEX prefix\n"));
10391   fprintf (stream, _("\
10392   -msse-check=[none|error|warning]\n\
10393                           check SSE instructions\n"));
10394   fprintf (stream, _("\
10395   -moperand-check=[none|error|warning]\n\
10396                           check operand combinations for validity\n"));
10397   fprintf (stream, _("\
10398   -mavxscalar=[128|256]   encode scalar AVX instructions with specific vector\n\
10399                            length\n"));
10400   fprintf (stream, _("\
10401   -mevexlig=[128|256|512] encode scalar EVEX instructions with specific vector\n\
10402                            length\n"));
10403   fprintf (stream, _("\
10404   -mevexwig=[0|1]         encode EVEX instructions with specific EVEX.W value\n\
10405                            for EVEX.W bit ignored instructions\n"));
10406   fprintf (stream, _("\
10407   -mevexrcig=[rne|rd|ru|rz]\n\
10408                           encode EVEX instructions with specific EVEX.RC value\n\
10409                            for SAE-only ignored instructions\n"));
10410   fprintf (stream, _("\
10411   -mmnemonic=[att|intel]  use AT&T/Intel mnemonic\n"));
10412   fprintf (stream, _("\
10413   -msyntax=[att|intel]    use AT&T/Intel syntax\n"));
10414   fprintf (stream, _("\
10415   -mindex-reg             support pseudo index registers\n"));
10416   fprintf (stream, _("\
10417   -mnaked-reg             don't require `%%' prefix for registers\n"));
10418   fprintf (stream, _("\
10419   -mold-gcc               support old (<= 2.8.1) versions of gcc\n"));
10420   fprintf (stream, _("\
10421   -madd-bnd-prefix        add BND prefix for all valid branches\n"));
10422   fprintf (stream, _("\
10423   -mshared                disable branch optimization for shared code\n"));
10424 # if defined (TE_PE) || defined (TE_PEP)
10425   fprintf (stream, _("\
10426   -mbig-obj               generate big object files\n"));
10427 #endif
10428   fprintf (stream, _("\
10429   -momit-lock-prefix=[no|yes]\n\
10430                           strip all lock prefixes\n"));
10431   fprintf (stream, _("\
10432   -mfence-as-lock-add=[no|yes]\n\
10433                           encode lfence, mfence and sfence as\n\
10434                            lock addl $0x0, (%%{re}sp)\n"));
10435   fprintf (stream, _("\
10436   -mrelax-relocations=[no|yes]\n\
10437                           generate relax relocations\n"));
10438   fprintf (stream, _("\
10439   -mamd64                 accept only AMD64 ISA\n"));
10440   fprintf (stream, _("\
10441   -mintel64               accept only Intel64 ISA\n"));
10442 }
10443 
10444 #if ((defined (OBJ_MAYBE_COFF) && defined (OBJ_MAYBE_AOUT)) \
10445      || defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF) \
10446      || defined (TE_PE) || defined (TE_PEP) || defined (OBJ_MACH_O))
10447 
10448 /* Pick the target format to use.  */
10449 
10450 const char *
i386_target_format(void)10451 i386_target_format (void)
10452 {
10453   if (!strncmp (default_arch, "x86_64", 6))
10454     {
10455       update_code_flag (CODE_64BIT, 1);
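      /* A plain "x86_64" default_arch selects the 64-bit ABI; any longer
	 spelling (e.g. "x86_64:32" from --x32) selects the x32 ABI.  */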
      if (default_arch[6] == '\0')
	x86_elf_abi = X86_64_ABI;
      else
	x86_elf_abi = X86_64_X32_ABI;
    }
  else if (!strcmp (default_arch, "i386"))
    update_code_flag (CODE_32BIT, 1);
  else if (!strcmp (default_arch, "iamcu"))
    {
      update_code_flag (CODE_32BIT, 1);
      if (cpu_arch_isa == PROCESSOR_UNKNOWN)
	{
	  static const i386_cpu_flags iamcu_flags = CPU_IAMCU_FLAGS;
	  cpu_arch_name = "iamcu";
	  cpu_sub_arch_name = NULL;
	  cpu_arch_flags = iamcu_flags;
	  cpu_arch_isa = PROCESSOR_IAMCU;
	  cpu_arch_isa_flags = iamcu_flags;
	  if (!cpu_arch_tune_set)
	    {
	      cpu_arch_tune = cpu_arch_isa;
	      cpu_arch_tune_flags = cpu_arch_isa_flags;
	    }
	}
      else
	as_fatal (_("Intel MCU doesn't support `%s' architecture"),
		  cpu_arch_name);
    }
  else
    as_fatal (_("unknown architecture"));

  if (cpu_flags_all_zero (&cpu_arch_isa_flags))
    cpu_arch_isa_flags = cpu_arch[flag_code == CODE_64BIT].flags;
  if (cpu_flags_all_zero (&cpu_arch_tune_flags))
    cpu_arch_tune_flags = cpu_arch[flag_code == CODE_64BIT].flags;

  switch (OUTPUT_FLAVOR)
    {
#if defined (OBJ_MAYBE_AOUT) || defined (OBJ_AOUT)
    case bfd_target_aout_flavour:
      return AOUT_TARGET_FORMAT;
#endif
#if defined (OBJ_MAYBE_COFF) || defined (OBJ_COFF)
# if defined (TE_PE) || defined (TE_PEP)
    case bfd_target_coff_flavour:
      if (flag_code == CODE_64BIT)
	return use_big_obj ? "pe-bigobj-x86-64" : "pe-x86-64";
      else
	return "pe-i386";
# elif defined (TE_GO32)
    case bfd_target_coff_flavour:
      return "coff-go32";
# else
    case bfd_target_coff_flavour:
      return "coff-i386";
# endif
#endif
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
    case bfd_target_elf_flavour:
      {
	const char *format;

	switch (x86_elf_abi)
	  {
	  default:
	    format = ELF_TARGET_FORMAT;
	    break;
	  case X86_64_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    format = ELF_TARGET_FORMAT64;
	    break;
	  case X86_64_X32_ABI:
	    use_rela_relocations = 1;
	    object_64bit = 1;
	    disallow_64bit_reloc = 1;
	    format = ELF_TARGET_FORMAT32;
	    break;
	  }
	if (cpu_arch_isa == PROCESSOR_L1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel L1OM is 64bit only"));
	    return ELF_TARGET_L1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_K1OM)
	  {
	    if (x86_elf_abi != X86_64_ABI)
	      as_fatal (_("Intel K1OM is 64bit only"));
	    return ELF_TARGET_K1OM_FORMAT;
	  }
	else if (cpu_arch_isa == PROCESSOR_IAMCU)
	  {
	    if (x86_elf_abi != I386_ABI)
	      as_fatal (_("Intel MCU is 32bit only"));
	    return ELF_TARGET_IAMCU_FORMAT;
	  }
	else
	  return format;
      }
#endif
#if defined (OBJ_MACH_O)
    case bfd_target_mach_o_flavour:
      if (flag_code == CODE_64BIT)
	{
	  use_rela_relocations = 1;
	  object_64bit = 1;
	  return "mach-o-x86-64";
	}
      else
	return "mach-o-i386";
#endif
    default:
      abort ();
      return NULL;
    }
}

#endif /* OBJ_MAYBE_ more than one  */

symbolS *
md_undefined_symbol (char *name)
{
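  /* Cheap check on the first three characters before paying for the full
     strcmp against GLOBAL_OFFSET_TABLE_NAME.  */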
  if (name[0] == GLOBAL_OFFSET_TABLE_NAME[0]
      && name[1] == GLOBAL_OFFSET_TABLE_NAME[1]
      && name[2] == GLOBAL_OFFSET_TABLE_NAME[2]
      && strcmp (name, GLOBAL_OFFSET_TABLE_NAME) == 0)
    {
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in symbol table"));
	  GOT_symbol = symbol_new (name, undefined_section,
				   (valueT) 0, &zero_address_frag);
	}
      return GOT_symbol;
    }
  return 0;
}

/* Round up a section size to the appropriate boundary.  */

valueT
md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
{
#if (defined (OBJ_AOUT) || defined (OBJ_MAYBE_AOUT))
  if (OUTPUT_FLAVOR == bfd_target_aout_flavour)
    {
      /* For a.out, force the section size to be aligned.  If we don't do
	 this, BFD will align it for us, but it will not write out the
	 final bytes of the section.  This may be a bug in BFD, but it is
	 easier to fix it here since that is how the other a.out targets
	 work.  */
      int align;

      align = bfd_get_section_alignment (stdoutput, segment);
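      /* Round SIZE up to a multiple of the section alignment (a power of
	 two given as a log2 value).  */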
      size = ((size + (1 << align) - 1) & (-((valueT) 1 << align)));
    }
#endif

  return size;
}

/* On the i386, PC-relative offsets are relative to the start of the
   next instruction.  That is, the address of the offset, plus its
   size, since the offset is always the last part of the insn.  */

long
md_pcrel_from (fixS *fixP)
{
  return fixP->fx_size + fixP->fx_where + fixP->fx_frag->fr_address;
}

#ifndef I386COFF

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  int temp;

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  if (IS_ELF)
    obj_elf_section_change_hook ();
#endif
  temp = get_absolute_expression ();
  subseg_set (bss_section, (subsegT) temp);
  demand_empty_rest_of_line ();
}

#endif

void
i386_validate_fix (fixS *fixp)
{
  if (fixp->fx_subsy)
    {
      if (fixp->fx_subsy == GOT_symbol)
	{
	  if (fixp->fx_r_type == BFD_RELOC_32_PCREL)
	    {
	      if (!object_64bit)
		abort ();
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
	      if (fixp->fx_tcbit2)
		fixp->fx_r_type = (fixp->fx_tcbit
				   ? BFD_RELOC_X86_64_REX_GOTPCRELX
				   : BFD_RELOC_X86_64_GOTPCRELX);
	      else
#endif
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTPCREL;
	    }
	  else
	    {
	      if (!object_64bit)
		fixp->fx_r_type = BFD_RELOC_386_GOTOFF;
	      else
		fixp->fx_r_type = BFD_RELOC_X86_64_GOTOFF64;
	    }
	  fixp->fx_subsy = 0;
	}
    }
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
  else if (!object_64bit)
    {
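      /* fx_tcbit2 marks fixups for which the linker-relaxable GOT32X form
	 may be used (cf. -mrelax-relocations).  */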
      if (fixp->fx_r_type == BFD_RELOC_386_GOT32
	  && fixp->fx_tcbit2)
	fixp->fx_r_type = BFD_RELOC_386_GOT32X;
    }
#endif
}

arelent *
tc_gen_reloc (asection *section ATTRIBUTE_UNUSED, fixS *fixp)
{
  arelent *rel;
  bfd_reloc_code_real_type code;

  switch (fixp->fx_r_type)
    {
#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
    case BFD_RELOC_SIZE32:
    case BFD_RELOC_SIZE64:
      if (S_IS_DEFINED (fixp->fx_addsy)
	  && !S_IS_EXTERNAL (fixp->fx_addsy))
	{
	  /* Resolve size relocation against local symbol to size of
	     the symbol plus addend.  */
	  valueT value = S_GET_SIZE (fixp->fx_addsy) + fixp->fx_offset;
	  if (fixp->fx_r_type == BFD_RELOC_SIZE32
	      && !fits_in_unsigned_long (value))
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("symbol size computation overflow"));
	  fixp->fx_addsy = NULL;
	  fixp->fx_subsy = NULL;
	  md_apply_fix (fixp, (valueT *) &value, NULL);
	  return NULL;
	}
#endif
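      /* Fall through.  */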

    case BFD_RELOC_X86_64_PLT32:
    case BFD_RELOC_X86_64_GOT32:
    case BFD_RELOC_X86_64_GOTPCREL:
    case BFD_RELOC_X86_64_GOTPCRELX:
    case BFD_RELOC_X86_64_REX_GOTPCRELX:
    case BFD_RELOC_386_PLT32:
    case BFD_RELOC_386_GOT32:
    case BFD_RELOC_386_GOT32X:
    case BFD_RELOC_386_GOTOFF:
    case BFD_RELOC_386_GOTPC:
    case BFD_RELOC_386_TLS_GD:
    case BFD_RELOC_386_TLS_LDM:
    case BFD_RELOC_386_TLS_LDO_32:
    case BFD_RELOC_386_TLS_IE_32:
    case BFD_RELOC_386_TLS_IE:
    case BFD_RELOC_386_TLS_GOTIE:
    case BFD_RELOC_386_TLS_LE_32:
    case BFD_RELOC_386_TLS_LE:
    case BFD_RELOC_386_TLS_GOTDESC:
    case BFD_RELOC_386_TLS_DESC_CALL:
    case BFD_RELOC_X86_64_TLSGD:
    case BFD_RELOC_X86_64_TLSLD:
    case BFD_RELOC_X86_64_DTPOFF32:
    case BFD_RELOC_X86_64_DTPOFF64:
    case BFD_RELOC_X86_64_GOTTPOFF:
    case BFD_RELOC_X86_64_TPOFF32:
    case BFD_RELOC_X86_64_TPOFF64:
    case BFD_RELOC_X86_64_GOTOFF64:
    case BFD_RELOC_X86_64_GOTPC32:
    case BFD_RELOC_X86_64_GOT64:
    case BFD_RELOC_X86_64_GOTPCREL64:
    case BFD_RELOC_X86_64_GOTPC64:
    case BFD_RELOC_X86_64_GOTPLT64:
    case BFD_RELOC_X86_64_PLTOFF64:
    case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
    case BFD_RELOC_X86_64_TLSDESC_CALL:
    case BFD_RELOC_RVA:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;
    case BFD_RELOC_X86_64_32S:
      if (!fixp->fx_pcrel)
	{
	  /* Don't turn BFD_RELOC_X86_64_32S into BFD_RELOC_32.  */
	  code = fixp->fx_r_type;
	  break;
	}
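      /* Fall through.  */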
    default:
      if (fixp->fx_pcrel)
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte pc-relative relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32_PCREL;
	      break;
	    case 1: code = BFD_RELOC_8_PCREL;  break;
	    case 2: code = BFD_RELOC_16_PCREL; break;
	    case 4: code = BFD_RELOC_32_PCREL; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64_PCREL; break;
#endif
	    }
	}
      else
	{
	  switch (fixp->fx_size)
	    {
	    default:
	      as_bad_where (fixp->fx_file, fixp->fx_line,
			    _("can not do %d byte relocation"),
			    fixp->fx_size);
	      code = BFD_RELOC_32;
	      break;
	    case 1: code = BFD_RELOC_8;  break;
	    case 2: code = BFD_RELOC_16; break;
	    case 4: code = BFD_RELOC_32; break;
#ifdef BFD64
	    case 8: code = BFD_RELOC_64; break;
#endif
	    }
	}
      break;
    }

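  /* A 32- or 64-bit reference whose symbol is _GLOBAL_OFFSET_TABLE_ itself
     is converted into the corresponding GOTPC-style relocation.  */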
  if ((code == BFD_RELOC_32
       || code == BFD_RELOC_32_PCREL
       || code == BFD_RELOC_X86_64_32S)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      if (!object_64bit)
	code = BFD_RELOC_386_GOTPC;
      else
	code = BFD_RELOC_X86_64_GOTPC32;
    }
  if ((code == BFD_RELOC_64 || code == BFD_RELOC_64_PCREL)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_X86_64_GOTPC64;
    }

  rel = XNEW (arelent);
  rel->sym_ptr_ptr = XNEW (asymbol *);
  *rel->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);

  rel->address = fixp->fx_frag->fr_address + fixp->fx_where;

  if (!use_rela_relocations)
    {
      /* HACK: Since i386 ELF uses Rel instead of Rela, encode the
	 vtable entry to be used in the relocation's section offset.  */
      if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
	rel->address = fixp->fx_offset;
#if defined (OBJ_COFF) && defined (TE_PE)
      else if (fixp->fx_addsy && S_IS_WEAK (fixp->fx_addsy))
	rel->addend = fixp->fx_addnumber - (S_GET_VALUE (fixp->fx_addsy) * 2);
      else
#endif
      rel->addend = 0;
    }
  /* Use Rela relocations in 64-bit mode.  */
  else
    {
      if (disallow_64bit_reloc)
	switch (code)
	  {
	  case BFD_RELOC_X86_64_DTPOFF64:
	  case BFD_RELOC_X86_64_TPOFF64:
	  case BFD_RELOC_64_PCREL:
	  case BFD_RELOC_X86_64_GOTOFF64:
	  case BFD_RELOC_X86_64_GOT64:
	  case BFD_RELOC_X86_64_GOTPCREL64:
	  case BFD_RELOC_X86_64_GOTPC64:
	  case BFD_RELOC_X86_64_GOTPLT64:
	  case BFD_RELOC_X86_64_PLTOFF64:
	    as_bad_where (fixp->fx_file, fixp->fx_line,
			  _("cannot represent relocation type %s in x32 mode"),
			  bfd_get_reloc_code_name (code));
	    break;
	  default:
	    break;
	  }

      if (!fixp->fx_pcrel)
	rel->addend = fixp->fx_offset;
      else
	switch (code)
	  {
	  case BFD_RELOC_X86_64_PLT32:
	  case BFD_RELOC_X86_64_GOT32:
	  case BFD_RELOC_X86_64_GOTPCREL:
	  case BFD_RELOC_X86_64_GOTPCRELX:
	  case BFD_RELOC_X86_64_REX_GOTPCRELX:
	  case BFD_RELOC_X86_64_TLSGD:
	  case BFD_RELOC_X86_64_TLSLD:
	  case BFD_RELOC_X86_64_GOTTPOFF:
	  case BFD_RELOC_X86_64_GOTPC32_TLSDESC:
	  case BFD_RELOC_X86_64_TLSDESC_CALL:
	    rel->addend = fixp->fx_offset - fixp->fx_size;
	    break;
	  default:
	    rel->addend = (section->vma
			   - fixp->fx_size
			   + fixp->fx_addnumber
			   + md_pcrel_from (fixp));
	    break;
	  }
    }

  rel->howto = bfd_reloc_type_lookup (stdoutput, code);
  if (rel->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent relocation type %s"),
		    bfd_get_reloc_code_name (code));
      /* Set howto to a garbage value so that we can keep going.  */
      rel->howto = bfd_reloc_type_lookup (stdoutput, BFD_RELOC_32);
      gas_assert (rel->howto != NULL);
    }

  return rel;
}

#include "tc-i386-intel.c"

void
tc_x86_parse_to_dw2regnum (expressionS *exp)
{
  int saved_naked_reg;
  char saved_register_dot;

  saved_naked_reg = allow_naked_reg;
  allow_naked_reg = 1;
  saved_register_dot = register_chars['.'];
  register_chars['.'] = '.';
  allow_pseudo_reg = 1;
  expression_and_evaluate (exp);
  allow_pseudo_reg = 0;
  register_chars['.'] = saved_register_dot;
  allow_naked_reg = saved_naked_reg;

  if (exp->X_op == O_register && exp->X_add_number >= 0)
    {
      if ((addressT) exp->X_add_number < i386_regtab_size)
	{
	  exp->X_op = O_constant;
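	  /* flag_code >> 1 selects the 32-bit (0) or 64-bit (1) DWARF
	     register numbering; CODE_32BIT and CODE_16BIT share entry 0.  */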
	  exp->X_add_number = i386_regtab[exp->X_add_number]
			      .dw2_regnum[flag_code >> 1];
	}
      else
	exp->X_op = O_illegal;
    }
}

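/* Emit the initial CFI instructions for each FDE: define the CFA in terms
   of the stack pointer (%esp or %rsp) and record where the return address
   is stored relative to it.  */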
void
tc_x86_frame_initial_instructions (void)
{
  static unsigned int sp_regno[2];

  if (!sp_regno[flag_code >> 1])
    {
      char *saved_input = input_line_pointer;
      char sp[][4] = {"esp", "rsp"};
      expressionS exp;

      input_line_pointer = sp[flag_code >> 1];
      tc_x86_parse_to_dw2regnum (&exp);
      gas_assert (exp.X_op == O_constant);
      sp_regno[flag_code >> 1] = exp.X_add_number;
      input_line_pointer = saved_input;
    }

  cfi_add_CFA_def_cfa (sp_regno[flag_code >> 1], -x86_cie_data_alignment);
  cfi_add_CFA_offset (x86_dwarf2_return_column, x86_cie_data_alignment);
}

int
x86_dwarf2_addr_size (void)
{
#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
  if (x86_elf_abi == X86_64_X32_ABI)
    return 4;
#endif
  return bfd_arch_bits_per_address (stdoutput) / 8;
}

int
i386_elf_section_type (const char *str, size_t len)
{
  if (flag_code == CODE_64BIT
      && len == sizeof ("unwind") - 1
      && strncmp (str, "unwind", 6) == 0)
    return SHT_X86_64_UNWIND;

  return -1;
}

#ifdef TE_SOLARIS
void
i386_solaris_fix_up_eh_frame (segT sec)
{
  if (flag_code == CODE_64BIT)
    elf_section_type (sec) = SHT_X86_64_UNWIND;
}
#endif

#ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
#endif

#if defined (OBJ_ELF) || defined (OBJ_MAYBE_ELF)
/* For ELF on x86-64, add support for SHF_X86_64_LARGE.  */

bfd_vma
x86_64_section_letter (int letter, const char **ptr_msg)
{
  if (flag_code == CODE_64BIT)
    {
      if (letter == 'l')
	return SHF_X86_64_LARGE;

      *ptr_msg = _("bad .section directive: want a,l,w,x,M,S,G,T in string");
    }
  else
    *ptr_msg = _("bad .section directive: want a,w,x,M,S,G,T in string");
  return -1;
}

bfd_vma
x86_64_section_word (char *str, size_t len)
{
  if (len == 5 && flag_code == CODE_64BIT && CONST_STRNEQ (str, "large"))
    return SHF_X86_64_LARGE;

  return -1;
}

static void
handle_large_common (int small ATTRIBUTE_UNUSED)
{
  if (flag_code != CODE_64BIT)
    {
      s_comm_internal (0, elf_common_parse);
      as_warn (_(".largecomm supported only in 64bit mode, producing .comm"));
    }
  else
    {
      static segT lbss_section;
      asection *saved_com_section_ptr = elf_com_section_ptr;
      asection *saved_bss_section = bss_section;

      if (lbss_section == NULL)
	{
	  flagword applicable;
	  segT seg = now_seg;
	  subsegT subseg = now_subseg;

	  /* The .lbss section is for local .largecomm symbols.  */
	  lbss_section = subseg_new (".lbss", 0);
	  applicable = bfd_applicable_section_flags (stdoutput);
	  bfd_set_section_flags (stdoutput, lbss_section,
				 applicable & SEC_ALLOC);
	  seg_info (lbss_section)->bss = 1;

	  subseg_set (seg, subseg);
	}

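      /* Temporarily redirect common symbols to the large-common section and
	 local common storage to .lbss, then let the generic .comm parser do
	 the work.  */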
      elf_com_section_ptr = &_bfd_elf_large_com_section;
      bss_section = lbss_section;

      s_comm_internal (0, elf_common_parse);

      elf_com_section_ptr = saved_com_section_ptr;
      bss_section = saved_bss_section;
    }
}
#endif /* OBJ_ELF || OBJ_MAYBE_ELF */