1 /* ----------------------------------------------------------------------- *
2 *
3 * Copyright 1996-2020 The NASM Authors - All Rights Reserved
4 * See the file AUTHORS included with the NASM distribution for
5 * the specific copyright holders.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following
9 * conditions are met:
10 *
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above
14 * copyright notice, this list of conditions and the following
15 * disclaimer in the documentation and/or other materials provided
16 * with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
19 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
20 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
21 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
25 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
26 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
29 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
30 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 *
32 * ----------------------------------------------------------------------- */
33
34 /*
35 * assemble.c code generation for the Netwide Assembler
36 *
37 * Bytecode specification
38 * ----------------------
39 *
40 *
41 * Codes Mnemonic Explanation
42 *
43 * \0 terminates the code. (Unless it's a literal of course.)
44 * \1..\4 that many literal bytes follow in the code stream
45 * \5 add 4 to the primary operand number (b, low octdigit)
46 * \6 add 4 to the secondary operand number (a, middle octdigit)
47 * \7 add 4 to both the primary and the secondary operand number
48 * \10..\13 a literal byte follows in the code stream, to be added
49 * to the register value of operand 0..3
50 * \14..\17 the position of index register operand in MIB (BND insns)
51 * \20..\23 ib a byte immediate operand, from operand 0..3
52 * \24..\27 ib,u a zero-extended byte immediate operand, from operand 0..3
53 * \30..\33 iw a word immediate operand, from operand 0..3
54 * \34..\37 iwd select between \3[0-3] and \4[0-3] depending on 16/32 bit
55 * assembly mode or the operand-size override on the operand
56 * \40..\43 id a long immediate operand, from operand 0..3
57 * \44..\47 iwdq select between \3[0-3], \4[0-3] and \5[4-7]
58 * depending on the address size of the instruction.
59 * \50..\53 rel8 a byte relative operand, from operand 0..3
60 * \54..\57 iq a qword immediate operand, from operand 0..3
61 * \60..\63 rel16 a word relative operand, from operand 0..3
62 * \64..\67 rel select between \6[0-3] and \7[0-3] depending on 16/32 bit
63 * assembly mode or the operand-size override on the operand
64 * \70..\73 rel32 a long relative operand, from operand 0..3
65 * \74..\77 seg a word constant, from the _segment_ part of operand 0..3
66 * \1ab a ModRM, calculated on EA in operand a, with the spare
67 * field the register value of operand b.
68 * \172\ab the register number from operand a in bits 7..4, with
69 * the 4-bit immediate from operand b in bits 3..0.
70 * \173\xab the register number from operand a in bits 7..4, with
71 * the value b in bits 3..0.
72 * \174..\177 the register number from operand 0..3 in bits 7..4, and
73 * an arbitrary value in bits 3..0 (assembled as zero.)
74 * \2ab a ModRM, calculated on EA in operand a, with the spare
75 * field equal to digit b.
76 *
77 * \240..\243 this instruction uses EVEX rather than REX or VEX/XOP, with the
78 * V field taken from operand 0..3.
79 * \250 this instruction uses EVEX rather than REX or VEX/XOP, with the
80 * V field set to 1111b.
81 *
82 * EVEX prefixes are followed by the sequence:
83 * \cm\wlp\tup where cm is:
84 * cc 00m mmm
85 * c = 2 for EVEX and mmmm is the M field (EVEX.P0[3:0])
86 * and wlp is:
87 * 00 wwl lpp
88 * [l0] ll = 0 (.128, .lz)
89 * [l1] ll = 1 (.256)
90 * [l2] ll = 2 (.512)
91 * [lig] ll = 3 for EVEX.L'L don't care (always assembled as 0)
92 *
93 * [w0] ww = 0 for W = 0
94 * [w1] ww = 1 for W = 1
95 * [wig] ww = 2 for W don't care (always assembled as 0)
96 * [ww] ww = 3 for W used as REX.W
97 *
98 * [p0] pp = 0 for no prefix
99 * [60] pp = 1 for legacy prefix 60
100 * [f3] pp = 2
101 * [f2] pp = 3
102 *
103 * tup is tuple type for Disp8*N from %tuple_codes in insns.pl
104 * (compressed displacement encoding)
105 *
106 * \254..\257 id,s a signed 32-bit operand to be extended to 64 bits.
107 * \260..\263 this instruction uses VEX/XOP rather than REX, with the
108 * V field taken from operand 0..3.
109 * \270 this instruction uses VEX/XOP rather than REX, with the
110 * V field set to 1111b.
111 *
112 * VEX/XOP prefixes are followed by the sequence:
113 * \tmm\wlp where mm is the M field; and wlp is:
114 * 00 wwl lpp
115 * [l0] ll = 0 for L = 0 (.128, .lz)
116 * [l1] ll = 1 for L = 1 (.256)
117 * [lig] ll = 2 for L don't care (always assembled as 0)
118 *
119 * [w0] ww = 0 for W = 0
 * [w1]              ww = 1 for W = 1
121 * [wig] ww = 2 for W don't care (always assembled as 0)
122 * [ww] ww = 3 for W used as REX.W
123 *
124 * t = 0 for VEX (C4/C5), t = 1 for XOP (8F).
125 *
126 * \271 hlexr instruction takes XRELEASE (F3) with or without lock
127 * \272 hlenl instruction takes XACQUIRE/XRELEASE with or without lock
128 * \273 hle instruction takes XACQUIRE/XRELEASE with lock only
129 * \274..\277 ib,s a byte immediate operand, from operand 0..3, sign-extended
130 * to the operand size (if o16/o32/o64 present) or the bit size
131 * \310 a16 indicates fixed 16-bit address size, i.e. optional 0x67.
132 * \311 a32 indicates fixed 32-bit address size, i.e. optional 0x67.
133 * \312 adf (disassembler only) invalid with non-default address size.
134 * \313 a64 indicates fixed 64-bit address size, 0x67 invalid.
135 * \314 norexb (disassembler only) invalid with REX.B
136 * \315 norexx (disassembler only) invalid with REX.X
137 * \316 norexr (disassembler only) invalid with REX.R
138 * \317 norexw (disassembler only) invalid with REX.W
139 * \320 o16 indicates fixed 16-bit operand size, i.e. optional 0x66.
140 * \321 o32 indicates fixed 32-bit operand size, i.e. optional 0x66.
141 * \322 odf indicates that this instruction is only valid when the
142 * operand size is the default (instruction to disassembler,
143 * generates no code in the assembler)
144 * \323 o64nw indicates fixed 64-bit operand size, REX on extensions only.
145 * \324 o64 indicates 64-bit operand size requiring REX prefix.
146 * \325 nohi instruction which always uses spl/bpl/sil/dil
147 * \326 nof3 instruction not valid with 0xF3 REP prefix. Hint for
 *                   disassembler only; for SSE instructions.
149 * \330 a literal byte follows in the code stream, to be added
150 * to the condition code value of the instruction.
151 * \331 norep instruction not valid with REP prefix. Hint for
152 * disassembler only; for SSE instructions.
153 * \332 f2i REP prefix (0xF2 byte) used as opcode extension.
154 * \333 f3i REP prefix (0xF3 byte) used as opcode extension.
155 * \334 rex.l LOCK prefix used as REX.R (used in non-64-bit mode)
156 * \335 repe disassemble a rep (0xF3 byte) prefix as repe not rep.
157 * \336 mustrep force a REP(E) prefix (0xF3) even if not specified.
158 * \337 mustrepne force a REPNE prefix (0xF2) even if not specified.
159 * \336-\337 are still listed as prefixes in the disassembler.
160 * \340 resb reserve <operand 0> bytes of uninitialized storage.
161 * Operand 0 had better be a segmentless constant.
162 * \341 wait this instruction needs a WAIT "prefix"
163 * \360 np no SSE prefix (== \364\331)
164 * \361 66 SSE prefix (== \366\331)
165 * \364 !osp operand-size prefix (0x66) not permitted
166 * \365 !asp address-size prefix (0x67) not permitted
167 * \366 operand-size prefix (0x66) used as opcode extension
168 * \367 address-size prefix (0x67) used as opcode extension
169 * \370,\371 jcc8 match only if operand 0 meets byte jump criteria.
170 * jmp8 370 is used for Jcc, 371 is used for JMP.
171 * \373 jlen assemble 0x03 if bits==16, 0x05 if bits==32;
172 * used for conditional jump over longer jump
173 * \374 vsibx|vm32x|vm64x this instruction takes an XMM VSIB memory EA
174 * \375 vsiby|vm32y|vm64y this instruction takes an YMM VSIB memory EA
175 * \376 vsibz|vm32z|vm64z this instruction takes an ZMM VSIB memory EA
176 */
177
178 #include "compiler.h"
179
180
181 #include "nasm.h"
182 #include "nasmlib.h"
183 #include "error.h"
184 #include "assemble.h"
185 #include "insns.h"
186 #include "tables.h"
187 #include "disp8.h"
188 #include "listing.h"
189
/*
 * Result of matching an instruction against a template; produced by
 * matches()/find_match() and reported as an error in assemble() when
 * no template matches.
 */
enum match_result {
    /*
     * Matching errors. These should be sorted so that more specific
     * errors come later in the sequence.
     */
    MERR_INVALOP,       /* invalid combination of opcode and operands */
    MERR_OPSIZEMISSING, /* operation size not specified */
    MERR_OPSIZEMISMATCH, /* mismatch in operand sizes */
    MERR_BRNOTHERE,     /* broadcast not permitted on this operand */
    MERR_BRNUMMISMATCH, /* mismatch in number of broadcasting elements */
    MERR_MASKNOTHERE,   /* mask not permitted on this operand */
    MERR_DECONOTHERE,   /* unsupported mode decorator */
    MERR_BADCPU,        /* no instruction for this cpu level */
    MERR_BADMODE,       /* instruction not supported in this bit mode */
    MERR_BADHLE,        /* invalid HLE prefix use */
    MERR_ENCMISMATCH,   /* specific encoding scheme not available */
    MERR_BADBND,        /* bnd prefix not allowed */
    MERR_BADREPNE,      /* repne/repnz prefix not allowed */
    MERR_REGSETSIZE,    /* invalid register set size */
    MERR_REGSET,        /* register set not valid for operand */
    /*
     * Matching success; the conditional ones first
     */
    MOK_JUMP,   /* Matching OK but needs jmp_match() */
    MOK_GOOD    /* Matching unconditionally OK */
};
216
/*
 * Description of an effective address, as computed by process_ea().
 */
typedef struct {
    enum ea_type type;            /* what kind of EA is this? */
    int sib_present;              /* is a SIB byte necessary? */
    int bytes;                    /* # of bytes of offset needed */
    int size;                     /* lazy - this is sib+bytes+1 */
    uint8_t modrm, sib, rex, rip; /* the bytes themselves */
    int8_t disp8;                 /* compressed displacement for EVEX */
} ea;
225
/* Assemble a SIB byte from its scale (2-bit), index and base fields. */
#define GEN_SIB(scale, index, base)                 \
        (((scale) << 6) | ((index) << 3) | ((base)))

/* Assemble a ModRM byte; reg and rm are masked to their low 3 bits. */
#define GEN_MODRM(mod, reg, rm)                     \
        (((mod) << 6) | (((reg) & 7) << 3) | ((rm) & 7))
231
/*
 * Forward declarations for the assembler core: instruction sizing,
 * prefix and opcode emission, template matching, register/REX/EVEX
 * flag helpers, and effective-address processing.
 */
static int64_t calcsize(int32_t, int64_t, int, insn *,
                        const struct itemplate *);
static int emit_prefix(struct out_data *data, const int bits, insn *ins);
static void gencode(struct out_data *data, insn *ins);
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits);
static enum match_result matches(const struct itemplate *, insn *, int bits);
static opflags_t regflag(const operand *);
static int32_t regval(const operand *);
static int rexflags(int, opflags_t, int);
static int op_rexflags(const operand *, int);
static int op_evexflags(const operand *, int, uint8_t);
static void add_asp(insn *, int);

static enum ea_type process_ea(operand *, ea *, int, int,
                               opflags_t, insn *, const char **);
249
absolute_op(const struct operand * o)250 static inline bool absolute_op(const struct operand *o)
251 {
252 return o->segment == NO_SEG && o->wrt == NO_SEG &&
253 !(o->opflags & OPFLAG_RELATIVE);
254 }
255
/* Nonzero iff slot `pos' of the instruction's prefix vector holds `prefix'. */
static int has_prefix(insn * ins, enum prefix_pos pos, int prefix)
{
    const int present = ins->prefixes[pos];

    return present == prefix;
}
260
/* Complain (non-fatally) if any prefix occupies slot `pos'. */
static void assert_no_prefix(insn * ins, enum prefix_pos pos)
{
    const int pfx = ins->prefixes[pos];

    if (!pfx)
        return;

    nasm_nonfatal("invalid %s prefix", prefix_name(pfx));
}
266
/* Human-readable name for an operand size in bytes ("???" if unknown). */
static const char *size_name(int size)
{
    static const struct {
        int bytes;
        const char *name;
    } tbl[] = {
        {  1, "byte"  },
        {  2, "word"  },
        {  4, "dword" },
        {  8, "qword" },
        { 10, "tword" },
        { 16, "oword" },
        { 32, "yword" },
        { 64, "zword" }
    };
    size_t i;

    for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++) {
        if (tbl[i].bytes == size)
            return tbl[i].name;
    }

    return "???";
}
290
/* Issue a pass-2 numeric-overflow warning for a datum of `size' bytes. */
static void warn_overflow(int size)
{
    nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW, "%s data exceeds bounds",
              size_name(size));
}
296
/* Warn if a constant cannot be represented in `size' bytes. */
static void warn_overflow_const(int64_t data, int size)
{
    if (!overflow_general(data, size))
        return;

    warn_overflow(size);
}
302
warn_overflow_out(int64_t data,int size,enum out_sign sign)303 static void warn_overflow_out(int64_t data, int size, enum out_sign sign)
304 {
305 bool err;
306
307 switch (sign) {
308 case OUT_WRAP:
309 err = overflow_general(data, size);
310 break;
311 case OUT_SIGNED:
312 err = overflow_signed(data, size);
313 break;
314 case OUT_UNSIGNED:
315 err = overflow_unsigned(data, size);
316 break;
317 default:
318 panic();
319 break;
320 }
321
322 if (err)
323 warn_overflow(size);
324 }
325
326 /*
 * This routine wraps the real output format's output routine,
328 * in order to pass a copy of the data off to the listing file
329 * generator at the same time, flatten unnecessary relocations,
330 * and verify backend compatibility.
331 */
332 /*
333 * This warning is currently issued by backends, but in the future
334 * this code should be centralized.
335 *
336 *!zeroing [on] RESx in initialized section becomes zero
337 *! a \c{RESx} directive was used in a section which contains
338 *! initialized data, and the output format does not support
339 *! this. Instead, this will be replaced with explicit zero
340 *! content, which may produce a large output file.
341 */
/*
 * Central output funnel: hands `data' to the listing generator and the
 * object-format backend, first flattening resolvable addresses into raw
 * data and zero-padding relocations wider than the backend supports.
 */
static void out(struct out_data *data)
{
    /* Last source location/segment reported to the debug backend */
    static struct last_debug_info {
        struct src_location where;
        int32_t segment;
    } dbg;
    union {
        uint8_t b[8];
        uint64_t q;
    } xdata;                    /* little-endian staging for fixed addresses */
    size_t asize, amax;
    uint64_t zeropad = 0;
    int64_t addrval;
    int32_t fixseg;             /* Segment for which to produce fixed data */

    if (!data->size)
        return;                 /* Nothing to do */

    /*
     * Convert addresses to RAWDATA if possible
     * XXX: not all backends want this for global symbols!!!!
     */
    switch (data->type) {
    case OUT_ADDRESS:
        addrval = data->toffset;
        fixseg = NO_SEG;        /* Absolute address is fixed data */
        goto address;

    case OUT_RELADDR:
        addrval = data->toffset - data->relbase;
        fixseg = data->segment; /* Our own segment is fixed data */
        goto address;

    address:
        nasm_assert(data->size <= 8);
        asize = data->size;
        amax = ofmt->maxbits >> 3; /* Maximum address size in bytes */
        if ((ofmt->flags & OFMT_KEEP_ADDR) == 0 && data->tsegment == fixseg &&
            data->twrt == NO_SEG) {
            if (asize >= (size_t)(data->bits >> 3))
                data->sign = OUT_WRAP; /* Support address space wrapping for low-bit modes */
            warn_overflow_out(addrval, asize, data->sign);
            xdata.q = cpu_to_le64(addrval);
            data->data = xdata.b;
            data->type = OUT_RAWDATA;
            asize = amax = 0;   /* No longer an address */
        }
        break;

    case OUT_SEGMENT:
        nasm_assert(data->size <= 8);
        asize = data->size;
        amax = 2;
        break;

    default:
        asize = amax = 0;       /* Not an address */
        break;
    }

    /*
     * If the source location or output segment has changed,
     * let the debug backend know. Some backends really don't
     * like being given a NULL filename as can happen if we
     * use -Lb and expand a macro, so filter out that case.
     *
     * NOTE(review): the `|' below is a bitwise OR of two booleans;
     * both operands are side-effect free, so it behaves like `||'.
     */
    data->where = src_where();
    if (data->where.filename &&
        (!src_location_same(data->where, dbg.where) |
         (data->segment != dbg.segment))) {
        dbg.where = data->where;
        dbg.segment = data->segment;
        dfmt->linenum(dbg.where.filename, dbg.where.lineno, data->segment);
    }

    if (asize > amax) {
        /* Relocation is wider than the backend can represent */
        if (data->type == OUT_RELADDR || data->sign == OUT_SIGNED) {
            nasm_nonfatal("%u-bit signed relocation unsupported by output format %s",
                          (unsigned int)(asize << 3), ofmt->shortname);
        } else {
            /*!
             *!zext-reloc [on] relocation zero-extended to match output format
             *! warns that a relocation has been zero-extended due
             *! to limitations in the output format.
             */
            nasm_warn(WARN_ZEXT_RELOC,
                      "%u-bit %s relocation zero-extended from %u bits",
                      (unsigned int)(asize << 3),
                      data->type == OUT_SEGMENT ? "segment" : "unsigned",
                      (unsigned int)(amax << 3));
        }
        zeropad = data->size - amax;
        data->size = amax;
    }
    lfmt->output(data);

    if (likely(data->segment != NO_SEG)) {
        ofmt->output(data);
    } else {
        /* Outputting to ABSOLUTE section - only reserve is permitted */
        if (data->type != OUT_RESERVE)
            nasm_nonfatal("attempt to assemble code in [ABSOLUTE] space");
        /* No need to push to the backend */
    }

    data->offset += data->size;
    data->insoffs += data->size;

    if (zeropad) {
        /* Emit the zero-extension bytes trimmed off above */
        data->type = OUT_ZERODATA;
        data->size = zeropad;
        lfmt->output(data);
        ofmt->output(data);
        data->offset += zeropad;
        data->insoffs += zeropad;
        data->size += zeropad;  /* Restore original size value */
    }
}
460
out_rawdata(struct out_data * data,const void * rawdata,size_t size)461 static inline void out_rawdata(struct out_data *data, const void *rawdata,
462 size_t size)
463 {
464 data->type = OUT_RAWDATA;
465 data->data = rawdata;
466 data->size = size;
467 out(data);
468 }
469
out_rawbyte(struct out_data * data,uint8_t byte)470 static void out_rawbyte(struct out_data *data, uint8_t byte)
471 {
472 data->type = OUT_RAWDATA;
473 data->data = &byte;
474 data->size = 1;
475 out(data);
476 }
477
out_reserve(struct out_data * data,uint64_t size)478 static inline void out_reserve(struct out_data *data, uint64_t size)
479 {
480 data->type = OUT_RESERVE;
481 data->size = size;
482 out(data);
483 }
484
out_segment(struct out_data * data,const struct operand * opx)485 static void out_segment(struct out_data *data, const struct operand *opx)
486 {
487 if (opx->opflags & OPFLAG_RELATIVE)
488 nasm_nonfatal("segment references cannot be relative");
489
490 data->type = OUT_SEGMENT;
491 data->sign = OUT_UNSIGNED;
492 data->size = 2;
493 data->toffset = opx->offset;
494 data->tsegment = ofmt->segbase(opx->segment | 1);
495 data->twrt = opx->wrt;
496 out(data);
497 }
498
out_imm(struct out_data * data,const struct operand * opx,int size,enum out_sign sign)499 static void out_imm(struct out_data *data, const struct operand *opx,
500 int size, enum out_sign sign)
501 {
502 if (opx->segment != NO_SEG && (opx->segment & 1)) {
503 /*
504 * This is actually a segment reference, but eval() has
505 * already called ofmt->segbase() for us. Sigh.
506 */
507 if (size < 2)
508 nasm_nonfatal("segment reference must be 16 bits");
509
510 data->type = OUT_SEGMENT;
511 } else {
512 data->type = (opx->opflags & OPFLAG_RELATIVE)
513 ? OUT_RELADDR : OUT_ADDRESS;
514 }
515 data->sign = sign;
516 data->toffset = opx->offset;
517 data->tsegment = opx->segment;
518 data->twrt = opx->wrt;
519 /*
520 * XXX: improve this if at some point in the future we can
521 * distinguish the subtrahend in expressions like [foo - bar]
522 * where bar is a symbol in the current segment. However, at the
523 * current point, if OPFLAG_RELATIVE is set that subtraction has
524 * already occurred.
525 */
526 data->relbase = 0;
527 data->size = size;
528 out(data);
529 }
530
out_reladdr(struct out_data * data,const struct operand * opx,int size)531 static void out_reladdr(struct out_data *data, const struct operand *opx,
532 int size)
533 {
534 if (opx->opflags & OPFLAG_RELATIVE)
535 nasm_nonfatal("invalid use of self-relative expression");
536
537 data->type = OUT_RELADDR;
538 data->sign = OUT_SIGNED;
539 data->size = size;
540 data->toffset = opx->offset;
541 data->tsegment = opx->segment;
542 data->twrt = opx->wrt;
543 data->relbase = data->offset + (data->inslen - data->insoffs);
544 out(data);
545 }
546
/*
 * Decide whether a jump template using the \370/\371 (jcc8/jmp8) byte
 * codes can be assembled in short form, i.e. whether the target fits
 * in a signed 8-bit displacement. Note: calcsize() is called for its
 * side effects on `ins' as well as its size result, so ordering here
 * matters.
 */
static bool jmp_match(int32_t segment, int64_t offset, int bits,
                      insn * ins, const struct itemplate *temp)
{
    int64_t isize;
    const uint8_t *code = temp->code;
    uint8_t c = code[0];
    bool is_byte;

    /* Only \370 (Jcc) and \371 (JMP) templates qualify; STRICT forbids it */
    if (((c & ~1) != 0370) || (ins->oprs[0].type & STRICT))
        return false;
    if (!optimizing.level || (optimizing.flag & OPTIM_DISABLE_JMP_MATCH))
        return false;
    if (optimizing.level < 0 && c == 0371)
        return false;

    isize = calcsize(segment, offset, bits, ins, temp);

    if (ins->oprs[0].opflags & OPFLAG_UNKNOWN)
        /* Be optimistic in pass 1 */
        return true;

    /* Cross-segment targets can never be short */
    if (ins->oprs[0].segment != segment)
        return false;

    isize = ins->oprs[0].offset - offset - isize; /* isize is delta */
    is_byte = (isize >= -128 && isize <= 127); /* is it byte size? */

    if (is_byte && c == 0371 && ins->prefixes[PPS_REP] == P_BND) {
        /* jmp short (opcode eb) cannot be used with bnd prefix. */
        ins->prefixes[PPS_REP] = P_none;
        /*!
         *!bnd [on] invalid BND prefixes
         *! warns about ineffective use of the \c{BND} prefix when the
         *! \c{JMP} instruction is converted to the \c{SHORT} form.
         *! This should be extremely rare since the short \c{JMP} only
         *! is applicable to jumps inside the same module, but if
         *! it is legitimate, it may be necessary to use
         *! \c{bnd jmp dword}.
         */
        nasm_warn(WARN_BND | ERR_PASS2 ,
                  "jmp short does not init bnd regs - bnd prefix dropped");
    }

    return is_byte;
}
592
/*
 * Fold TIMES into the operand of a bare RESx instruction so the whole
 * reservation is emitted in one go. Applies only when the computed
 * size is exactly element-size * count, i.e. the instruction carries
 * no prefixes of any sort.
 */
static inline int64_t merge_resb(insn *ins, int64_t isize)
{
    const int nbytes = resb_bytes(ins->opcode);

    if (likely(!nbytes) || isize != nbytes * ins->oprs[0].offset)
        return isize;

    ins->oprs[0].offset *= ins->times;
    isize *= ins->times;
    ins->times = 1;
    return isize;
}
608
/*
 * Number of pad bytes needed to round `len' up to a multiple of
 * `align'. This must handle non-power-of-2 alignment values.
 */
static inline size_t pad_bytes(size_t len, size_t align)
{
    return (align - len % align) % align;
}
615
/*
 * Emit the extended-operand chain of a DB-family pseudo-instruction:
 * numbers, floats, strings (padded to the element size) and nested
 * DUP sub-lists, honoring each extop's duplication count.
 */
static void out_eops(struct out_data *data, const extop *e)
{
    while (e) {
        size_t dup = e->dup;

        switch (e->type) {
        case EOT_NOTHING:
            break;

        case EOT_EXTOP:
            /* Parenthesized DUP sub-list: recurse e->dup times */
            while (dup--)
                out_eops(data, e->val.subexpr);
            break;

        case EOT_DB_NUMBER:
            if (e->elem > 8) {
                nasm_nonfatal("integer supplied as %d-bit data",
                              e->elem << 3);
            } else {
                while (dup--) {
                    data->insoffs = 0;
                    data->inslen = data->size = e->elem;
                    data->tsegment = e->val.num.segment;
                    data->toffset = e->val.num.offset;
                    data->twrt = e->val.num.wrt;
                    data->relbase = 0;
                    /* Low segment bit set means a segment reference */
                    if (e->val.num.segment != NO_SEG &&
                        (e->val.num.segment & 1)) {
                        data->type = OUT_SEGMENT;
                        data->sign = OUT_UNSIGNED;
                    } else {
                        data->type = e->val.num.relative
                            ? OUT_RELADDR : OUT_ADDRESS;
                        data->sign = OUT_WRAP;
                    }
                    out(data);
                }
            }
            break;

        case EOT_DB_FLOAT:
        case EOT_DB_STRING:
        case EOT_DB_STRING_FREE:
        {
            size_t pad, len;

            /* Pad the string out to a whole number of elements */
            pad = pad_bytes(e->val.string.len, e->elem);
            len = e->val.string.len + pad;

            while (dup--) {
                data->insoffs = 0;
                data->inslen = len;
                out_rawdata(data, e->val.string.data, e->val.string.len);
                if (pad)
                    out_rawdata(data, zero_buffer, pad);
            }
            break;
        }

        case EOT_DB_RESERVE:
            data->insoffs = 0;
            data->inslen = dup * e->elem;
            out_reserve(data, data->inslen);
            break;
        }

        e = e->next;
    }
}
685
/* Upper bound on the INCBIN read-buffer size when the file cannot be
   memory-mapped. This is totally just a wild guess what is reasonable... */
#define INCBIN_MAX_BUF (ZERO_BUF_SIZE * 16)
688
/*
 * Assemble one instruction or pseudo-instruction located at
 * segment:start in the given bit mode, emitting its bytes via out().
 * Returns the number of bytes generated (data.offset - start).
 */
int64_t assemble(int32_t segment, int64_t start, int bits, insn *instruction)
{
    struct out_data data;
    const struct itemplate *temp;
    enum match_result m;

    if (instruction->opcode == I_none)
        return 0;

    nasm_zero(data);
    data.offset = start;
    data.segment = segment;
    data.itemp = NULL;
    data.bits = bits;

    if (opcode_is_db(instruction->opcode)) {
        /* DB/DW/DD/... pseudo-instruction */
        out_eops(&data, instruction->eops);
    } else if (instruction->opcode == I_INCBIN) {
        const char *fname = instruction->eops->val.string.data;
        FILE *fp;
        size_t t = instruction->times; /* INCBIN handles TIMES by itself */
        off_t base = 0;
        off_t len;
        const void *map = NULL;
        char *buf = NULL;
        size_t blk = 0;         /* Buffered I/O block size */
        size_t m = 0;           /* Bytes last read (shadows the match result) */

        if (!t)
            goto done;

        fp = nasm_open_read(fname, NF_BINARY|NF_FORMAP);
        if (!fp) {
            nasm_nonfatal("`incbin': unable to open file `%s'",
                          fname);
            goto done;
        }

        len = nasm_file_size(fp);

        if (len == (off_t)-1) {
            nasm_nonfatal("`incbin': unable to get length of file `%s'",
                          fname);
            goto close_done;
        }

        /* Optional second/third operands: start offset and max length */
        if (instruction->eops->next) {
            base = instruction->eops->next->val.num.offset;
            if (base >= len) {
                len = 0;
            } else {
                len -= base;
                if (instruction->eops->next->next &&
                    len > (off_t)instruction->eops->next->next->val.num.offset)
                    len = (off_t)instruction->eops->next->next->val.num.offset;
            }
        }

        lfmt->set_offset(data.offset);
        lfmt->uplevel(LIST_INCBIN, len);

        if (!len)
            goto end_incbin;

        /* Try to map file data */
        map = nasm_map_file(fp, base, len);
        if (!map) {
            blk = len < (off_t)INCBIN_MAX_BUF ? (size_t)len : INCBIN_MAX_BUF;
            buf = nasm_malloc(blk);
        }

        while (t--) {
            /*
             * Consider these irrelevant for INCBIN, since it is fully
             * possible that these might be (way) bigger than an int
             * can hold; there is, however, no reason to widen these
             * types just for INCBIN. data.inslen == 0 signals to the
             * backend that these fields are meaningless, if at all
             * needed.
             */
            data.insoffs = 0;
            data.inslen = 0;

            if (map) {
                out_rawdata(&data, map, len);
            } else if ((off_t)m == len) {
                /* Whole file already buffered by a previous iteration */
                out_rawdata(&data, buf, len);
            } else {
                off_t l = len;

                if (fseeko(fp, base, SEEK_SET) < 0 || ferror(fp)) {
                    nasm_nonfatal("`incbin': unable to seek on file `%s'",
                                  fname);
                    goto end_incbin;
                }
                while (l > 0) {
                    m = fread(buf, 1, l < (off_t)blk ? (size_t)l : blk, fp);
                    if (!m || feof(fp)) {
                        /*
                         * This shouldn't happen unless the file
                         * actually changes while we are reading
                         * it.
                         */
                        nasm_nonfatal("`incbin': unexpected EOF while"
                                      " reading file `%s'", fname);
                        goto end_incbin;
                    }
                    out_rawdata(&data, buf, m);
                    l -= m;
                }
            }
        }
    end_incbin:
        lfmt->downlevel(LIST_INCBIN);
        if (instruction->times > 1) {
            lfmt->uplevel(LIST_TIMES, instruction->times);
            lfmt->downlevel(LIST_TIMES);
        }
        if (ferror(fp)) {
            nasm_nonfatal("`incbin': error while"
                          " reading file `%s'", fname);
        }
    close_done:
        if (buf)
            nasm_free(buf);
        if (map)
            nasm_unmap_file(map, len);
        fclose(fp);
    done:
        instruction->times = 1; /* Tell the upper layer not to iterate */
        ;
    } else {
        /* "Real" instruction */

        /* Check to see if we need an address-size prefix */
        add_asp(instruction, bits);

        m = find_match(&temp, instruction, data.segment, data.offset, bits);

        if (m == MOK_GOOD) {
            /* Matches! */
            if (unlikely(itemp_has(temp, IF_OBSOLETE))) {
                errflags warning;
                const char *whathappened;
                const char *validity;
                bool never = itemp_has(temp, IF_NEVER);

                /*
                 * If IF_OBSOLETE is set, warn the user. Different
                 * warning classes for "obsolete but valid for this
                 * specific CPU" and "obsolete and gone."
                 *
                 *!obsolete-removed [on] instruction obsolete and removed on the target CPU
                 *! warns for an instruction which has been removed
                 *! from the architecture, and is no longer included
                 *! in the CPU definition given in the \c{[CPU]}
                 *! directive, for example \c{POP CS}, the opcode for
                 *! which, \c{0Fh}, instead is an opcode prefix on
                 *! CPUs newer than the first generation 8086.
                 *
                 *!obsolete-nop [on] instruction obsolete and is a noop on the target CPU
                 *! warns for an instruction which has been removed
                 *! from the architecture, but has been architecturally
                 *! defined to be a noop for future CPUs.
                 *
                 *!obsolete-valid [on] instruction obsolete but valid on the target CPU
                 *! warns for an instruction which has been removed
                 *! from the architecture, but is still valid on the
                 *! specific CPU given in the \c{CPU} directive. Code
                 *! using these instructions is most likely not
                 *! forward compatible.
                 */

                whathappened = never ? "never implemented" : "obsolete";

                if (!never && !iflag_cmp_cpu_level(&insns_flags[temp->iflag_idx], &cpu)) {
                    warning = WARN_OBSOLETE_VALID;
                    validity = "but valid on";
                } else if (itemp_has(temp, IF_NOP)) {
                    warning = WARN_OBSOLETE_NOP;
                    validity = "and is a noop on";
                } else {
                    warning = WARN_OBSOLETE_REMOVED;
                    validity = never ? "and invalid on" : "and removed from";
                }

                nasm_warn(warning, "instruction %s %s the target CPU",
                          whathappened, validity);
            }

            data.itemp = temp;
            data.bits = bits;
            data.insoffs = 0;

            /* Size the instruction, fold TIMES for bare RESx, then emit */
            data.inslen = calcsize(data.segment, data.offset,
                                   bits, instruction, temp);
            nasm_assert(data.inslen >= 0);
            data.inslen = merge_resb(instruction, data.inslen);

            gencode(&data, instruction);
            nasm_assert(data.insoffs == data.inslen);
        } else {
            /* No match: report the most specific failure recorded */
            switch (m) {
            case MERR_OPSIZEMISSING:
                nasm_nonfatal("operation size not specified");
                break;
            case MERR_OPSIZEMISMATCH:
                nasm_nonfatal("mismatch in operand sizes");
                break;
            case MERR_BRNOTHERE:
                nasm_nonfatal("broadcast not permitted on this operand");
                break;
            case MERR_BRNUMMISMATCH:
                nasm_nonfatal("mismatch in the number of broadcasting elements");
                break;
            case MERR_MASKNOTHERE:
                nasm_nonfatal("mask not permitted on this operand");
                break;
            case MERR_DECONOTHERE:
                nasm_nonfatal("unsupported mode decorator for instruction");
                break;
            case MERR_BADCPU:
                nasm_nonfatal("no instruction for this cpu level");
                break;
            case MERR_BADMODE:
                nasm_nonfatal("instruction not supported in %d-bit mode", bits);
                break;
            case MERR_ENCMISMATCH:
                nasm_nonfatal("specific encoding scheme not available");
                break;
            case MERR_BADBND:
                nasm_nonfatal("bnd prefix is not allowed");
                break;
            case MERR_BADREPNE:
                nasm_nonfatal("%s prefix is not allowed",
                              (has_prefix(instruction, PPS_REP, P_REPNE) ?
                               "repne" : "repnz"));
                break;
            case MERR_REGSETSIZE:
                nasm_nonfatal("invalid register set size");
                break;
            case MERR_REGSET:
                nasm_nonfatal("register set not valid for operand");
                break;
            default:
                nasm_nonfatal("invalid combination of opcode and operands");
                break;
            }

            instruction->times = 1; /* Avoid repeated error messages */
        }
    }
    return data.offset - start;
}
944
eops_typeinfo(const extop * e)945 static int32_t eops_typeinfo(const extop *e)
946 {
947 int32_t typeinfo = 0;
948
949 while (e) {
950 switch (e->type) {
951 case EOT_NOTHING:
952 break;
953
954 case EOT_EXTOP:
955 typeinfo |= eops_typeinfo(e->val.subexpr);
956 break;
957
958 case EOT_DB_FLOAT:
959 switch (e->elem) {
960 case 1: typeinfo |= TY_BYTE; break;
961 case 2: typeinfo |= TY_WORD; break;
962 case 4: typeinfo |= TY_FLOAT; break;
963 case 8: typeinfo |= TY_QWORD; break; /* double? */
964 case 10: typeinfo |= TY_TBYTE; break; /* long double? */
965 case 16: typeinfo |= TY_YWORD; break;
966 case 32: typeinfo |= TY_ZWORD; break;
967 default: break;
968 }
969 break;
970
971 default:
972 switch (e->elem) {
973 case 1: typeinfo |= TY_BYTE; break;
974 case 2: typeinfo |= TY_WORD; break;
975 case 4: typeinfo |= TY_DWORD; break;
976 case 8: typeinfo |= TY_QWORD; break;
977 case 10: typeinfo |= TY_TBYTE; break;
978 case 16: typeinfo |= TY_YWORD; break;
979 case 32: typeinfo |= TY_ZWORD; break;
980 default: break;
981 }
982 break;
983 }
984 e = e->next;
985 }
986
987 return typeinfo;
988 }
989
/*
 * Emit debug type information for a DB-family data instruction:
 * the operand (element) count combined with the type bits derived
 * from its extended operand chain.
 */
static inline void debug_set_db_type(insn *instruction)
{
    dfmt->debug_typevalue(TYS_ELEMENTS(instruction->operands) |
                          eops_typeinfo(instruction->eops));
}
998
/*
 * Emit debug type information for a non-DB instruction.  A RESx
 * reservation reports its element type plus the reserved element
 * count; any other instruction is reported as a label.
 */
static void debug_set_type(insn *instruction)
{
    int32_t typeinfo;

    if (!opcode_is_resb(instruction->opcode)) {
        typeinfo = TY_LABEL;
    } else {
        typeinfo = TYS_ELEMENTS(instruction->oprs[0].offset);

        switch (instruction->opcode) {
        case I_RESB: typeinfo |= TY_BYTE;  break;
        case I_RESW: typeinfo |= TY_WORD;  break;
        case I_RESD: typeinfo |= TY_DWORD; break;
        case I_RESQ: typeinfo |= TY_QWORD; break;
        case I_REST: typeinfo |= TY_TBYTE; break;
        case I_RESO: typeinfo |= TY_OWORD; break;
        case I_RESY: typeinfo |= TY_YWORD; break;
        case I_RESZ: typeinfo |= TY_ZWORD; break;
        default:
            panic();
        }
    }

    dfmt->debug_typevalue(typeinfo);
}
1040
1041
/* Process an EQU directive */
/*
 * Define a label from an EQU directive.  Two forms are accepted:
 * a single immediate operand (plain value, possibly with a segment),
 * or an immediate:immediate pair (absolute segment:offset form).
 * Anything else, or a missing label, is diagnosed as an error.
 */
static void define_equ(insn * instruction)
{
    const struct operand *op0 = &instruction->oprs[0];
    const struct operand *op1 = &instruction->oprs[1];

    if (!instruction->label) {
        nasm_nonfatal("EQU not preceded by label");
        return;
    }

    if (instruction->operands == 1 &&
        (op0->type & IMMEDIATE) &&
        op0->wrt == NO_SEG) {
        define_label(instruction->label, op0->segment, op0->offset, false);
    } else if (instruction->operands == 2 &&
               (op0->type & IMMEDIATE) && (op0->type & COLON) &&
               op0->segment == NO_SEG && op0->wrt == NO_SEG &&
               (op1->type & IMMEDIATE) &&
               op1->segment == NO_SEG && op1->wrt == NO_SEG) {
        /* seg:off form: the first operand becomes an absolute segment */
        define_label(instruction->label,
                     op0->offset | SEG_ABS, op1->offset, false);
    } else {
        nasm_nonfatal("bad syntax for EQU");
    }
}
1068
len_extops(const extop * e)1069 static int64_t len_extops(const extop *e)
1070 {
1071 int64_t isize = 0;
1072 size_t pad;
1073
1074 while (e) {
1075 switch (e->type) {
1076 case EOT_NOTHING:
1077 break;
1078
1079 case EOT_EXTOP:
1080 isize += e->dup * len_extops(e->val.subexpr);
1081 break;
1082
1083 case EOT_DB_STRING:
1084 case EOT_DB_STRING_FREE:
1085 case EOT_DB_FLOAT:
1086 pad = pad_bytes(e->val.string.len, e->elem);
1087 isize += e->dup * (e->val.string.len + pad);
1088 break;
1089
1090 case EOT_DB_NUMBER:
1091 warn_overflow_const(e->val.num.offset, e->elem);
1092 isize += e->dup * e->elem;
1093 break;
1094
1095 case EOT_DB_RESERVE:
1096 isize += e->dup;
1097 break;
1098 }
1099
1100 e = e->next;
1101 }
1102
1103 return isize;
1104 }
1105
/*
 * Compute the size in bytes an instruction will occupy, without
 * emitting any code.  Pseudo-instructions are handled specially:
 * EQU defines a label and occupies no space, the DB family sums its
 * extended operand lengths, and INCBIN reports the (possibly skipped
 * and truncated) file length.  For real instructions the matching
 * template is found and measured with calcsize().  Returns -1 if no
 * template matches.
 */
int64_t insn_size(int32_t segment, int64_t offset, int bits, insn *instruction)
{
    const struct itemplate *temp;
    enum match_result m;
    int64_t isize = 0;

    if (instruction->opcode == I_none) {
        return 0;
    } else if (instruction->opcode == I_EQU) {
        define_equ(instruction);
        return 0;
    } else if (opcode_is_db(instruction->opcode)) {
        isize = len_extops(instruction->eops);
        debug_set_db_type(instruction);
        return isize;
    } else if (instruction->opcode == I_INCBIN) {
        const extop *e = instruction->eops;
        const char *fname = e->val.string.data;
        off_t len;

        len = nasm_file_size_by_path(fname);
        if (len == (off_t)-1) {
            nasm_nonfatal("`incbin': unable to get length of file `%s'",
                          fname);
            return 0;
        }

        /* Optional second argument: bytes to skip at the start */
        e = e->next;
        if (e) {
            if (len <= (off_t)e->val.num.offset) {
                len = 0;
            } else {
                len -= e->val.num.offset;
                /* Optional third argument: maximum bytes to include */
                e = e->next;
                if (e && len > (off_t)e->val.num.offset) {
                    len = (off_t)e->val.num.offset;
                }
            }
        }

        len *= instruction->times;
        instruction->times = 1; /* Tell the upper layer to not iterate */

        return len;
    } else {
        /* Normal instruction, or RESx */

        /* Check to see if we need an address-size prefix */
        add_asp(instruction, bits);

        m = find_match(&temp, instruction, segment, offset, bits);
        if (m != MOK_GOOD)
            return -1; /* No match */

        isize = calcsize(segment, offset, bits, instruction, temp);
        debug_set_type(instruction);
        isize = merge_resb(instruction, isize);

        return isize;
    }
}
1167
/*
 * Warn about invalid use of the HLE XACQUIRE/XRELEASE prefixes.
 * hleok is the 2-bit code collected from the template bytecode
 * (\0271..\0273 in calcsize) and indexes the columns of warn[];
 * the row is selected by which of the two HLE prefixes is present.
 */
static void bad_hle_warn(const insn * ins, uint8_t hleok)
{
    enum prefixes rep_pfx = ins->prefixes[PPS_REP];
    enum whatwarn { w_none, w_lock, w_inval } ww;
    static const enum whatwarn warn[2][4] =
    {
        { w_inval, w_inval, w_none, w_lock }, /* XACQUIRE */
        { w_inval, w_none, w_none, w_lock }, /* XRELEASE */
    };
    unsigned int n;

    /* Relies on P_XACQUIRE and P_XRELEASE being consecutive values */
    n = (unsigned int)rep_pfx - P_XACQUIRE;
    if (n > 1)
        return; /* Not XACQUIRE/XRELEASE */

    ww = warn[n][hleok];
    if (!is_class(MEMORY, ins->oprs[0].type))
        ww = w_inval; /* HLE requires operand 0 to be memory */

    /*!
     *!hle [on] invalid HLE prefixes
     *! warns about invalid use of the HLE \c{XACQUIRE} or \c{XRELEASE}
     *! prefixes.
     */
    switch (ww) {
    case w_none:
        break;

    case w_lock:
        /* Usable here, but only together with an explicit LOCK prefix */
        if (ins->prefixes[PPS_LOCK] != P_LOCK) {
            nasm_warn(WARN_HLE | ERR_PASS2,
                      "%s with this instruction requires lock",
                      prefix_name(rep_pfx));
        }
        break;

    case w_inval:
        nasm_warn(WARN_HLE | ERR_PASS2,
                  "%s invalid with this instruction",
                  prefix_name(rep_pfx));
        break;
    }
}
1211
/* Common constructs: expand to runs of consecutive case labels */
1213 #define case3(x) case (x): case (x)+1: case (x)+2
1214 #define case4(x) case3(x): case (x)+3
1215
/*
 * Compute the encoded length in bytes of a single instruction from its
 * matched template, by interpreting the template bytecode (see the
 * specification at the top of this file).  As a side effect this fills
 * in encoding state on *ins (REX/VEX/EVEX bits, implied prefixes,
 * EA processing of memory operands) which gencode() depends on.
 * Returns -1 if the instruction cannot be encoded in this mode.
 */
static int64_t calcsize(int32_t segment, int64_t offset, int bits,
                        insn * ins, const struct itemplate *temp)
{
    const uint8_t *codes = temp->code;
    int64_t length = 0;
    uint8_t c;
    int rex_mask = ~0;
    int op1, op2;
    struct operand *opx;
    uint8_t opex = 0;
    enum ea_type eat;
    uint8_t hleok = 0;
    bool lockcheck = true;
    enum reg_enum mib_index = R_none; /* For a separate index MIB reg form */
    const char *errmsg;

    ins->rex = 0; /* Ensure REX is reset */
    eat = EA_SCALAR; /* Expect a scalar EA */
    memset(ins->evex_p, 0, 3); /* Ensure EVEX is reset */

    if (ins->prefixes[PPS_OSIZE] == P_O64)
        ins->rex |= REX_W;

    (void)segment; /* Don't warn that this parameter is unused */
    (void)offset; /* Don't warn that this parameter is unused */

    while (*codes) {
        c = *codes++;
        /* Primary/secondary operand numbers, extended by a preceding \5..\7 */
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0; /* For the next iteration */

        switch (c) {
        case4(01):
            /* \1..\4: that many literal bytes follow in the code stream */
            codes += c, length += c;
            break;

        case3(05):
            /* \5..\7: add 4 to the operand number(s) of the next code */
            opex = c;
            break;

        case4(010):
            /* Register-in-opcode byte: low bits come from the register */
            ins->rex |=
                op_rexflags(opx, REX_B|REX_H|REX_P|REX_W);
            codes++, length++;
            break;

        case4(014):
            /* this is an index reg of MIB operand */
            mib_index = opx->basereg;
            break;

        case4(020):
        case4(024):
            /* One-byte immediate */
            length++;
            break;

        case4(030):
            /* Two-byte immediate */
            length += 2;
            break;

        case4(034):
            /* Immediate sized by operand/mode: word in 16-bit, else dword */
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(040):
            /* Four-byte immediate */
            length += 4;
            break;

        case4(044):
            /* Address-sized immediate */
            length += ins->addr_size >> 3;
            break;

        case4(050):
            /* One-byte relative address (short jump) */
            length++;
            break;

        case4(054):
            length += 8; /* MOV reg64/imm */
            break;

        case4(060):
            /* Two-byte relative address */
            length += 2;
            break;

        case4(064):
            /* Relative address sized by operand/mode */
            if (opx->type & (BITS16 | BITS32 | BITS64))
                length += (opx->type & BITS16) ? 2 : 4;
            else
                length += (bits == 16) ? 2 : 4;
            break;

        case4(070):
            /* Four-byte relative address */
            length += 4;
            break;

        case4(074):
            /* Segment value of a FAR reference */
            length += 2;
            break;

        case 0172:
        case 0173:
            /* is4 forms: one extra code byte selects operands, one output byte */
            codes++;
            length++;
            break;

        case4(0174):
            length++;
            break;

        case4(0240):
            /* EVEX prefix with NDS register operand */
            ins->rex |= REX_EV;
            ins->vexreg = regval(opx);
            ins->evex_p[2] |= op_evexflags(opx, EVEX_P2VP, 2); /* High-16 NDS */
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case 0250:
            /* EVEX prefix, no NDS register */
            ins->rex |= REX_EV;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            ins->evex_tuple = (*codes++ - 0300);
            break;

        case4(0254):
            /* Signed dword immediate */
            length += 4;
            break;

        case4(0260):
            /* VEX prefix with NDS register operand */
            ins->rex |= REX_V;
            ins->vexreg = regval(opx);
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case 0270:
            /* VEX prefix, no NDS register */
            ins->rex |= REX_V;
            ins->vexreg = 0;
            ins->vex_cm = *codes++;
            ins->vex_wlp = *codes++;
            break;

        case3(0271):
            /* Record which HLE prefixes this template permits */
            hleok = c & 3;
            break;

        case4(0274):
            /* Sign-extended byte immediate */
            length++;
            break;

        case4(0300):
            break;

        case 0310:
            /* 16-bit addressing required; illegal in 64-bit mode */
            if (bits == 64)
                return -1;
            length += (bits != 16) && !has_prefix(ins, PPS_ASIZE, P_A16);
            break;

        case 0311:
            /* 32-bit addressing: prefix needed unless already 32-bit */
            length += (bits != 32) && !has_prefix(ins, PPS_ASIZE, P_A32);
            break;

        case 0312:
            break;

        case 0313:
            /* 64-bit addressing only; no A16/A32 override permitted */
            if (bits != 64 || has_prefix(ins, PPS_ASIZE, P_A16) ||
                has_prefix(ins, PPS_ASIZE, P_A32))
                return -1;
            break;

        case4(0314):
            break;

        case 0320:
        {
            /* Force 16-bit operand size (reject conflicting override) */
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O16)
                break;
            if (pfx != P_none)
                nasm_warn(WARN_OTHER|ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O16;
            break;
        }

        case 0321:
        {
            /* Force 32-bit operand size (reject conflicting override) */
            enum prefixes pfx = ins->prefixes[PPS_OSIZE];
            if (pfx == P_O32)
                break;
            if (pfx != P_none)
                nasm_warn(WARN_OTHER|ERR_PASS2, "invalid operand size prefix");
            else
                ins->prefixes[PPS_OSIZE] = P_O32;
            break;
        }

        case 0322:
            break;

        case 0323:
            /* REX.W is ignored for this instruction */
            rex_mask &= ~REX_W;
            break;

        case 0324:
            /* REX.W required */
            ins->rex |= REX_W;
            break;

        case 0325:
            /* High-byte registers (AH..BH) not usable here */
            ins->rex |= REX_NH;
            break;

        case 0326:
            break;

        case 0330:
            /* Condition-code opcode byte follows */
            codes++, length++;
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            /* F2/F3 opcode prefix byte */
            length++;
            break;

        case 0334:
            /* LOCK may substitute for REX.R (see LOCK-as-REX.R below) */
            ins->rex |= REX_L;
            break;

        case 0335:
            break;

        case 0336:
            /* Implied REP prefix unless one was given explicitly */
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REP;
            break;

        case 0337:
            /* Implied REPNE prefix unless one was given explicitly */
            if (!ins->prefixes[PPS_REP])
                ins->prefixes[PPS_REP] = P_REPNE;
            break;

        case 0340:
            /* RESx: reserve operand-0 * element-size bytes of BSS */
            if (!absolute_op(&ins->oprs[0]))
                nasm_nonfatal("attempt to reserve non-constant"
                              " quantity of BSS space");
            else if (ins->oprs[0].opflags & OPFLAG_FORWARD)
                nasm_warn(WARN_OTHER, "forward reference in RESx "
                           "can have unpredictable results");
            else
                length += ins->oprs[0].offset * resb_bytes(ins->opcode);
            break;

        case 0341:
            /* Implied WAIT prefix unless one was given explicitly */
            if (!ins->prefixes[PPS_WAIT])
                ins->prefixes[PPS_WAIT] = P_WAIT;
            break;

        case 0360:
            break;

        case 0361:
            length++;
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            /* 66/67 opcode prefix byte */
            length++;
            break;

        case 0370:
        case 0371:
            break;

        case 0373:
            length++;
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModRM/SIB effective address forms */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            struct operand *opy = &ins->oprs[op2];
            struct operand *op_er_sae;

            ea_data.rex = 0; /* Ensure ea.REX is initially 0 */

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is a constant baked into the code byte */
                rflags = 0;
                rfield = c & 7;
            }

            /* EVEX.b1 : evex_brerop contains the operand position */
            op_er_sae = (ins->evex_brerop >= 0 ?
                         &ins->oprs[ins->evex_brerop] : NULL);

            if (op_er_sae && (op_er_sae->decoflags & (ER | SAE))) {
                /* set EVEX.b */
                ins->evex_p[2] |= EVEX_P2B;
                if (op_er_sae->decoflags & ER) {
                    /* set EVEX.RC (rounding control) */
                    ins->evex_p[2] |= ((ins->evex_rm - BRC_RN) << 5)
                                      & EVEX_P2RC;
                }
            } else {
                /* set EVEX.L'L (vector length) */
                ins->evex_p[2] |= ((ins->vex_wlp << (5 - 2)) & EVEX_P2LL);
                ins->evex_p[1] |= ((ins->vex_wlp << (7 - 4)) & EVEX_P1W);
                if (opy->decoflags & BRDCAST_MASK) {
                    /* set EVEX.b */
                    ins->evex_p[2] |= EVEX_P2B;
                }
            }

            if (itemp_has(temp, IF_MIB)) {
                opy->eaflags |= EAF_MIB;
                /*
                 * if a separate form of MIB (ICC style) is used,
                 * the index reg info is merged into mem operand
                 */
                if (mib_index != R_none) {
                    opy->indexreg = mib_index;
                    opy->scale = 1;
                    opy->hintbase = mib_index;
                    opy->hinttype = EAH_NOTBASE;
                }
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins, &errmsg) != eat) {
                nasm_nonfatal("%s", errmsg);
                return -1;
            } else {
                ins->rex |= ea_data.rex;
                length += ea_data.size;
            }
        }
        break;

        default:
            nasm_panic("internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }

    ins->rex &= rex_mask;

    if (ins->rex & REX_NH) {
        if (ins->rex & REX_H) {
            nasm_nonfatal("instruction cannot use high registers");
            return -1;
        }
        ins->rex &= ~REX_P; /* Don't force REX prefix due to high reg */
    }

    /* An explicit {evex}/{vex3}/{vex2} request must match the encoding */
    switch (ins->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!(ins->rex & REX_EV))
            return -1;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!(ins->rex & REX_V))
            return -1;
        break;
    default:
        break;
    }

    if (ins->rex & (REX_V | REX_EV)) {
        int bad32 = REX_R|REX_W|REX_X|REX_B;

        if (ins->rex & REX_H) {
            nasm_nonfatal("cannot use high register in AVX instruction");
            return -1;
        }
        /* Resolve the template's W policy against the requested REX.W */
        switch (ins->vex_wlp & 060) {
        case 000:
        case 040:
            ins->rex &= ~REX_W;
            break;
        case 020:
            ins->rex |= REX_W;
            bad32 &= ~REX_W;
            break;
        case 060:
            /* Follow REX_W */
            break;
        }

        if (bits != 64 && ((ins->rex & bad32) || ins->vexreg > 7)) {
            nasm_nonfatal("invalid operands in non-64-bit mode");
            return -1;
        } else if (!(ins->rex & REX_EV) &&
                   ((ins->vexreg > 15) || (ins->evex_p[0] & 0xf0))) {
            nasm_nonfatal("invalid high-16 register in non-AVX-512");
            return -1;
        }
        /* Prefix length: EVEX is 4 bytes, VEX3 is 3, VEX2 is 2 */
        if (ins->rex & REX_EV)
            length += 4;
        else if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                 ins->prefixes[PPS_VEX] == P_VEX3)
            length += 3;
        else
            length += 2;
    } else if (ins->rex & REX_MASK) {
        if (ins->rex & REX_H) {
            nasm_nonfatal("cannot use high register in rex instruction");
            return -1;
        } else if (bits == 64) {
            length++;
        } else if ((ins->rex & REX_L) &&
                   !(ins->rex & (REX_P|REX_W|REX_X|REX_B)) &&
                   iflag_cpu_level_ok(&cpu, IF_X86_64)) {
            /* LOCK-as-REX.R */
            assert_no_prefix(ins, PPS_LOCK);
            lockcheck = false; /* Already errored, no need for warning */
            length++;
        } else {
            nasm_nonfatal("invalid operands in non-64-bit mode");
            return -1;
        }
    }

    if (has_prefix(ins, PPS_LOCK, P_LOCK) && lockcheck &&
        (!itemp_has(temp,IF_LOCK) || !is_class(MEMORY, ins->oprs[0].type))) {
        /*!
         *!lock [on] LOCK prefix on unlockable instructions
         *! warns about \c{LOCK} prefixes on unlockable instructions.
         */
        nasm_warn(WARN_LOCK | ERR_PASS2 , "instruction is not lockable");
    }

    bad_hle_warn(ins, hleok);

    /*
     * when BND prefix is set by DEFAULT directive,
     * BND prefix is added to every appropriate instruction line
     * unless it is overridden by NOBND prefix.
     */
    if (globalbnd &&
        (itemp_has(temp, IF_BND) && !has_prefix(ins, PPS_REP, P_NOBND)))
            ins->prefixes[PPS_REP] = P_BND;

    /*
     * Add length of legacy prefixes
     */
    length += emit_prefix(NULL, bits, ins);

    return length;
}
1711
/*
 * Emit the REX prefix byte if one is needed and has not already been
 * emitted for this instruction.  Only applies in 64-bit mode, and not
 * to VEX/EVEX-encoded instructions, which carry the REX bits inside
 * their own prefix.
 */
static inline void emit_rex(struct out_data *data, insn *ins)
{
    if (data->bits != 64 || ins->rex_done)
        return;

    if ((ins->rex & REX_MASK) && !(ins->rex & (REX_V | REX_EV))) {
        out_rawbyte(data, (uint8_t)((ins->rex & REX_MASK) | REX_P));
        ins->rex_done = true;
    }
}
1724
/*
 * Emit (or, when data is NULL, merely count) the legacy one-byte
 * prefixes requested for an instruction: WAIT, LOCK, REP*, segment
 * overrides and address/operand size overrides.  Returns the number
 * of prefix bytes.  calcsize() calls this with data == NULL to learn
 * the prefix length without emitting anything.
 */
static int emit_prefix(struct out_data *data, const int bits, insn *ins)
{
    int bytes = 0;
    int j;

    for (j = 0; j < MAXPREFIX; j++) {
        uint8_t c = 0;          /* 0 = no byte to emit for this slot */
        switch (ins->prefixes[j]) {
        case P_WAIT:
            c = 0x9B;
            break;
        case P_LOCK:
            c = 0xF0;
            break;
        case P_REPNE:
        case P_REPNZ:
        case P_XACQUIRE:
        case P_BND:
            c = 0xF2;
            break;
        case P_REPE:
        case P_REPZ:
        case P_REP:
        case P_XRELEASE:
            c = 0xF3;
            break;
        case R_CS:
            /* CS/DS/ES/SS overrides are emitted but ignored by the CPU
             * in 64-bit mode; warn rather than suppress */
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "cs segment base generated, "
                           "but will be ignored in 64-bit mode");
            c = 0x2E;
            break;
        case R_DS:
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "ds segment base generated, "
                           "but will be ignored in 64-bit mode");
            c = 0x3E;
            break;
        case R_ES:
            if (bits == 64)
                nasm_warn(WARN_OTHER|ERR_PASS2, "es segment base generated, "
                           "but will be ignored in 64-bit mode");
            c = 0x26;
            break;
        case R_FS:
            c = 0x64;
            break;
        case R_GS:
            c = 0x65;
            break;
        case R_SS:
            if (bits == 64) {
                nasm_warn(WARN_OTHER|ERR_PASS2, "ss segment base generated, "
                           "but will be ignored in 64-bit mode");
            }
            c = 0x36;
            break;
        case R_SEGR6:
        case R_SEGR7:
            nasm_nonfatal("segr6 and segr7 cannot be used as prefixes");
            break;
        case P_A16:
            if (bits == 64) {
                nasm_nonfatal("16-bit addressing is not supported "
                              "in 64-bit mode");
            } else if (bits != 16)
                c = 0x67;
            break;
        case P_A32:
            if (bits != 32)
                c = 0x67;
            break;
        case P_A64:
            if (bits != 64) {
                nasm_nonfatal("64-bit addressing is only supported "
                              "in 64-bit mode");
            }
            break;
        case P_ASP:
            c = 0x67;
            break;
        case P_O16:
            if (bits != 16)
                c = 0x66;
            break;
        case P_O32:
            if (bits == 16)
                c = 0x66;
            break;
        case P_O64:
            /* REX.W */
            break;
        case P_OSP:
            c = 0x66;
            break;
        case P_EVEX:
        case P_VEX3:
        case P_VEX2:
        case P_NOBND:
        case P_none:
            break;
        default:
            nasm_panic("invalid instruction prefix");
        }
        if (c) {
            if (data)
                out_rawbyte(data, c);
            bytes++;
        }
    }
    return bytes;
}
1837
/*
 * Emit the machine code for one instruction, interpreting the same
 * template bytecode that calcsize() measured.  Must be kept in exact
 * step with calcsize(): every code that contributed N bytes of length
 * there must emit exactly N bytes here, using the encoding state
 * (REX/VEX/EVEX bits, prefixes) that calcsize() stored in *ins.
 */
static void gencode(struct out_data *data, insn *ins)
{
    uint8_t c;
    uint8_t bytes[4];
    int64_t size;
    int op1, op2;
    struct operand *opx;
    const uint8_t *codes = data->itemp->code;
    uint8_t opex = 0;
    enum ea_type eat = EA_SCALAR;
    int r;
    const int bits = data->bits;
    const char *errmsg;

    ins->rex_done = false;

    /* Legacy prefixes come first */
    emit_prefix(data, bits, ins);

    while (*codes) {
        c = *codes++;
        /* Operand numbers, extended by a preceding \5..\7 code */
        op1 = (c & 3) + ((opex & 1) << 2);
        op2 = ((c >> 3) & 3) + ((opex & 2) << 1);
        opx = &ins->oprs[op1];
        opex = 0; /* For the next iteration */


        switch (c) {
        case 01:
        case 02:
        case 03:
        case 04:
            /* \1..\4: literal bytes, preceded by REX if pending */
            emit_rex(data, ins);
            out_rawdata(data, codes, c);
            codes += c;
            break;

        case 05:
        case 06:
        case 07:
            opex = c;
            break;

        case4(010):
            /* Register number merged into the low bits of the opcode */
            emit_rex(data, ins);
            out_rawbyte(data, *codes++ + (regval(opx) & 7));
            break;

        case4(014):
            break;

        case4(020):
            out_imm(data, opx, 1, OUT_WRAP);
            break;

        case4(024):
            out_imm(data, opx, 1, OUT_UNSIGNED);
            break;

        case4(030):
            out_imm(data, opx, 2, OUT_WRAP);
            break;

        case4(034):
            /* Size mirrors the corresponding calcsize() case */
            if (opx->type & (BITS16 | BITS32))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(040):
            out_imm(data, opx, 4, OUT_WRAP);
            break;

        case4(044):
            size = ins->addr_size >> 3;
            out_imm(data, opx, size, OUT_WRAP);
            break;

        case4(050):
            /* Short jump: range-check the displacement when resolvable */
            if (opx->segment == data->segment) {
                int64_t delta = opx->offset - data->offset
                    - (data->inslen - data->insoffs);
                if (delta > 127 || delta < -128)
                    nasm_nonfatal("short jump is out of range");
            }
            out_reladdr(data, opx, 1);
            break;

        case4(054):
            out_imm(data, opx, 8, OUT_WRAP);
            break;

        case4(060):
            out_reladdr(data, opx, 2);
            break;

        case4(064):
            if (opx->type & (BITS16 | BITS32 | BITS64))
                size = (opx->type & BITS16) ? 2 : 4;
            else
                size = (bits == 16) ? 2 : 4;

            out_reladdr(data, opx, size);
            break;

        case4(070):
            out_reladdr(data, opx, 4);
            break;

        case4(074):
            if (opx->segment == NO_SEG)
                nasm_nonfatal("value referenced by FAR is not relocatable");
            out_segment(data, opx);
            break;

        case 0172:
        {
            /* is4: register in high nibble, immediate in the low bits */
            int mask = ins->prefixes[PPS_VEX] == P_EVEX ? 7 : 15;
            const struct operand *opy;

            c = *codes++;
            opx = &ins->oprs[c >> 3];
            opy = &ins->oprs[c & 7];
            if (!absolute_op(opy))
                nasm_nonfatal("non-absolute expression not permitted "
                              "as argument %d", c & 7);
            else if (opy->offset & ~mask)
                nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                           "is4 argument exceeds bounds");
            c = opy->offset & mask;
            goto emit_is4;
        }

        case 0173:
            c = *codes++;
            opx = &ins->oprs[c >> 4];
            c &= 15;
            goto emit_is4;

        case4(0174):
            c = 0;
        emit_is4:
            r = nasm_regvals[opx->basereg];
            out_rawbyte(data, (r << 4) | ((r & 0x10) >> 1) | c);
            break;

        case4(0254):
            if (absolute_op(opx) &&
                (int32_t)opx->offset != (int64_t)opx->offset) {
                nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                           "signed dword immediate exceeds bounds");
            }
            out_imm(data, opx, 4, OUT_SIGNED);
            break;

        case4(0240):
        case 0250:
            /* Emit the 4-byte EVEX prefix from the state calcsize() built */
            codes += 3;
            ins->evex_p[2] |= op_evexflags(&ins->oprs[0],
                                           EVEX_P2Z | EVEX_P2AAA, 2);
            ins->evex_p[2] ^= EVEX_P2VP; /* 1's complement */
            bytes[0] = 0x62;
            /* EVEX.X can be set by either REX or EVEX for different reasons */
            bytes[1] = ((((ins->rex & 7) << 5) |
                         (ins->evex_p[0] & (EVEX_P0X | EVEX_P0RP))) ^ 0xf0) |
                        (ins->vex_cm & EVEX_P0MM);
            bytes[2] = ((ins->rex & REX_W) << (7 - 3)) |
                       ((~ins->vexreg & 15) << 3) |
                       (1 << 2) | (ins->vex_wlp & 3);
            bytes[3] = ins->evex_p[2];
            out_rawdata(data, bytes, 4);
            break;

        case4(0260):
        case 0270:
            /* VEX prefix: 3-byte form when required, else 2-byte */
            codes += 2;
            if (ins->vex_cm != 1 || (ins->rex & (REX_W|REX_X|REX_B)) ||
                ins->prefixes[PPS_VEX] == P_VEX3) {
                bytes[0] = (ins->vex_cm >> 6) ? 0x8f : 0xc4;
                bytes[1] = (ins->vex_cm & 31) | ((~ins->rex & 7) << 5);
                bytes[2] = ((ins->rex & REX_W) << (7-3)) |
                    ((~ins->vexreg & 15)<< 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 3);
            } else {
                bytes[0] = 0xc5;
                bytes[1] = ((~ins->rex & REX_R) << (7-2)) |
                    ((~ins->vexreg & 15) << 3) | (ins->vex_wlp & 07);
                out_rawdata(data, bytes, 2);
            }
            break;

        case 0271:
        case 0272:
        case 0273:
            break;

        case4(0274):
        {
            /* Sign-extended byte immediate, with overflow diagnostics */
            uint64_t uv, um;
            int s;

            if (absolute_op(opx)) {
                if (ins->rex & REX_W)
                    s = 64;
                else if (ins->prefixes[PPS_OSIZE] == P_O16)
                    s = 16;
                else if (ins->prefixes[PPS_OSIZE] == P_O32)
                    s = 32;
                else
                    s = bits;

                um = (uint64_t)2 << (s-1);
                uv = opx->offset;

                if (uv > 127 && uv < (uint64_t)-128 &&
                    (uv < um-128 || uv > um-1)) {
                    /* If this wasn't explicitly byte-sized, warn as though we
                     * had fallen through to the imm16/32/64 case.
                     */
                    nasm_warn(ERR_PASS2 | WARN_NUMBER_OVERFLOW,
                               "%s value exceeds bounds",
                               (opx->type & BITS8) ? "signed byte" :
                               s == 16 ? "word" :
                               s == 32 ? "dword" :
                               "signed dword");
                }

                /* Output as a raw byte to avoid byte overflow check */
                out_rawbyte(data, (uint8_t)uv);
            } else {
                out_imm(data, opx, 1, OUT_WRAP); /* XXX: OUT_SIGNED? */
            }
            break;
        }

        case4(0300):
            break;

        case 0310:
            if (bits == 32 && !has_prefix(ins, PPS_ASIZE, P_A16))
                out_rawbyte(data, 0x67);
            break;

        case 0311:
            if (bits != 32 && !has_prefix(ins, PPS_ASIZE, P_A32))
                out_rawbyte(data, 0x67);
            break;

        case 0312:
            break;

        case 0313:
            break;

        case4(0314):
            break;

        case 0320:
        case 0321:
            /* Operand-size overrides were already emitted by emit_prefix() */
            break;

        case 0322:
        case 0323:
            break;

        case 0324:
            ins->rex |= REX_W;
            break;

        case 0325:
            break;

        case 0326:
            break;

        case 0330:
            /* Opcode byte modified by the instruction's condition code */
            out_rawbyte(data, *codes++ ^ get_cond_opcode(ins->condition));
            break;

        case 0331:
            break;

        case 0332:
        case 0333:
            out_rawbyte(data, c - 0332 + 0xF2);
            break;

        case 0334:
            /* LOCK-as-REX.R substitute (see calcsize) */
            if (ins->rex & REX_R)
                out_rawbyte(data, 0xF0);
            ins->rex &= ~(REX_L|REX_R);
            break;

        case 0335:
            break;

        case 0336:
        case 0337:
            break;

        case 0340:
            if (ins->oprs[0].segment != NO_SEG)
                nasm_panic("non-constant BSS size in pass two");

            out_reserve(data, ins->oprs[0].offset * resb_bytes(ins->opcode));
            break;

        case 0341:
            break;

        case 0360:
            break;

        case 0361:
            out_rawbyte(data, 0x66);
            break;

        case 0364:
        case 0365:
            break;

        case 0366:
        case 0367:
            out_rawbyte(data, c - 0366 + 0x66);
            break;

        case3(0370):
            break;

        case 0373:
            out_rawbyte(data, bits == 16 ? 3 : 5);
            break;

        case 0374:
            eat = EA_XMMVSIB;
            break;

        case 0375:
            eat = EA_YMMVSIB;
            break;

        case 0376:
            eat = EA_ZMMVSIB;
            break;

        case4(0100):
        case4(0110):
        case4(0120):
        case4(0130):
        case4(0200):
        case4(0204):
        case4(0210):
        case4(0214):
        case4(0220):
        case4(0224):
        case4(0230):
        case4(0234):
        {
            /* ModRM/SIB effective address forms */
            ea ea_data;
            int rfield;
            opflags_t rflags;
            uint8_t *p;
            struct operand *opy = &ins->oprs[op2];

            if (c <= 0177) {
                /* pick rfield from operand b (opx) */
                rflags = regflag(opx);
                rfield = nasm_regvals[opx->basereg];
            } else {
                /* rfield is constant */
                rflags = 0;
                rfield = c & 7;
            }

            if (process_ea(opy, &ea_data, bits,
                           rfield, rflags, ins, &errmsg) != eat)
                nasm_nonfatal("%s", errmsg);

            p = bytes;
            *p++ = ea_data.modrm;
            if (ea_data.sib_present)
                *p++ = ea_data.sib;
            out_rawdata(data, bytes, p - bytes);

            /*
             * Make sure the address gets the right offset in case
             * the line breaks in the .lst file (BR 1197827)
             */

            if (ea_data.bytes) {
                /* use compressed displacement, if available */
                if (ea_data.disp8) {
                    out_rawbyte(data, ea_data.disp8);
                } else if (ea_data.rip) {
                    out_reladdr(data, opy, ea_data.bytes);
                } else {
                    int asize = ins->addr_size >> 3;

                    if (overflow_general(opy->offset, asize) ||
                        signed_bits(opy->offset, ins->addr_size) !=
                        signed_bits(opy->offset, ea_data.bytes << 3))
                        warn_overflow(ea_data.bytes);

                    out_imm(data, opy, ea_data.bytes,
                            (asize > ea_data.bytes)
                            ? OUT_SIGNED : OUT_WRAP);
                }
            }
        }
        break;

        default:
            nasm_panic("internal instruction table corrupt"
                       ": instruction code \\%o (0x%02X) given", c, c);
            break;
        }
    }
}
2257
/* Return the operand flags of the operand's base register; the operand
 * must actually be a register or this is an internal error. */
static opflags_t regflag(const operand * o)
{
    if (!is_register(o->basereg))
        nasm_panic("invalid operand passed to regflag()");
    return nasm_reg_flags[o->basereg];
}
2264
/* Return the numeric register value of the operand's base register;
 * the operand must actually be a register or this is an internal error. */
static int32_t regval(const operand * o)
{
    if (!is_register(o->basereg))
        nasm_panic("invalid operand passed to regval()");
    return nasm_regvals[o->basereg];
}
2271
/* Compute the REX bits implied by a register operand, masked to the
 * bits the caller is interested in.  The operand must be a register. */
static int op_rexflags(const operand * o, int mask)
{
    opflags_t flags;
    int val;

    if (!is_register(o->basereg))
        nasm_panic("invalid operand passed to op_rexflags()");

    flags = nasm_reg_flags[o->basereg];
    val = nasm_regvals[o->basereg];

    return rexflags(val, flags, mask);
}
2285
/*
 * Compute the REX prefix bits implied by a register number and its
 * operand flags, masked down to the bits the caller cares about.
 * Register numbers 8-15 need REX.B/X/R (whichever position applies);
 * 64-bit registers need REX.W; the legacy high-byte registers are
 * flagged REX_H (incompatible with REX), while SPL/BPL/SIL/DIL need a
 * REX prefix present (REX_P) to be encodable at all.
 */
static int rexflags(int val, opflags_t flags, int mask)
{
    int rex = 0;

    if (val >= 0 && (val & 8))
        rex |= REX_B | REX_X | REX_R;

    if (flags & BITS64)
        rex |= REX_W;

    if (!(REG_HIGH & ~flags)) {
        rex |= REX_H;               /* AH, CH, DH, BH */
    } else if (!(REG8 & ~flags) && val >= 4) {
        rex |= REX_P;               /* SPL, BPL, SIL, DIL */
    }

    return rex & mask;
}
2301
/*
 * Compute the EVEX prefix bits, for the given EVEX payload byte
 * (0 or 2), implied by a register value and its decorator flags,
 * masked to the bits the caller cares about.  Register values 16-31
 * set the high-16 extension bits; byte 2 also carries the zeroing
 * flag and the opmask register selection.
 */
static int evexflags(int val, decoflags_t deco,
                     int mask, uint8_t byte)
{
    int evex = 0;

    switch (byte) {
    case 0:
        if (val >= 0 && (val & 16))
            evex |= (EVEX_P0RP | EVEX_P0X);
        break;
    case 2:
        if (val >= 0 && (val & 16))
            evex |= EVEX_P2VP;
        if (deco & Z)
            evex |= EVEX_P2Z;
        if (deco & OPMASK_MASK)
            evex |= deco & EVEX_P2AAA;
        break;
    }
    return evex & mask;
}
2323
/* Compute the EVEX bits for the given payload byte implied by an
 * operand's base register and decorator flags. */
static int op_evexflags(const operand * o, int mask, uint8_t byte)
{
    int val;

    val = nasm_regvals[o->basereg];

    return evexflags(val, o->decoflags, mask, byte);
}
2332
/*
 * Scan the template list for instruction->opcode and find the best
 * matching template.  Returns the best match result seen; *tempp is
 * left pointing at the template that produced it.  If the only thing
 * missing was an operand size, a second "fuzzy" pass is attempted
 * after inferring a unique size from the candidate templates.
 */
static enum match_result find_match(const struct itemplate **tempp,
                                    insn *instruction,
                                    int32_t segment, int64_t offset, int bits)
{
    const struct itemplate *temp;
    enum match_result m, merr;
    opflags_t xsizeflags[MAX_OPERANDS];
    bool opsizemissing = false;
    int8_t broadcast = instruction->evex_brerop;  /* operand index being broadcast, or none */
    int i;

    /* broadcasting uses a different data element size */
    for (i = 0; i < instruction->operands; i++)
        if (i == broadcast)
            xsizeflags[i] = instruction->oprs[i].decoflags & BRSIZE_MASK;
        else
            xsizeflags[i] = instruction->oprs[i].type & SIZE_MASK;

    merr = MERR_INVALOP;

    /* First pass: look for an exact match */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            /* Relaxable jump: let jmp_match() decide if it really fits */
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        } else if (m == MERR_OPSIZEMISSING && !itemp_has(temp, IF_SX)) {
            /*
             * Missing operand size and a candidate for fuzzy matching...
             * Accumulate the sizes this template would accept so we can
             * check later whether the size is uniquely determined.
             */
            for (i = 0; i < temp->operands; i++)
                if (i == broadcast)
                    xsizeflags[i] |= temp->deco[i] & BRSIZE_MASK;
                else
                    xsizeflags[i] |= temp->opd[i] & SIZE_MASK;
            opsizemissing = true;
        }
        /* Keep the "closest" failure for error reporting */
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

    /* No match, but see if we can get a fuzzy operand size match... */
    if (!opsizemissing)
        goto done;

    for (i = 0; i < instruction->operands; i++) {
        /*
         * We ignore extrinsic operand sizes on registers, so we should
         * never try to fuzzy-match on them.  This also resolves the case
         * when we have e.g. "xmmrm128" in two different positions.
         */
        if (is_class(REGISTER, instruction->oprs[i].type))
            continue;

        /* This tests if xsizeflags[i] has more than one bit set */
        if ((xsizeflags[i] & (xsizeflags[i]-1)))
            goto done;                /* No luck: size is ambiguous */

        if (i == broadcast) {
            /* Broadcast element size determines the operand size bit */
            instruction->oprs[i].decoflags |= xsizeflags[i];
            instruction->oprs[i].type |= (xsizeflags[i] == BR_BITS32 ?
                                          BITS32 : BITS64);
        } else {
            instruction->oprs[i].type |= xsizeflags[i]; /* Set the size */
        }
    }

    /* Try matching again with the inferred sizes... */
    for (temp = nasm_instructions[instruction->opcode];
         temp->opcode != I_none; temp++) {
        m = matches(temp, instruction, bits);
        if (m == MOK_JUMP) {
            if (jmp_match(segment, offset, bits, instruction, temp))
                m = MOK_GOOD;
            else
                m = MERR_INVALOP;
        }
        if (m > merr)
            merr = m;
        if (merr == MOK_GOOD)
            goto done;
    }

done:
    *tempp = temp;
    return merr;
}
2424
get_broadcast_num(opflags_t opflags,opflags_t brsize)2425 static uint8_t get_broadcast_num(opflags_t opflags, opflags_t brsize)
2426 {
2427 unsigned int opsize = (opflags & SIZE_MASK) >> SIZE_SHIFT;
2428 uint8_t brcast_num;
2429
2430 if (brsize > BITS64)
2431 nasm_fatal("size of broadcasting element is greater than 64 bits");
2432
2433 /*
2434 * The shift term is to take care of the extra BITS80 inserted
2435 * between BITS64 and BITS128.
2436 */
2437 brcast_num = ((opsize / (BITS64 >> SIZE_SHIFT)) * (BITS64 / brsize))
2438 >> (opsize > (BITS64 >> SIZE_SHIFT));
2439
2440 return brcast_num;
2441 }
2442
/*
 * Test whether the parsed instruction matches one specific template.
 * Returns MOK_GOOD on a full match, MOK_JUMP when the template is a
 * relaxable jump (code byte 0370/0371) that needs jmp_match() to
 * decide, or a MERR_* code describing the reason for rejection.
 */
static enum match_result matches(const struct itemplate *itemp,
                                 insn *instruction, int bits)
{
    opflags_t size[MAX_OPERANDS], asize;
    bool opsizemissing = false;
    int i, oprs;

    /*
     * Check the opcode
     */
    if (itemp->opcode != instruction->opcode)
        return MERR_INVALOP;

    /*
     * Count the operands
     */
    if (itemp->operands != instruction->operands)
        return MERR_INVALOP;

    /*
     * Is it legal?  IF_OPT templates are only used when optimization
     * is enabled.
     */
    if (!(optimizing.level > 0) && itemp_has(itemp, IF_OPT))
        return MERR_INVALOP;

    /*
     * {evex} available?  An explicit {evex}/{vex} request must match
     * the template's encoding capability.
     */
    switch (instruction->prefixes[PPS_VEX]) {
    case P_EVEX:
        if (!itemp_has(itemp, IF_EVEX))
            return MERR_ENCMISMATCH;
        break;
    case P_VEX3:
    case P_VEX2:
        if (!itemp_has(itemp, IF_VEX))
            return MERR_ENCMISMATCH;
        break;
    default:
        break;
    }

    /*
     * Check that no spurious colons or TOs are present
     */
    for (i = 0; i < itemp->operands; i++)
        if (instruction->oprs[i].type & ~itemp->opd[i] & (COLON | TO))
            return MERR_INVALOP;

    /*
     * Process size flags: derive the implied operand size (asize)
     * from the template's IF_S* flag, if any.
     */
    switch (itemp_smask(itemp)) {
    case IF_GENBIT(IF_SB):
        asize = BITS8;
        break;
    case IF_GENBIT(IF_SW):
        asize = BITS16;
        break;
    case IF_GENBIT(IF_SD):
        asize = BITS32;
        break;
    case IF_GENBIT(IF_SQ):
        asize = BITS64;
        break;
    case IF_GENBIT(IF_SO):
        asize = BITS128;
        break;
    case IF_GENBIT(IF_SY):
        asize = BITS256;
        break;
    case IF_GENBIT(IF_SZ):
        asize = BITS512;
        break;
    case IF_GENBIT(IF_ANYSIZE):
        asize = SIZE_MASK;
        break;
    case IF_GENBIT(IF_SIZE):
        /* Operand size follows the current mode */
        switch (bits) {
        case 16:
            asize = BITS16;
            break;
        case 32:
            asize = BITS32;
            break;
        case 64:
            asize = BITS64;
            break;
        default:
            asize = 0;
            break;
        }
        break;
    default:
        asize = 0;
        break;
    }

    if (itemp_armask(itemp)) {
        /* S- flags only apply to a specific operand */
        i = itemp_arg(itemp);
        memset(size, 0, sizeof size);
        size[i] = asize;
    } else {
        /* S- flags apply to all operands */
        for (i = 0; i < MAX_OPERANDS; i++)
            size[i] = asize;
    }

    /*
     * Check that the operand flags all match up,
     * it's a bit tricky so lets be verbose:
     *
     * 1) Find out the size of operand. If instruction
     *    doesn't have one specified -- we're trying to
     *    guess it either from template (IF_S* flag) or
     *    from code bits.
     *
     * 2) If template operand do not match the instruction OR
     *    template has an operand size specified AND this size differ
     *    from which instruction has (perhaps we got it from code bits)
     *    we are:
     *      a)  Check that only size of instruction and operand is differ
     *          other characteristics do match
     *      b)  Perhaps it's a register specified in instruction so
     *          for such a case we just mark that operand as "size
     *          missing" and this will turn on fuzzy operand size
     *          logic facility (handled by a caller)
     */
    for (i = 0; i < itemp->operands; i++) {
        opflags_t type = instruction->oprs[i].type;
        decoflags_t deco = instruction->oprs[i].decoflags;
        decoflags_t ideco = itemp->deco[i];
        bool is_broadcast = deco & BRDCAST_MASK;
        uint8_t brcast_num = 0;
        opflags_t template_opsize, insn_opsize;

        if (!(type & SIZE_MASK))
            type |= size[i];        /* apply the template-implied size */

        insn_opsize = type & SIZE_MASK;
        if (!is_broadcast) {
            template_opsize = itemp->opd[i] & SIZE_MASK;
        } else {
            decoflags_t deco_brsize = ideco & BRSIZE_MASK;

            /* Broadcast requested but template doesn't allow it */
            if (~ideco & BRDCAST_MASK)
                return MERR_BRNOTHERE;

            /*
             * when broadcasting, the element size depends on
             * the instruction type. decorator flag should match.
             */
            if (deco_brsize) {
                template_opsize = (deco_brsize == BR_BITS32 ? BITS32 : BITS64);
                /* calculate the proper number : {1to<brcast_num>} */
                brcast_num = get_broadcast_num(itemp->opd[i], template_opsize);
            } else {
                template_opsize = 0;
            }
        }

        /* Opmask used where the template doesn't permit one */
        if (~ideco & deco & OPMASK_MASK)
            return MERR_MASKNOTHERE;

        /* {z}/{er}/{sae} decorator used where not permitted */
        if (~ideco & deco & (Z_MASK|STATICRND_MASK|SAE_MASK))
            return MERR_DECONOTHERE;

        /* Template requires operand properties the instruction lacks */
        if (itemp->opd[i] & ~type & ~(SIZE_MASK|REGSET_MASK))
            return MERR_INVALOP;

        if (~itemp->opd[i] & type & REGSET_MASK)
            return (itemp->opd[i] & REGSET_MASK)
                ? MERR_REGSETSIZE : MERR_REGSET;

        if (template_opsize) {
            if (template_opsize != insn_opsize) {
                if (insn_opsize) {
                    return MERR_INVALOP;
                } else if (!is_class(REGISTER, type)) {
                    /*
                     * Note: we don't honor extrinsic operand sizes for registers,
                     * so "missing operand size" for a register should be
                     * considered a wildcard match rather than an error.
                     */
                    opsizemissing = true;
                }
            } else if (is_broadcast &&
                       (brcast_num !=
                        (2U << ((deco & BRNUM_MASK) >> BRNUM_SHIFT)))) {
                /*
                 * broadcasting opsize matches but the number of repeated memory
                 * element does not match.
                 * if 64b double precision float is broadcasted to ymm (256b),
                 * broadcasting decorator must be {1to4}.
                 */
                return MERR_BRNUMMISMATCH;
            }
        }
    }

    if (opsizemissing)
        return MERR_OPSIZEMISSING;

    /*
     * Check operand sizes: for IF_SM/IF_SM2 templates, propagate the
     * first explicit template size to the size-matched operands.
     */
    if (itemp_has(itemp, IF_SM) || itemp_has(itemp, IF_SM2)) {
        oprs = (itemp_has(itemp, IF_SM2) ? 2 : itemp->operands);
        for (i = 0; i < oprs; i++) {
            asize = itemp->opd[i] & SIZE_MASK;
            if (asize) {
                /* inner loop reuses i; the break below exits the outer loop */
                for (i = 0; i < oprs; i++)
                    size[i] = asize;
                break;
            }
        }
    } else {
        oprs = itemp->operands;
    }

    /*
     * NOTE(review): this loop bounds on itemp->operands while the size
     * propagation above uses "oprs" (2 for IF_SM2) -- confirm operands
     * beyond the SM2 pair are intended to be checked against size[].
     */
    for (i = 0; i < itemp->operands; i++) {
        if (!(itemp->opd[i] & SIZE_MASK) &&
            (instruction->oprs[i].type & SIZE_MASK & ~size[i]))
            return MERR_OPSIZEMISMATCH;
    }

    /*
     * Check template is okay at the set cpu level
     */
    if (iflag_cmp_cpu_level(&insns_flags[itemp->iflag_idx], &cpu) > 0)
        return MERR_BADCPU;

    /*
     * Verify the appropriate long mode flag.
     */
    if (itemp_has(itemp, (bits == 64 ? IF_NOLONG : IF_LONG)))
        return MERR_BADMODE;

    /*
     * If we have a HLE prefix, look for the NOHLE flag
     */
    if (itemp_has(itemp, IF_NOHLE) &&
        (has_prefix(instruction, PPS_REP, P_XACQUIRE) ||
         has_prefix(instruction, PPS_REP, P_XRELEASE)))
        return MERR_BADHLE;

    /*
     * Check if special handling needed for Jumps
     */
    if ((itemp->code[0] & ~1) == 0370)
        return MOK_JUMP;

    /*
     * Check if BND prefix is allowed.
     * Other 0xF2 (REPNE/REPNZ) prefix is prohibited.
     */
    if (!itemp_has(itemp, IF_BND) &&
        (has_prefix(instruction, PPS_REP, P_BND) ||
         has_prefix(instruction, PPS_REP, P_NOBND)))
        return MERR_BADBND;
    else if (itemp_has(itemp, IF_BND) &&
             (has_prefix(instruction, PPS_REP, P_REPNE) ||
              has_prefix(instruction, PPS_REP, P_REPNZ)))
        return MERR_BADREPNE;

    return MOK_GOOD;
}
2711
/*
 * Check if ModR/M.mod should/can be 01 (byte displacement):
 * - EAF_BYTEOFFS is set (a byte offset was requested), or
 * - offset can fit in a byte when EVEX is not used, or
 * - offset can be compressed (disp8*N) when EVEX is used.
 *
 * NOTE: this macro reads the locals "input", "ins", "output", "seg",
 * "forw_ref" and "o" of the function it is expanded in (process_ea).
 */
#define IS_MOD_01()     (!(input->eaflags & EAF_WORDOFFS) &&            \
                         (ins->rex & REX_EV ? seg == NO_SEG && !forw_ref && \
                          is_disp8n(input, ins, &output->disp8) :       \
                          input->eaflags & EAF_BYTEOFFS || (o >= -128 && \
                          o <= 127 && seg == NO_SEG && !forw_ref)))
2723
/*
 * Encode an effective address: fill in "output" (ModR/M byte, optional
 * SIB byte, displacement size, REX bits) from the parsed operand
 * "input".  "rfield"/"rflags" describe the register that goes into
 * ModRM.reg, so its REX/EVEX extension bits are merged here too.
 *
 * Returns the EA type generated (EA_SCALAR or a VSIB variant), or
 * EA_INVALID on error, in which case *errmsg points at a static
 * message describing the problem.
 */
static enum ea_type process_ea(operand *input, ea *output, int bits,
                               int rfield, opflags_t rflags, insn *ins,
                               const char **errmsg)
{
    bool forw_ref = !!(input->opflags & OPFLAG_UNKNOWN);
    int addrbits = ins->addr_size;
    int eaflags = input->eaflags;

    *errmsg = "invalid effective address"; /* Default error message */

    output->type = EA_SCALAR;
    output->rip = false;
    output->disp8 = 0;

    /* REX flags for the rfield operand */
    output->rex |= rexflags(rfield, rflags, REX_R | REX_P | REX_W | REX_H);
    /* EVEX.R' flag for the REG operand */
    ins->evex_p[0] |= evexflags(rfield, 0, EVEX_P0RP, 0);

    if (is_class(REGISTER, input->type)) {
        /*
         * It's a direct register.
         */
        if (!is_register(input->basereg))
            goto err;

        if (!is_reg_class(REG_EA, input->basereg))
            goto err;

        /* broadcasting is not available with a direct register operand. */
        if (input->decoflags & BRDCAST_MASK) {
            *errmsg = "broadcast not allowed with register operand";
            goto err;
        }

        output->rex |= op_rexflags(input, REX_B | REX_P | REX_W | REX_H);
        ins->evex_p[0] |= op_evexflags(input, EVEX_P0X, 0);
        output->sib_present = false;    /* no SIB necessary */
        output->bytes = 0;              /* no offset necessary either */
        output->modrm = GEN_MODRM(3, rfield, nasm_regvals[input->basereg]);
    } else {
        /*
         * It's a memory reference.
         */

        /* Embedded rounding or SAE is not available with a mem ref operand. */
        if (input->decoflags & (ER | SAE)) {
            *errmsg = "embedded rounding is available only with "
                "register-register operations";
            goto err;
        }

        if (input->basereg == -1 &&
            (input->indexreg == -1 || input->scale == 0)) {
            /*
             * It's a pure offset. If it is an IMMEDIATE, it is a pattern
             * in insns.dat which allows an immediate to be used as a memory
             * address, in which case apply the default REL/ABS.
             */
            if (bits == 64) {
                if (is_class(IMMEDIATE, input->type)) {
                    if (!(input->eaflags & EAF_ABS) &&
                        ((input->eaflags & EAF_REL) || globalrel))
                        input->type |= IP_REL;
                }
                if ((input->type & IP_REL) == IP_REL) {
                    if (input->segment == NO_SEG ||
                        (input->opflags & OPFLAG_RELATIVE)) {
                        nasm_warn(WARN_OTHER|ERR_PASS2, "absolute address can not be RIP-relative");
                        input->type &= ~IP_REL;
                        input->type |= MEMORY;
                    }
                }
            }

            if (bits == 64 &&
                !(IP_REL & ~input->type) && (eaflags & EAF_MIB)) {
                *errmsg = "RIP-relative addressing is prohibited for MIB";
                goto err;
            }

            if (eaflags & EAF_BYTEOFFS ||
                (eaflags & EAF_WORDOFFS &&
                 input->disp_size != (addrbits != 16 ? 32 : 16)))
                nasm_warn(WARN_OTHER, "displacement size ignored on absolute address");

            if (bits == 64 && (~input->type & IP_REL)) {
                /* 64-bit absolute: mod=00, rm=4, SIB base=5 (disp32 only) */
                output->sib_present = true;
                output->sib = GEN_SIB(0, 4, 5);
                output->bytes = 4;
                output->modrm = GEN_MODRM(0, rfield, 4);
                output->rip = false;
            } else {
                /* Plain disp16/disp32 (RIP-relative disp32 in 64-bit mode) */
                output->sib_present = false;
                output->bytes = (addrbits != 16 ? 4 : 2);
                output->modrm = GEN_MODRM(0, rfield,
                                          (addrbits != 16 ? 5 : 6));
                output->rip = bits == 64;
            }
        } else {
            /*
             * It's an indirection.
             */
            int i = input->indexreg, b = input->basereg, s = input->scale;
            int32_t seg = input->segment;
            int hb = input->hintbase, ht = input->hinttype;
            int t, it, bt;              /* register numbers */
            opflags_t x, ix, bx;        /* register flags */

            if (s == 0)
                i = -1;         /* make this easy, at least */

            if (is_register(i)) {
                it = nasm_regvals[i];
                ix = nasm_reg_flags[i];
            } else {
                it = -1;
                ix = 0;
            }

            if (is_register(b)) {
                bt = nasm_regvals[b];
                bx = nasm_reg_flags[b];
            } else {
                bt = -1;
                bx = 0;
            }

            /* if either one are a vector register... */
            if ((ix|bx) & (XMMREG|YMMREG|ZMMREG) & ~REG_EA) {
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;
                int mod, scale, index, base;

                /*
                 * For a vector SIB, one has to be a vector and the other,
                 * if present, a GPR. The vector must be the index operand.
                 */
                if (it == -1 || (bx & (XMMREG|YMMREG|ZMMREG) & ~REG_EA)) {
                    if (s == 0)
                        s = 1;
                    else if (s != 1)
                        goto err;

                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err;
                    if (!(REG64 & ~bx) || !(REG32 & ~bx))
                        sok &= bx;
                    else
                        goto err;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                output->type = ((ix & ZMMREG & ~REG_EA) ? EA_ZMMVSIB
                                : ((ix & YMMREG & ~REG_EA)
                                   ? EA_YMMVSIB : EA_XMMVSIB));

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);
                ins->evex_p[2] |= evexflags(it, 0, EVEX_P2VP, 2);

                index = it & 7; /* it is known to be != -1 */

                switch (s) {
                case 1:
                    scale = 0;
                    break;
                case 2:
                    scale = 1;
                    break;
                case 4:
                    scale = 2;
                    break;
                case 8:
                    scale = 3;
                    break;
                default:   /* then what the smeg is it? */
                    goto err;    /* panic */
                }

                if (bt == -1) {
                    /* No base: mod=00, base=5 forces disp32 */
                    base = 5;
                    mod = 0;
                } else {
                    base = (bt & 7);
                    if (base != REG_NUM_EBP && o == 0 &&
                        seg == NO_SEG && !forw_ref &&
                        !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                        mod = 0;
                    else if (IS_MOD_01())
                        mod = 1;
                    else
                        mod = 2;
                }

                output->sib_present = true;
                output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                output->modrm = GEN_MODRM(mod, rfield, 4);
                output->sib = GEN_SIB(scale, index, base);
            } else if ((ix|bx) & (BITS32|BITS64)) {
                /*
                 * it must be a 32/64-bit memory reference. Firstly we have
                 * to check that all registers involved are type E/Rxx.
                 */
                opflags_t sok = BITS32 | BITS64;
                int32_t o = input->offset;

                if (it != -1) {
                    if (!(REG64 & ~ix) || !(REG32 & ~ix))
                        sok &= ix;
                    else
                        goto err;
                }

                if (bt != -1) {
                    if (REG_GPR & ~bx)
                        goto err; /* Invalid register */
                    if (~sok & bx & SIZE_MASK)
                        goto err; /* Invalid size */
                    sok &= bx;
                }

                /*
                 * While we're here, ensure the user didn't specify
                 * WORD or QWORD
                 */
                if (input->disp_size == 16 || input->disp_size == 64)
                    goto err;

                if (addrbits == 16 ||
                    (addrbits == 32 && !(sok & BITS32)) ||
                    (addrbits == 64 && !(sok & BITS64)))
                    goto err;

                /* now reorganize base/index */
                if (s == 1 && bt != it && bt != -1 && it != -1 &&
                    ((hb == b && ht == EAH_NOTBASE) ||
                     (hb == i && ht == EAH_MAKEBASE))) {
                    /* swap if hints say so */
                    t = bt, bt = it, it = t;
                    x = bx, bx = ix, ix = x;
                }

                if (bt == -1 && s == 1 && !(hb == i && ht == EAH_NOTBASE)) {
                    /* make single reg base, unless hint */
                    bt = it, bx = ix, it = -1, ix = 0;
                }
                if (eaflags & EAF_MIB) {
                    /* only for mib operands */
                    if (it == -1 && (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * make a single reg index [reg*1].
                         * gas uses this form for an explicit index register.
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                    if ((ht == EAH_SUMMED) && bt == -1) {
                        /* separate once summed index into [base, index] */
                        bt = it, bx = ix, s--;
                    }
                } else {
                    if (((s == 2 && it != REG_NUM_ESP &&
                          (!(eaflags & EAF_TIMESTWO) || (ht == EAH_SUMMED))) ||
                         s == 3 || s == 5 || s == 9) && bt == -1) {
                        /* convert 3*EAX to EAX+2*EAX */
                        bt = it, bx = ix, s--;
                    }
                    if (it == -1 && (bt & 7) != REG_NUM_ESP &&
                        (eaflags & EAF_TIMESTWO) &&
                        (hb == b && ht == EAH_NOTBASE)) {
                        /*
                         * convert [NOSPLIT EAX*1]
                         * to sib format with 0x0 displacement - [EAX*1+0].
                         */
                        it = bt, ix = bx, bt = -1, bx = 0, s = 1;
                    }
                }
                if (s == 1 && it == REG_NUM_ESP) {
                    /* swap ESP into base if scale is 1 */
                    t = it, it = bt, bt = t;
                    x = ix, ix = bx, bx = x;
                }
                if (it == REG_NUM_ESP ||
                    (s != 1 && s != 2 && s != 4 && s != 8 && it != -1))
                    goto err;        /* wrong, for various reasons */

                output->rex |= rexflags(it, ix, REX_X);
                output->rex |= rexflags(bt, bx, REX_B);

                if (it == -1 && (bt & 7) != REG_NUM_ESP) {
                    /* no SIB needed */
                    int mod, rm;

                    if (bt == -1) {
                        rm = 5;
                        mod = 0;
                    } else {
                        rm = (bt & 7);
                        if (rm != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = false;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, rm);
                } else {
                    /* we need a SIB */
                    int mod, scale, index, base;

                    if (it == -1)
                        index = 4, s = 1;
                    else
                        index = (it & 7);

                    switch (s) {
                    case 1:
                        scale = 0;
                        break;
                    case 2:
                        scale = 1;
                        break;
                    case 4:
                        scale = 2;
                        break;
                    case 8:
                        scale = 3;
                        break;
                    default:   /* then what the smeg is it? */
                        goto err;    /* panic */
                    }

                    if (bt == -1) {
                        base = 5;
                        mod = 0;
                    } else {
                        base = (bt & 7);
                        if (base != REG_NUM_EBP && o == 0 &&
                            seg == NO_SEG && !forw_ref &&
                            !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                            mod = 0;
                        else if (IS_MOD_01())
                            mod = 1;
                        else
                            mod = 2;
                    }

                    output->sib_present = true;
                    output->bytes = (bt == -1 || mod == 2 ? 4 : mod);
                    output->modrm = GEN_MODRM(mod, rfield, 4);
                    output->sib = GEN_SIB(scale, index, base);
                }
            } else {            /* it's 16-bit */
                int mod, rm;
                int16_t o = input->offset;

                /* check for 64-bit long mode */
                if (addrbits == 64)
                    goto err;

                /* check all registers are BX, BP, SI or DI */
                if ((b != -1 && b != R_BP && b != R_BX && b != R_SI && b != R_DI) ||
                    (i != -1 && i != R_BP && i != R_BX && i != R_SI && i != R_DI))
                    goto err;

                /* ensure the user didn't specify DWORD/QWORD */
                if (input->disp_size == 32 || input->disp_size == 64)
                    goto err;

                if (s != 1 && i != -1)
                    goto err;    /* no can do, in 16-bit EA */
                if (b == -1 && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }               /* swap */
                if ((b == R_SI || b == R_DI) && i != -1) {
                    int tmp = b;
                    b = i;
                    i = tmp;
                }
                /* have BX/BP as base, SI/DI index */
                if (b == i)
                    goto err;    /* shouldn't ever happen, in theory */
                if (i != -1 && b != -1 &&
                    (i == R_BP || i == R_BX || b == R_SI || b == R_DI))
                    goto err;    /* invalid combinations */
                if (b == -1)     /* pure offset: handled above */
                    goto err;    /* so if it gets to here, panic! */

                /* Map the base/index pair to the fixed 16-bit rm encoding */
                rm = -1;
                if (i != -1)
                    switch (i * 256 + b) {
                    case R_SI * 256 + R_BX:
                        rm = 0;
                        break;
                    case R_DI * 256 + R_BX:
                        rm = 1;
                        break;
                    case R_SI * 256 + R_BP:
                        rm = 2;
                        break;
                    case R_DI * 256 + R_BP:
                        rm = 3;
                        break;
                    } else
                    switch (b) {
                    case R_SI:
                        rm = 4;
                        break;
                    case R_DI:
                        rm = 5;
                        break;
                    case R_BP:
                        rm = 6;
                        break;
                    case R_BX:
                        rm = 7;
                        break;
                    }
                if (rm == -1)   /* can't happen, in theory */
                    goto err;    /* so panic if it does */

                /* rm == 6 with mod == 0 means disp16, so [BP] needs mod 1 */
                if (o == 0 && seg == NO_SEG && !forw_ref && rm != 6 &&
                    !(eaflags & (EAF_BYTEOFFS | EAF_WORDOFFS)))
                    mod = 0;
                else if (IS_MOD_01())
                    mod = 1;
                else
                    mod = 2;

                output->sib_present = false;    /* no SIB - it's 16-bit */
                output->bytes = mod;            /* bytes of offset needed */
                output->modrm = GEN_MODRM(mod, rfield, rm);
            }
        }
    }

    output->size = 1 + output->sib_present + output->bytes;
    return output->type;

err:
    return output->type = EA_INVALID;
}
3189
/*
 * Determine the address size for an instruction: reconcile the mode's
 * default address size, any explicit a16/a32/a64/asp prefix, and the
 * registers/displacements actually used in memory operands.  Sets
 * ins->addr_size, may insert an address-size prefix, and strips the
 * MEM_OFFS property from operands whose displacement size cannot
 * match the chosen address size.
 */
static void add_asp(insn *ins, int addrbits)
{
    int j, valid;
    int defdisp;

    /* Bitmask of candidate address sizes (in bits) for this mode */
    valid = (addrbits == 64) ? 64|32 : 32|16;

    switch (ins->prefixes[PPS_ASIZE]) {
    case P_A16:
        valid &= 16;
        break;
    case P_A32:
        valid &= 32;
        break;
    case P_A64:
        valid &= 64;
        break;
    case P_ASP:
        /* "asp" toggles away from the mode's default size */
        valid &= (addrbits == 32) ? 16 : 32;
        break;
    default:
        break;
    }

    /* Narrow "valid" by what each memory operand's registers demand */
    for (j = 0; j < ins->operands; j++) {
        if (is_class(MEMORY, ins->oprs[j].type)) {
            opflags_t i, b;

            /* Verify as Register */
            if (!is_register(ins->oprs[j].indexreg))
                i = 0;
            else
                i = nasm_reg_flags[ins->oprs[j].indexreg];

            /* Verify as Register */
            if (!is_register(ins->oprs[j].basereg))
                b = 0;
            else
                b = nasm_reg_flags[ins->oprs[j].basereg];

            if (ins->oprs[j].scale == 0)
                i = 0;          /* scale 0 means no index register */

            if (!i && !b) {
                /* Pure offset: an explicit disp size can force the choice */
                int ds = ins->oprs[j].disp_size;
                if ((addrbits != 64 && ds > 8) ||
                    (addrbits == 64 && ds == 16))
                    valid &= ds;
            } else {
                /* Each register class restricts the usable address size */
                if (!(REG16 & ~b))
                    valid &= 16;
                if (!(REG32 & ~b))
                    valid &= 32;
                if (!(REG64 & ~b))
                    valid &= 64;

                if (!(REG16 & ~i))
                    valid &= 16;
                if (!(REG32 & ~i))
                    valid &= 32;
                if (!(REG64 & ~i))
                    valid &= 64;
            }
        }
    }

    if (valid & addrbits) {
        /* The mode's native address size works: no prefix needed */
        ins->addr_size = addrbits;
    } else if (valid & ((addrbits == 32) ? 16 : 32)) {
        /* Add an address size prefix */
        ins->prefixes[PPS_ASIZE] = (addrbits == 32) ? P_A16 : P_A32;
        ins->addr_size = (addrbits == 32) ? 16 : 32;
    } else {
        /* Impossible... */
        nasm_nonfatal("impossible combination of address sizes");
        ins->addr_size = addrbits; /* Error recovery */
    }

    defdisp = ins->addr_size == 16 ? 16 : 32;

    for (j = 0; j < ins->operands; j++) {
        if (!(MEM_OFFS & ~ins->oprs[j].type) &&
            (ins->oprs[j].disp_size ? ins->oprs[j].disp_size : defdisp) != ins->addr_size) {
            /*
             * mem_offs sizes must match the address size; if not,
             * strip the MEM_OFFS bit and match only EA instructions
             */
            ins->oprs[j].type &= ~(MEM_OFFS & ~MEMORY);
        }
    }
}
3281