1 //! This module defines x86_64-specific machine instruction types.
2
3 #![allow(dead_code)]
4 #![allow(non_snake_case)]
5 #![allow(non_camel_case_types)]
6
7 use core::convert::TryFrom;
8 use smallvec::SmallVec;
9 use std::fmt;
10 use std::string::{String, ToString};
11
12 use regalloc::RegUsageCollector;
13 use regalloc::Set;
14 use regalloc::{RealRegUniverse, Reg, RegClass, RegUsageMapper, SpillSlot, VirtualReg, Writable};
15
16 use crate::binemit::CodeOffset;
17 use crate::ir::types::{B1, B128, B16, B32, B64, B8, F32, F64, I128, I16, I32, I64, I8};
18 use crate::ir::ExternalName;
19 use crate::ir::Type;
20 use crate::machinst::*;
21 use crate::settings::Flags;
22 use crate::{settings, CodegenError, CodegenResult};
23
24 pub mod args;
25 mod emit;
26 #[cfg(test)]
27 mod emit_tests;
28 pub mod regs;
29
30 use args::*;
31 use regs::{create_reg_universe_systemv, show_ireg_sized};
32
33 //=============================================================================
34 // Instructions (top level): definition
35
36 // Don't build these directly. Instead use the Inst:: functions to create them.
37
/// Instructions. Destinations are on the RIGHT (a la AT&T syntax).
#[derive(Clone)]
pub(crate) enum Inst {
    /// Nops of various sizes, including zero bytes.
    Nop { len: u8 },

    /// Integer ALU op: (add sub and or xor mul adc? sbb?) (32 64) (reg addr imm) reg
    Alu_RMI_R {
        /// Operate on 64-bit operands; otherwise 32-bit.
        is_64: bool,
        op: RMI_R_Op,
        src: RMI,
        /// Read-modify-write: both a source and the destination.
        dst: Writable<Reg>,
    },

    /// (imm32 imm64) reg.
    /// Either: movl $imm32, %reg32 or movabsq $imm64, %reg32
    Imm_R {
        dst_is_64: bool,
        simm64: u64,
        dst: Writable<Reg>,
    },

    /// mov (64 32) reg reg
    Mov_R_R {
        is_64: bool,
        src: Reg,
        dst: Writable<Reg>,
    },

    /// movz (bl bq wl wq lq) addr reg (good for all ZX loads except 64->64).
    /// Note that the lq variant doesn't really exist since the default
    /// zero-extend rule makes it unnecessary. For that case we emit the
    /// equivalent "movl AM, reg32".
    MovZX_M_R {
        extMode: ExtMode,
        addr: Addr,
        dst: Writable<Reg>,
    },

    /// A plain 64-bit integer load, since MovZX_M_R can't represent that.
    Mov64_M_R { addr: Addr, dst: Writable<Reg> },

    /// movs (bl bq wl wq lq) addr reg (good for all SX loads)
    MovSX_M_R {
        extMode: ExtMode,
        addr: Addr,
        dst: Writable<Reg>,
    },

    /// mov (b w l q) reg addr (good for all integer stores)
    Mov_R_M {
        size: u8, // 1, 2, 4 or 8
        src: Reg,
        addr: Addr,
    },

    /// (shl shr sar) (l q) imm reg
    Shift_R {
        is_64: bool,
        kind: ShiftKind,
        /// shift count: Some(0 .. #bits-in-type - 1), or None to mean "%cl".
        num_bits: Option<u8>,
        dst: Writable<Reg>,
    },

    /// cmp (b w l q) (reg addr imm) reg
    Cmp_RMI_R {
        size: u8, // 1, 2, 4 or 8
        src: RMI,
        /// The right-hand operand; read-only (see `x64_get_regs`).
        dst: Reg,
    },

    /// pushq (reg addr imm)
    Push64 { src: RMI },

    /// popq reg
    Pop64 { dst: Writable<Reg> },

    /// call simm32
    CallKnown {
        dest: ExternalName,
        /// Registers read by the call (argument registers) — not yet populated.
        uses: Set<Reg>,
        /// Registers clobbered by the call — not yet populated.
        defs: Set<Writable<Reg>>,
    },

    /// callq (reg mem)
    CallUnknown {
        dest: RM,
        //uses: Set<Reg>,
        //defs: Set<Writable<Reg>>,
    },

    // ---- branches (exactly one must appear at end of BB) ----
    /// ret
    Ret,

    /// A placeholder instruction, generating no code, meaning that a function epilogue must be
    /// inserted there.
    EpiloguePlaceholder,

    /// jmp simm32
    JmpKnown { dest: BranchTarget },

    /// jcond cond target target
    /// Symmetrical two-way conditional branch.
    /// Emitted as a compound sequence; the MachBuffer will shrink it
    /// as appropriate.
    JmpCondSymm {
        cc: CC,
        taken: BranchTarget,
        not_taken: BranchTarget,
    },

    /// jmpq (reg mem)
    JmpUnknown { target: RM },
}
154
155 // Handy constructors for Insts.
156
157 // For various sizes, will some number of lowest bits sign extend to be the
158 // same as the whole value?
/// Does sign-extending the low 32 bits of `x` reproduce the whole 64-bit
/// value? True exactly when `x` is representable as a sign-extended imm32.
pub(crate) fn low32willSXto64(x: u64) -> bool {
    // Take the low 32 bits, reinterpret as signed, sign-extend to 64 bits,
    // and compare against the original value.
    (x as u32 as i32 as i64) as u64 == x
}
163
164 impl Inst {
nop(len: u8) -> Self165 pub(crate) fn nop(len: u8) -> Self {
166 debug_assert!(len <= 16);
167 Self::Nop { len }
168 }
169
alu_rmi_r(is_64: bool, op: RMI_R_Op, src: RMI, dst: Writable<Reg>) -> Self170 pub(crate) fn alu_rmi_r(is_64: bool, op: RMI_R_Op, src: RMI, dst: Writable<Reg>) -> Self {
171 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
172 Self::Alu_RMI_R {
173 is_64,
174 op,
175 src,
176 dst,
177 }
178 }
179
imm_r(dst_is_64: bool, simm64: u64, dst: Writable<Reg>) -> Inst180 pub(crate) fn imm_r(dst_is_64: bool, simm64: u64, dst: Writable<Reg>) -> Inst {
181 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
182 if !dst_is_64 {
183 debug_assert!(low32willSXto64(simm64));
184 }
185 Inst::Imm_R {
186 dst_is_64,
187 simm64,
188 dst,
189 }
190 }
191
mov_r_r(is_64: bool, src: Reg, dst: Writable<Reg>) -> Inst192 pub(crate) fn mov_r_r(is_64: bool, src: Reg, dst: Writable<Reg>) -> Inst {
193 debug_assert!(src.get_class() == RegClass::I64);
194 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
195 Inst::Mov_R_R { is_64, src, dst }
196 }
197
movzx_m_r(extMode: ExtMode, addr: Addr, dst: Writable<Reg>) -> Inst198 pub(crate) fn movzx_m_r(extMode: ExtMode, addr: Addr, dst: Writable<Reg>) -> Inst {
199 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
200 Inst::MovZX_M_R { extMode, addr, dst }
201 }
202
mov64_m_r(addr: Addr, dst: Writable<Reg>) -> Inst203 pub(crate) fn mov64_m_r(addr: Addr, dst: Writable<Reg>) -> Inst {
204 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
205 Inst::Mov64_M_R { addr, dst }
206 }
207
movsx_m_r(extMode: ExtMode, addr: Addr, dst: Writable<Reg>) -> Inst208 pub(crate) fn movsx_m_r(extMode: ExtMode, addr: Addr, dst: Writable<Reg>) -> Inst {
209 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
210 Inst::MovSX_M_R { extMode, addr, dst }
211 }
212
mov_r_m( size: u8, src: Reg, addr: Addr, ) -> Inst213 pub(crate) fn mov_r_m(
214 size: u8, // 1, 2, 4 or 8
215 src: Reg,
216 addr: Addr,
217 ) -> Inst {
218 debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
219 debug_assert!(src.get_class() == RegClass::I64);
220 Inst::Mov_R_M { size, src, addr }
221 }
222
shift_r( is_64: bool, kind: ShiftKind, num_bits: Option<u8>, dst: Writable<Reg>, ) -> Inst223 pub(crate) fn shift_r(
224 is_64: bool,
225 kind: ShiftKind,
226 num_bits: Option<u8>,
227 dst: Writable<Reg>,
228 ) -> Inst {
229 debug_assert!(if let Some(num_bits) = num_bits {
230 num_bits < if is_64 { 64 } else { 32 }
231 } else {
232 true
233 });
234 debug_assert!(dst.to_reg().get_class() == RegClass::I64);
235 Inst::Shift_R {
236 is_64,
237 kind,
238 num_bits,
239 dst,
240 }
241 }
242
cmp_rmi_r( size: u8, src: RMI, dst: Reg, ) -> Inst243 pub(crate) fn cmp_rmi_r(
244 size: u8, // 1, 2, 4 or 8
245 src: RMI,
246 dst: Reg,
247 ) -> Inst {
248 debug_assert!(size == 8 || size == 4 || size == 2 || size == 1);
249 debug_assert!(dst.get_class() == RegClass::I64);
250 Inst::Cmp_RMI_R { size, src, dst }
251 }
252
push64(src: RMI) -> Inst253 pub(crate) fn push64(src: RMI) -> Inst {
254 Inst::Push64 { src }
255 }
256
pop64(dst: Writable<Reg>) -> Inst257 pub(crate) fn pop64(dst: Writable<Reg>) -> Inst {
258 Inst::Pop64 { dst }
259 }
260
call_unknown(dest: RM) -> Inst261 pub(crate) fn call_unknown(dest: RM) -> Inst {
262 Inst::CallUnknown { dest }
263 }
264
ret() -> Inst265 pub(crate) fn ret() -> Inst {
266 Inst::Ret
267 }
268
epilogue_placeholder() -> Inst269 pub(crate) fn epilogue_placeholder() -> Inst {
270 Inst::EpiloguePlaceholder
271 }
272
jmp_known(dest: BranchTarget) -> Inst273 pub(crate) fn jmp_known(dest: BranchTarget) -> Inst {
274 Inst::JmpKnown { dest }
275 }
276
jmp_cond_symm(cc: CC, taken: BranchTarget, not_taken: BranchTarget) -> Inst277 pub(crate) fn jmp_cond_symm(cc: CC, taken: BranchTarget, not_taken: BranchTarget) -> Inst {
278 Inst::JmpCondSymm {
279 cc,
280 taken,
281 not_taken,
282 }
283 }
284
jmp_unknown(target: RM) -> Inst285 pub(crate) fn jmp_unknown(target: RM) -> Inst {
286 Inst::JmpUnknown { target }
287 }
288 }
289
290 //=============================================================================
291 // Instructions: printing
292
impl ShowWithRRU for Inst {
    // Pretty-print one instruction in AT&T-ish syntax; `mb_rru`, when present,
    // is used to print real-register names.
    fn show_rru(&self, mb_rru: Option<&RealRegUniverse>) -> String {
        // Left-justify `s` in a field of (at least) 7 chars, so that operands
        // line up across printed instructions.
        fn ljustify(s: String) -> String {
            let w = 7;
            if s.len() >= w {
                s
            } else {
                let need = usize::min(w, w - s.len());
                s + &format!("{nil: <width$}", nil = "", width = need)
            }
        }

        // As `ljustify`, for a mnemonic plus its size suffix.
        fn ljustify2(s1: String, s2: String) -> String {
            ljustify(s1 + &s2)
        }

        // AT&T operand-size suffix: "q" for 64-bit, "l" for 32-bit.
        fn suffixLQ(is_64: bool) -> String {
            (if is_64 { "q" } else { "l" }).to_string()
        }

        // Operand size in bytes matching `suffixLQ`.
        fn sizeLQ(is_64: bool) -> u8 {
            if is_64 {
                8
            } else {
                4
            }
        }

        // AT&T suffix for an operand size given in bytes.
        fn suffixBWLQ(size: u8) -> String {
            match size {
                1 => "b".to_string(),
                2 => "w".to_string(),
                4 => "l".to_string(),
                8 => "q".to_string(),
                _ => panic!("Inst(x64).show.suffixBWLQ: size={}", size),
            }
        }

        match self {
            Inst::Nop { len } => format!("{} len={}", ljustify("nop".to_string()), len),
            Inst::Alu_RMI_R {
                is_64,
                op,
                src,
                dst,
            } => format!(
                "{} {}, {}",
                ljustify2(op.to_string(), suffixLQ(*is_64)),
                src.show_rru_sized(mb_rru, sizeLQ(*is_64)),
                show_ireg_sized(dst.to_reg(), mb_rru, sizeLQ(*is_64)),
            ),
            Inst::Imm_R {
                dst_is_64,
                simm64,
                dst,
            } => {
                if *dst_is_64 {
                    format!(
                        "{} ${}, {}",
                        ljustify("movabsq".to_string()),
                        *simm64 as i64,
                        show_ireg_sized(dst.to_reg(), mb_rru, 8)
                    )
                } else {
                    format!(
                        "{} ${}, {}",
                        ljustify("movl".to_string()),
                        (*simm64 as u32) as i32,
                        show_ireg_sized(dst.to_reg(), mb_rru, 4)
                    )
                }
            }
            Inst::Mov_R_R { is_64, src, dst } => format!(
                "{} {}, {}",
                ljustify2("mov".to_string(), suffixLQ(*is_64)),
                show_ireg_sized(*src, mb_rru, sizeLQ(*is_64)),
                show_ireg_sized(dst.to_reg(), mb_rru, sizeLQ(*is_64))
            ),
            Inst::MovZX_M_R { extMode, addr, dst } => {
                // The 32->64 case is shown as plain "movl", per the note on
                // the MovZX_M_R variant.
                if *extMode == ExtMode::LQ {
                    format!(
                        "{} {}, {}",
                        ljustify("movl".to_string()),
                        addr.show_rru(mb_rru),
                        show_ireg_sized(dst.to_reg(), mb_rru, 4)
                    )
                } else {
                    format!(
                        "{} {}, {}",
                        ljustify2("movz".to_string(), extMode.to_string()),
                        addr.show_rru(mb_rru),
                        show_ireg_sized(dst.to_reg(), mb_rru, extMode.dst_size())
                    )
                }
            }
            Inst::Mov64_M_R { addr, dst } => format!(
                "{} {}, {}",
                ljustify("movq".to_string()),
                addr.show_rru(mb_rru),
                dst.show_rru(mb_rru)
            ),
            Inst::MovSX_M_R { extMode, addr, dst } => format!(
                "{} {}, {}",
                ljustify2("movs".to_string(), extMode.to_string()),
                addr.show_rru(mb_rru),
                show_ireg_sized(dst.to_reg(), mb_rru, extMode.dst_size())
            ),
            Inst::Mov_R_M { size, src, addr } => format!(
                "{} {}, {}",
                ljustify2("mov".to_string(), suffixBWLQ(*size)),
                show_ireg_sized(*src, mb_rru, *size),
                addr.show_rru(mb_rru)
            ),
            Inst::Shift_R {
                is_64,
                kind,
                num_bits,
                dst,
            } => match num_bits {
                // No explicit count: the count is implicitly in %cl.
                None => format!(
                    "{} %cl, {}",
                    ljustify2(kind.to_string(), suffixLQ(*is_64)),
                    show_ireg_sized(dst.to_reg(), mb_rru, sizeLQ(*is_64))
                ),

                Some(num_bits) => format!(
                    "{} ${}, {}",
                    ljustify2(kind.to_string(), suffixLQ(*is_64)),
                    num_bits,
                    show_ireg_sized(dst.to_reg(), mb_rru, sizeLQ(*is_64))
                ),
            },
            Inst::Cmp_RMI_R { size, src, dst } => format!(
                "{} {}, {}",
                ljustify2("cmp".to_string(), suffixBWLQ(*size)),
                src.show_rru_sized(mb_rru, *size),
                show_ireg_sized(*dst, mb_rru, *size)
            ),
            Inst::Push64 { src } => {
                format!("{} {}", ljustify("pushq".to_string()), src.show_rru(mb_rru))
            }
            Inst::Pop64 { dst } => {
                format!("{} {}", ljustify("popq".to_string()), dst.show_rru(mb_rru))
            }
            //Inst::CallKnown { target } => format!("{} {:?}", ljustify("call".to_string()), target),
            Inst::CallKnown { .. } => "**CallKnown**".to_string(),
            Inst::CallUnknown { dest } => format!(
                "{} *{}",
                ljustify("call".to_string()),
                dest.show_rru(mb_rru)
            ),
            Inst::Ret => "ret".to_string(),
            Inst::EpiloguePlaceholder => "epilogue placeholder".to_string(),
            Inst::JmpKnown { dest } => {
                format!("{} {}", ljustify("jmp".to_string()), dest.show_rru(mb_rru))
            }
            Inst::JmpCondSymm {
                cc,
                taken,
                not_taken,
            } => format!(
                "{} taken={} not_taken={}",
                ljustify2("j".to_string(), cc.to_string()),
                taken.show_rru(mb_rru),
                not_taken.show_rru(mb_rru)
            ),
            //
            Inst::JmpUnknown { target } => format!(
                "{} *{}",
                ljustify("jmp".to_string()),
                target.show_rru(mb_rru)
            ),
        }
    }
}
468
469 // Temp hook for legacy printing machinery
470 impl fmt::Debug for Inst {
fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result471 fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
472 // Print the insn without a Universe :-(
473 write!(fmt, "{}", self.show_rru(None))
474 }
475 }
476
/// Report the registers used, defined, and modified by `inst` to the register
/// allocator via `collector`. Must be kept in sync with `x64_map_regs`.
fn x64_get_regs(inst: &Inst, collector: &mut RegUsageCollector) {
    // This is a bit subtle. If some register is in the modified set, then it may not be in either
    // the use or def sets. However, enforcing that directly is somewhat difficult. Instead,
    // regalloc.rs will "fix" this for us by removing the modified set from the use and def
    // sets.
    match inst {
        // ** Nop
        Inst::Alu_RMI_R {
            is_64: _,
            op: _,
            src,
            dst,
        } => {
            src.get_regs_as_uses(collector);
            // The destination is read-modify-write.
            collector.add_mod(*dst);
        }
        Inst::Imm_R {
            dst_is_64: _,
            simm64: _,
            dst,
        } => {
            collector.add_def(*dst);
        }
        Inst::Mov_R_R { is_64: _, src, dst } => {
            collector.add_use(*src);
            collector.add_def(*dst);
        }
        Inst::MovZX_M_R {
            extMode: _,
            addr,
            dst,
        } => {
            addr.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::Mov64_M_R { addr, dst } => {
            addr.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::MovSX_M_R {
            extMode: _,
            addr,
            dst,
        } => {
            addr.get_regs_as_uses(collector);
            collector.add_def(*dst);
        }
        Inst::Mov_R_M { size: _, src, addr } => {
            collector.add_use(*src);
            addr.get_regs_as_uses(collector);
        }
        Inst::Shift_R {
            is_64: _,
            kind: _,
            num_bits,
            dst,
        } => {
            // A `None` count means the shift amount is taken from %cl.
            if num_bits.is_none() {
                collector.add_use(regs::rcx());
            }
            collector.add_mod(*dst);
        }
        Inst::Cmp_RMI_R { size: _, src, dst } => {
            src.get_regs_as_uses(collector);
            collector.add_use(*dst); // yes, really `add_use`
        }
        Inst::Push64 { src } => {
            src.get_regs_as_uses(collector);
            // pushq implicitly reads and updates the stack pointer.
            collector.add_mod(Writable::from_reg(regs::rsp()));
        }
        Inst::Pop64 { dst } => {
            collector.add_def(*dst);
        }
        Inst::CallKnown {
            dest: _,
            uses: _,
            defs: _,
        } => {
            // FIXME add arg regs (iru.used) and caller-saved regs (iru.defined)
            unimplemented!();
        }
        Inst::CallUnknown { dest } => {
            dest.get_regs_as_uses(collector);
        }
        Inst::Ret => {}
        Inst::EpiloguePlaceholder => {}
        Inst::JmpKnown { dest: _ } => {}
        Inst::JmpCondSymm {
            cc: _,
            taken: _,
            not_taken: _,
        } => {}
        //Inst::JmpUnknown { target } => {
        //    target.get_regs_as_uses(collector);
        //}
        Inst::Nop { .. } | Inst::JmpUnknown { .. } => unimplemented!("x64_get_regs inst"),
    }
}
575
576 //=============================================================================
577 // Instructions and subcomponents: map_regs
578
map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg)579 fn map_use<RUM: RegUsageMapper>(m: &RUM, r: &mut Reg) {
580 if r.is_virtual() {
581 let new = m.get_use(r.to_virtual_reg()).unwrap().to_reg();
582 *r = new;
583 }
584 }
585
map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>)586 fn map_def<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
587 if r.to_reg().is_virtual() {
588 let new = m.get_def(r.to_reg().to_virtual_reg()).unwrap().to_reg();
589 *r = Writable::from_reg(new);
590 }
591 }
592
map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>)593 fn map_mod<RUM: RegUsageMapper>(m: &RUM, r: &mut Writable<Reg>) {
594 if r.to_reg().is_virtual() {
595 let new = m.get_mod(r.to_reg().to_virtual_reg()).unwrap().to_reg();
596 *r = Writable::from_reg(new);
597 }
598 }
599
600 impl Addr {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)601 fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
602 match self {
603 Addr::IR {
604 simm32: _,
605 ref mut base,
606 } => map_use(map, base),
607 Addr::IRRS {
608 simm32: _,
609 ref mut base,
610 ref mut index,
611 shift: _,
612 } => {
613 map_use(map, base);
614 map_use(map, index);
615 }
616 }
617 }
618 }
619
620 impl RMI {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)621 fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
622 match self {
623 RMI::R { ref mut reg } => map_use(map, reg),
624 RMI::M { ref mut addr } => addr.map_uses(map),
625 RMI::I { simm32: _ } => {}
626 }
627 }
628 }
629
630 impl RM {
map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM)631 fn map_uses<RUM: RegUsageMapper>(&mut self, map: &RUM) {
632 match self {
633 RM::R { ref mut reg } => map_use(map, reg),
634 RM::M { ref mut addr } => addr.map_uses(map),
635 }
636 }
637 }
638
/// Rewrite the virtual registers in `inst` to the real registers chosen by
/// the allocator, as described by `mapper`.
fn x64_map_regs<RUM: RegUsageMapper>(inst: &mut Inst, mapper: &RUM) {
    // Note this must be carefully synchronized with x64_get_regs.
    match inst {
        // ** Nop
        Inst::Alu_RMI_R {
            is_64: _,
            op: _,
            ref mut src,
            ref mut dst,
        } => {
            src.map_uses(mapper);
            // Read-modify-write destination.
            map_mod(mapper, dst);
        }
        Inst::Imm_R {
            dst_is_64: _,
            simm64: _,
            ref mut dst,
        } => map_def(mapper, dst),
        Inst::Mov_R_R {
            is_64: _,
            ref mut src,
            ref mut dst,
        } => {
            map_use(mapper, src);
            map_def(mapper, dst);
        }
        Inst::MovZX_M_R {
            extMode: _,
            ref mut addr,
            ref mut dst,
        } => {
            addr.map_uses(mapper);
            map_def(mapper, dst);
        }
        Inst::Mov64_M_R { addr, dst } => {
            addr.map_uses(mapper);
            map_def(mapper, dst);
        }
        Inst::MovSX_M_R {
            extMode: _,
            ref mut addr,
            ref mut dst,
        } => {
            addr.map_uses(mapper);
            map_def(mapper, dst);
        }
        Inst::Mov_R_M {
            size: _,
            ref mut src,
            ref mut addr,
        } => {
            map_use(mapper, src);
            addr.map_uses(mapper);
        }
        Inst::Shift_R {
            is_64: _,
            kind: _,
            num_bits: _,
            ref mut dst,
        } => {
            map_mod(mapper, dst);
        }
        Inst::Cmp_RMI_R {
            size: _,
            ref mut src,
            ref mut dst,
        } => {
            src.map_uses(mapper);
            // `dst` is a read-only comparison operand (see x64_get_regs).
            map_use(mapper, dst);
        }
        Inst::Push64 { ref mut src } => src.map_uses(mapper),
        Inst::Pop64 { ref mut dst } => {
            map_def(mapper, dst);
        }
        Inst::CallKnown {
            dest: _,
            uses: _,
            defs: _,
        } => {}
        Inst::CallUnknown { dest } => dest.map_uses(mapper),
        Inst::Ret => {}
        Inst::EpiloguePlaceholder => {}
        Inst::JmpKnown { dest: _ } => {}
        Inst::JmpCondSymm {
            cc: _,
            taken: _,
            not_taken: _,
        } => {}
        //Inst::JmpUnknown { target } => {
        //    target.apply_map(mapper);
        //}
        Inst::Nop { .. } | Inst::JmpUnknown { .. } => unimplemented!("x64_map_regs opcode"),
    }
}
733
734 //=============================================================================
735 // Instructions: misc functions and external interface
736
impl MachInst for Inst {
    fn get_regs(&self, collector: &mut RegUsageCollector) {
        x64_get_regs(&self, collector)
    }

    fn map_regs<RUM: RegUsageMapper>(&mut self, mapper: &RUM) {
        x64_map_regs(self, mapper);
    }

    // Report register-to-register copies so the allocator can coalesce them.
    fn is_move(&self) -> Option<(Writable<Reg>, Reg)> {
        // Note (carefully!) that a 32-bit mov *isn't* a no-op since it zeroes
        // out the upper 32 bits of the destination. For example, we could
        // conceivably use `movl %reg, %reg` to zero out the top 32 bits of
        // %reg.
        match self {
            Self::Mov_R_R { is_64, src, dst } if *is_64 => Some((*dst, *src)),
            _ => None,
        }
    }

    fn is_epilogue_placeholder(&self) -> bool {
        if let Self::EpiloguePlaceholder = self {
            true
        } else {
            false
        }
    }

    // Classify this instruction as a basic-block terminator, if it is one.
    fn is_term<'a>(&'a self) -> MachTerminator<'a> {
        match self {
            // Interesting cases.
            &Self::Ret | &Self::EpiloguePlaceholder => MachTerminator::Ret,
            &Self::JmpKnown { dest } => MachTerminator::Uncond(dest.as_label().unwrap()),
            &Self::JmpCondSymm {
                cc: _,
                taken,
                not_taken,
            } => MachTerminator::Cond(taken.as_label().unwrap(), not_taken.as_label().unwrap()),
            // All other cases are boring.
            _ => MachTerminator::None,
        }
    }

    fn gen_move(dst_reg: Writable<Reg>, src_reg: Reg, _ty: Type) -> Inst {
        let rc_dst = dst_reg.to_reg().get_class();
        let rc_src = src_reg.get_class();
        // If this isn't true, we have gone way off the rails.
        debug_assert!(rc_dst == rc_src);
        match rc_dst {
            RegClass::I64 => Inst::mov_r_r(true, src_reg, dst_reg),
            _ => panic!("gen_move(x64): unhandled regclass"),
        }
    }

    fn gen_zero_len_nop() -> Inst {
        unimplemented!()
    }

    fn gen_nop(_preferred_size: usize) -> Inst {
        unimplemented!()
    }

    fn maybe_direct_reload(&self, _reg: VirtualReg, _slot: SpillSlot) -> Option<Inst> {
        None
    }

    // Map an SSA value type to the register class that holds it.
    fn rc_for_type(ty: Type) -> CodegenResult<RegClass> {
        match ty {
            I8 | I16 | I32 | I64 | B1 | B8 | B16 | B32 | B64 => Ok(RegClass::I64),
            F32 | F64 | I128 | B128 => Ok(RegClass::V128),
            _ => Err(CodegenError::Unsupported(format!(
                "Unexpected SSA-value type: {}",
                ty
            ))),
        }
    }

    fn gen_jump(label: MachLabel) -> Inst {
        Inst::jmp_known(BranchTarget::Label(label))
    }

    fn gen_constant(to_reg: Writable<Reg>, value: u64, _: Type) -> SmallVec<[Self; 4]> {
        let mut ret = SmallVec::new();
        // Use the 64-bit (movabsq) form only when the value doesn't fit in
        // 32 bits.
        // NOTE(review): values in 0x8000_0000..=0xffff_ffff take the 32-bit
        // path but do not sign-extend to themselves, which `imm_r`'s
        // debug_assert rejects — confirm intended behavior for those values.
        let is64 = value > 0xffff_ffff;
        ret.push(Inst::imm_r(is64, value, to_reg));
        ret
    }

    fn reg_universe(flags: &Flags) -> RealRegUniverse {
        create_reg_universe_systemv(flags)
    }

    fn worst_case_size() -> CodeOffset {
        // 15 bytes: the architectural upper bound on x86-64 instruction
        // length.
        15
    }

    type LabelUse = LabelUse;
}
835
impl MachInstEmit for Inst {
    // No per-function emission state is needed (yet).
    type State = ();

    fn emit(&self, sink: &mut MachBuffer<Inst>, _flags: &settings::Flags, _: &mut Self::State) {
        // All encoding logic lives in the `emit` submodule.
        emit::emit(self, sink);
    }
}
843
/// A label-use (internal relocation) in generated code.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub(crate) enum LabelUse {
    /// A 32-bit offset from location of relocation itself, added to the
    /// existing value at that location. The patched value is
    /// `label_offset - use_offset + existing` (see `LabelUse::patch`).
    Rel32,
}
851
852 impl MachInstLabelUse for LabelUse {
853 const ALIGN: CodeOffset = 1;
854
max_pos_range(self) -> CodeOffset855 fn max_pos_range(self) -> CodeOffset {
856 match self {
857 LabelUse::Rel32 => 0x7fff_ffff,
858 }
859 }
860
max_neg_range(self) -> CodeOffset861 fn max_neg_range(self) -> CodeOffset {
862 match self {
863 LabelUse::Rel32 => 0x8000_0000,
864 }
865 }
866
patch_size(self) -> CodeOffset867 fn patch_size(self) -> CodeOffset {
868 match self {
869 LabelUse::Rel32 => 4,
870 }
871 }
872
patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset)873 fn patch(self, buffer: &mut [u8], use_offset: CodeOffset, label_offset: CodeOffset) {
874 match self {
875 LabelUse::Rel32 => {
876 let addend = i32::from_le_bytes([buffer[0], buffer[1], buffer[2], buffer[3]]);
877 let value = i32::try_from(label_offset)
878 .unwrap()
879 .wrapping_sub(i32::try_from(use_offset).unwrap())
880 .wrapping_add(addend);
881 buffer.copy_from_slice(&value.to_le_bytes()[..]);
882 }
883 }
884 }
885
supports_veneer(self) -> bool886 fn supports_veneer(self) -> bool {
887 match self {
888 LabelUse::Rel32 => false,
889 }
890 }
891
veneer_size(self) -> CodeOffset892 fn veneer_size(self) -> CodeOffset {
893 match self {
894 LabelUse::Rel32 => 0,
895 }
896 }
897
generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse)898 fn generate_veneer(self, _: &mut [u8], _: CodeOffset) -> (CodeOffset, LabelUse) {
899 match self {
900 LabelUse::Rel32 => {
901 panic!("Veneer not supported for Rel32 label-use.");
902 }
903 }
904 }
905 }
906