1 //! S390x ISA: binary code emission.
2
3 use crate::binemit::{Reloc, StackMap};
4 use crate::ir::condcodes::IntCC;
5 use crate::ir::MemFlags;
6 use crate::ir::{SourceLoc, TrapCode};
7 use crate::isa::s390x::inst::*;
8 use core::convert::TryFrom;
9 use log::debug;
10 use regalloc::{Reg, RegClass};
11
12 /// Memory addressing mode finalization: convert "special" modes (e.g.,
13 /// generic arbitrary stack offset) into real addressing modes, possibly by
14 /// emitting some helper instructions that come immediately before the use
15 /// of this amode.
mem_finalize( mem: &MemArg, state: &EmitState, have_d12: bool, have_d20: bool, have_pcrel: bool, have_index: bool, ) -> (SmallVec<[Inst; 4]>, MemArg)16 pub fn mem_finalize(
17 mem: &MemArg,
18 state: &EmitState,
19 have_d12: bool,
20 have_d20: bool,
21 have_pcrel: bool,
22 have_index: bool,
23 ) -> (SmallVec<[Inst; 4]>, MemArg) {
24 let mut insts = SmallVec::new();
25
26 // Resolve virtual addressing modes.
27 let mem = match mem {
28 &MemArg::RegOffset { off, .. }
29 | &MemArg::InitialSPOffset { off }
30 | &MemArg::NominalSPOffset { off } => {
31 let base = match mem {
32 &MemArg::RegOffset { reg, .. } => reg,
33 &MemArg::InitialSPOffset { .. } | &MemArg::NominalSPOffset { .. } => stack_reg(),
34 _ => unreachable!(),
35 };
36 let adj = match mem {
37 &MemArg::InitialSPOffset { .. } => {
38 state.initial_sp_offset + state.virtual_sp_offset
39 }
40 &MemArg::NominalSPOffset { .. } => state.virtual_sp_offset,
41 _ => 0,
42 };
43 let off = off + adj;
44
45 if let Some(disp) = UImm12::maybe_from_u64(off as u64) {
46 MemArg::BXD12 {
47 base,
48 index: zero_reg(),
49 disp,
50 flags: mem.get_flags(),
51 }
52 } else if let Some(disp) = SImm20::maybe_from_i64(off) {
53 MemArg::BXD20 {
54 base,
55 index: zero_reg(),
56 disp,
57 flags: mem.get_flags(),
58 }
59 } else {
60 let tmp = writable_spilltmp_reg();
61 assert!(base != tmp.to_reg());
62 insts.extend(Inst::load_constant64(tmp, off as u64));
63 MemArg::reg_plus_reg(base, tmp.to_reg(), mem.get_flags())
64 }
65 }
66 _ => mem.clone(),
67 };
68
69 // If this addressing mode cannot be handled by the instruction, use load-address.
70 let need_load_address = match &mem {
71 &MemArg::Label { .. } | &MemArg::Symbol { .. } if !have_pcrel => true,
72 &MemArg::BXD20 { .. } if !have_d20 => true,
73 &MemArg::BXD12 { index, .. } | &MemArg::BXD20 { index, .. } if !have_index => {
74 index != zero_reg()
75 }
76 _ => false,
77 };
78 let mem = if need_load_address {
79 let flags = mem.get_flags();
80 let tmp = writable_spilltmp_reg();
81 insts.push(Inst::LoadAddr { rd: tmp, mem });
82 MemArg::reg(tmp.to_reg(), flags)
83 } else {
84 mem
85 };
86
87 // Convert 12-bit displacement to 20-bit if required.
88 let mem = match &mem {
89 &MemArg::BXD12 {
90 base,
91 index,
92 disp,
93 flags,
94 } if !have_d12 => {
95 assert!(have_d20);
96 MemArg::BXD20 {
97 base,
98 index,
99 disp: SImm20::from_uimm12(disp),
100 flags,
101 }
102 }
103 _ => mem,
104 };
105
106 (insts, mem)
107 }
108
mem_emit( rd: Reg, mem: &MemArg, opcode_rx: Option<u16>, opcode_rxy: Option<u16>, opcode_ril: Option<u16>, add_trap: bool, sink: &mut MachBuffer<Inst>, emit_info: &EmitInfo, state: &mut EmitState, )109 pub fn mem_emit(
110 rd: Reg,
111 mem: &MemArg,
112 opcode_rx: Option<u16>,
113 opcode_rxy: Option<u16>,
114 opcode_ril: Option<u16>,
115 add_trap: bool,
116 sink: &mut MachBuffer<Inst>,
117 emit_info: &EmitInfo,
118 state: &mut EmitState,
119 ) {
120 let (mem_insts, mem) = mem_finalize(
121 mem,
122 state,
123 opcode_rx.is_some(),
124 opcode_rxy.is_some(),
125 opcode_ril.is_some(),
126 true,
127 );
128 for inst in mem_insts.into_iter() {
129 inst.emit(sink, emit_info, state);
130 }
131
132 if add_trap && mem.can_trap() {
133 let srcloc = state.cur_srcloc();
134 if srcloc != SourceLoc::default() {
135 sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
136 }
137 }
138
139 match &mem {
140 &MemArg::BXD12 {
141 base, index, disp, ..
142 } => {
143 put(
144 sink,
145 &enc_rx(opcode_rx.unwrap(), rd, base, index, disp.bits()),
146 );
147 }
148 &MemArg::BXD20 {
149 base, index, disp, ..
150 } => {
151 put(
152 sink,
153 &enc_rxy(opcode_rxy.unwrap(), rd, base, index, disp.bits()),
154 );
155 }
156 &MemArg::Label { ref target } => {
157 if let Some(l) = target.as_label() {
158 sink.use_label_at_offset(sink.cur_offset(), l, LabelUse::BranchRIL);
159 }
160 put(
161 sink,
162 &enc_ril_b(opcode_ril.unwrap(), rd, target.as_ril_offset_or_zero()),
163 );
164 }
165 &MemArg::Symbol {
166 ref name, offset, ..
167 } => {
168 let reloc = Reloc::S390xPCRel32Dbl;
169 let srcloc = state.cur_srcloc();
170 put_with_reloc(
171 sink,
172 &enc_ril_b(opcode_ril.unwrap(), rd, 0),
173 2,
174 srcloc,
175 reloc,
176 name,
177 offset.into(),
178 );
179 }
180 _ => unreachable!(),
181 }
182 }
183
mem_imm8_emit( imm: u8, mem: &MemArg, opcode_si: u16, opcode_siy: u16, add_trap: bool, sink: &mut MachBuffer<Inst>, emit_info: &EmitInfo, state: &mut EmitState, )184 pub fn mem_imm8_emit(
185 imm: u8,
186 mem: &MemArg,
187 opcode_si: u16,
188 opcode_siy: u16,
189 add_trap: bool,
190 sink: &mut MachBuffer<Inst>,
191 emit_info: &EmitInfo,
192 state: &mut EmitState,
193 ) {
194 let (mem_insts, mem) = mem_finalize(mem, state, true, true, false, false);
195 for inst in mem_insts.into_iter() {
196 inst.emit(sink, emit_info, state);
197 }
198
199 if add_trap && mem.can_trap() {
200 let srcloc = state.cur_srcloc();
201 if srcloc != SourceLoc::default() {
202 sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
203 }
204 }
205
206 match &mem {
207 &MemArg::BXD12 {
208 base, index, disp, ..
209 } => {
210 assert!(index == zero_reg());
211 put(sink, &enc_si(opcode_si, base, disp.bits(), imm));
212 }
213 &MemArg::BXD20 {
214 base, index, disp, ..
215 } => {
216 assert!(index == zero_reg());
217 put(sink, &enc_siy(opcode_siy, base, disp.bits(), imm));
218 }
219 _ => unreachable!(),
220 }
221 }
222
mem_imm16_emit( imm: i16, mem: &MemArg, opcode_sil: u16, add_trap: bool, sink: &mut MachBuffer<Inst>, emit_info: &EmitInfo, state: &mut EmitState, )223 pub fn mem_imm16_emit(
224 imm: i16,
225 mem: &MemArg,
226 opcode_sil: u16,
227 add_trap: bool,
228 sink: &mut MachBuffer<Inst>,
229 emit_info: &EmitInfo,
230 state: &mut EmitState,
231 ) {
232 let (mem_insts, mem) = mem_finalize(mem, state, true, false, false, false);
233 for inst in mem_insts.into_iter() {
234 inst.emit(sink, emit_info, state);
235 }
236
237 if add_trap && mem.can_trap() {
238 let srcloc = state.cur_srcloc();
239 if srcloc != SourceLoc::default() {
240 sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
241 }
242 }
243
244 match &mem {
245 &MemArg::BXD12 {
246 base, index, disp, ..
247 } => {
248 assert!(index == zero_reg());
249 put(sink, &enc_sil(opcode_sil, base, disp.bits(), imm));
250 }
251 _ => unreachable!(),
252 }
253 }
254
255 //=============================================================================
256 // Instructions and subcomponents: emission
257
machreg_to_gpr(m: Reg) -> u8258 fn machreg_to_gpr(m: Reg) -> u8 {
259 assert_eq!(m.get_class(), RegClass::I64);
260 u8::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
261 }
262
machreg_to_fpr(m: Reg) -> u8263 fn machreg_to_fpr(m: Reg) -> u8 {
264 assert_eq!(m.get_class(), RegClass::F64);
265 u8::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
266 }
267
machreg_to_gpr_or_fpr(m: Reg) -> u8268 fn machreg_to_gpr_or_fpr(m: Reg) -> u8 {
269 u8::try_from(m.to_real_reg().get_hw_encoding()).unwrap()
270 }
271
272 /// E-type instructions.
273 ///
274 /// 15
275 /// opcode
276 /// 0
277 ///
enc_e(opcode: u16) -> [u8; 2]278 fn enc_e(opcode: u16) -> [u8; 2] {
279 let mut enc: [u8; 2] = [0; 2];
280 let opcode1 = ((opcode >> 8) & 0xff) as u8;
281 let opcode2 = (opcode & 0xff) as u8;
282
283 enc[0] = opcode1;
284 enc[1] = opcode2;
285 enc
286 }
287
288 /// RIa-type instructions.
289 ///
290 /// 31 23 19 15
291 /// opcode1 r1 opcode2 i2
292 /// 24 20 16 0
293 ///
enc_ri_a(opcode: u16, r1: Reg, i2: u16) -> [u8; 4]294 fn enc_ri_a(opcode: u16, r1: Reg, i2: u16) -> [u8; 4] {
295 let mut enc: [u8; 4] = [0; 4];
296 let opcode1 = ((opcode >> 4) & 0xff) as u8;
297 let opcode2 = (opcode & 0xf) as u8;
298 let r1 = machreg_to_gpr(r1) & 0x0f;
299
300 enc[0] = opcode1;
301 enc[1] = r1 << 4 | opcode2;
302 enc[2..].copy_from_slice(&i2.to_be_bytes());
303 enc
304 }
305
306 /// RIb-type instructions.
307 ///
308 /// 31 23 19 15
309 /// opcode1 r1 opcode2 ri2
310 /// 24 20 16 0
311 ///
enc_ri_b(opcode: u16, r1: Reg, ri2: i32) -> [u8; 4]312 fn enc_ri_b(opcode: u16, r1: Reg, ri2: i32) -> [u8; 4] {
313 let mut enc: [u8; 4] = [0; 4];
314 let opcode1 = ((opcode >> 4) & 0xff) as u8;
315 let opcode2 = (opcode & 0xf) as u8;
316 let r1 = machreg_to_gpr(r1) & 0x0f;
317 let ri2 = ((ri2 >> 1) & 0xffff) as u16;
318
319 enc[0] = opcode1;
320 enc[1] = r1 << 4 | opcode2;
321 enc[2..].copy_from_slice(&ri2.to_be_bytes());
322 enc
323 }
324
325 /// RIc-type instructions.
326 ///
327 /// 31 23 19 15
328 /// opcode1 m1 opcode2 ri2
329 /// 24 20 16 0
330 ///
enc_ri_c(opcode: u16, m1: u8, ri2: i32) -> [u8; 4]331 fn enc_ri_c(opcode: u16, m1: u8, ri2: i32) -> [u8; 4] {
332 let mut enc: [u8; 4] = [0; 4];
333 let opcode1 = ((opcode >> 4) & 0xff) as u8;
334 let opcode2 = (opcode & 0xf) as u8;
335 let m1 = m1 & 0x0f;
336 let ri2 = ((ri2 >> 1) & 0xffff) as u16;
337
338 enc[0] = opcode1;
339 enc[1] = m1 << 4 | opcode2;
340 enc[2..].copy_from_slice(&ri2.to_be_bytes());
341 enc
342 }
343
344 /// RIEa-type instructions.
345 ///
346 /// 47 39 35 31 15 11 7
347 /// opcode1 r1 -- i2 m3 -- opcode2
348 /// 40 36 32 16 12 8 0
349 ///
enc_rie_a(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6]350 fn enc_rie_a(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6] {
351 let mut enc: [u8; 6] = [0; 6];
352 let opcode1 = ((opcode >> 8) & 0xff) as u8;
353 let opcode2 = (opcode & 0xff) as u8;
354 let r1 = machreg_to_gpr(r1) & 0x0f;
355 let m3 = m3 & 0x0f;
356
357 enc[0] = opcode1;
358 enc[1] = r1 << 4;
359 enc[2..4].copy_from_slice(&i2.to_be_bytes());
360 enc[4] = m3 << 4;
361 enc[5] = opcode2;
362 enc
363 }
364
365 /// RIEd-type instructions.
366 ///
367 /// 47 39 35 31 15 7
368 /// opcode1 r1 r3 i2 -- opcode2
369 /// 40 36 32 16 8 0
370 ///
enc_rie_d(opcode: u16, r1: Reg, r3: Reg, i2: u16) -> [u8; 6]371 fn enc_rie_d(opcode: u16, r1: Reg, r3: Reg, i2: u16) -> [u8; 6] {
372 let mut enc: [u8; 6] = [0; 6];
373 let opcode1 = ((opcode >> 8) & 0xff) as u8;
374 let opcode2 = (opcode & 0xff) as u8;
375 let r1 = machreg_to_gpr(r1) & 0x0f;
376 let r3 = machreg_to_gpr(r3) & 0x0f;
377
378 enc[0] = opcode1;
379 enc[1] = r1 << 4 | r3;
380 enc[2..4].copy_from_slice(&i2.to_be_bytes());
381 enc[5] = opcode2;
382 enc
383 }
384
385 /// RIEg-type instructions.
386 ///
387 /// 47 39 35 31 15 7
388 /// opcode1 r1 m3 i2 -- opcode2
389 /// 40 36 32 16 8 0
390 ///
enc_rie_g(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6]391 fn enc_rie_g(opcode: u16, r1: Reg, i2: u16, m3: u8) -> [u8; 6] {
392 let mut enc: [u8; 6] = [0; 6];
393 let opcode1 = ((opcode >> 8) & 0xff) as u8;
394 let opcode2 = (opcode & 0xff) as u8;
395 let r1 = machreg_to_gpr(r1) & 0x0f;
396 let m3 = m3 & 0x0f;
397
398 enc[0] = opcode1;
399 enc[1] = r1 << 4 | m3;
400 enc[2..4].copy_from_slice(&i2.to_be_bytes());
401 enc[5] = opcode2;
402 enc
403 }
404
405 /// RILa-type instructions.
406 ///
407 /// 47 39 35 31
408 /// opcode1 r1 opcode2 i2
409 /// 40 36 32 0
410 ///
enc_ril_a(opcode: u16, r1: Reg, i2: u32) -> [u8; 6]411 fn enc_ril_a(opcode: u16, r1: Reg, i2: u32) -> [u8; 6] {
412 let mut enc: [u8; 6] = [0; 6];
413 let opcode1 = ((opcode >> 4) & 0xff) as u8;
414 let opcode2 = (opcode & 0xf) as u8;
415 let r1 = machreg_to_gpr(r1) & 0x0f;
416
417 enc[0] = opcode1;
418 enc[1] = r1 << 4 | opcode2;
419 enc[2..].copy_from_slice(&i2.to_be_bytes());
420 enc
421 }
422
423 /// RILb-type instructions.
424 ///
425 /// 47 39 35 31
426 /// opcode1 r1 opcode2 ri2
427 /// 40 36 32 0
428 ///
enc_ril_b(opcode: u16, r1: Reg, ri2: u32) -> [u8; 6]429 fn enc_ril_b(opcode: u16, r1: Reg, ri2: u32) -> [u8; 6] {
430 let mut enc: [u8; 6] = [0; 6];
431 let opcode1 = ((opcode >> 4) & 0xff) as u8;
432 let opcode2 = (opcode & 0xf) as u8;
433 let r1 = machreg_to_gpr(r1) & 0x0f;
434
435 enc[0] = opcode1;
436 enc[1] = r1 << 4 | opcode2;
437 enc[2..].copy_from_slice(&ri2.to_be_bytes());
438 enc
439 }
440
441 /// RILc-type instructions.
442 ///
443 /// 47 39 35 31
444 /// opcode1 m1 opcode2 i2
445 /// 40 36 32 0
446 ///
enc_ril_c(opcode: u16, m1: u8, ri2: u32) -> [u8; 6]447 fn enc_ril_c(opcode: u16, m1: u8, ri2: u32) -> [u8; 6] {
448 let mut enc: [u8; 6] = [0; 6];
449 let opcode1 = ((opcode >> 4) & 0xff) as u8;
450 let opcode2 = (opcode & 0xf) as u8;
451 let m1 = m1 & 0x0f;
452
453 enc[0] = opcode1;
454 enc[1] = m1 << 4 | opcode2;
455 enc[2..].copy_from_slice(&ri2.to_be_bytes());
456 enc
457 }
458
459 /// RR-type instructions.
460 ///
461 /// 15 7 3
462 /// opcode r1 r2
463 /// 8 4 0
464 ///
enc_rr(opcode: u16, r1: Reg, r2: Reg) -> [u8; 2]465 fn enc_rr(opcode: u16, r1: Reg, r2: Reg) -> [u8; 2] {
466 let mut enc: [u8; 2] = [0; 2];
467 let opcode = (opcode & 0xff) as u8;
468 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
469 let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
470
471 enc[0] = opcode;
472 enc[1] = r1 << 4 | r2;
473 enc
474 }
475
476 /// RRD-type instructions.
477 ///
478 /// 31 15 11 7 3
479 /// opcode r1 -- r3 r2
480 /// 16 12 8 4 0
481 ///
enc_rrd(opcode: u16, r1: Reg, r2: Reg, r3: Reg) -> [u8; 4]482 fn enc_rrd(opcode: u16, r1: Reg, r2: Reg, r3: Reg) -> [u8; 4] {
483 let mut enc: [u8; 4] = [0; 4];
484 let opcode1 = ((opcode >> 8) & 0xff) as u8;
485 let opcode2 = (opcode & 0xff) as u8;
486 let r1 = machreg_to_fpr(r1) & 0x0f;
487 let r2 = machreg_to_fpr(r2) & 0x0f;
488 let r3 = machreg_to_fpr(r3) & 0x0f;
489
490 enc[0] = opcode1;
491 enc[1] = opcode2;
492 enc[2] = r1 << 4;
493 enc[3] = r3 << 4 | r2;
494 enc
495 }
496
497 /// RRE-type instructions.
498 ///
499 /// 31 15 7 3
500 /// opcode -- r1 r2
501 /// 16 8 4 0
502 ///
enc_rre(opcode: u16, r1: Reg, r2: Reg) -> [u8; 4]503 fn enc_rre(opcode: u16, r1: Reg, r2: Reg) -> [u8; 4] {
504 let mut enc: [u8; 4] = [0; 4];
505 let opcode1 = ((opcode >> 8) & 0xff) as u8;
506 let opcode2 = (opcode & 0xff) as u8;
507 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
508 let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
509
510 enc[0] = opcode1;
511 enc[1] = opcode2;
512 enc[3] = r1 << 4 | r2;
513 enc
514 }
515
516 /// RRFa/b-type instructions.
517 ///
518 /// 31 15 11 7 3
519 /// opcode r3 m4 r1 r2
520 /// 16 12 8 4 0
521 ///
enc_rrf_ab(opcode: u16, r1: Reg, r2: Reg, r3: Reg, m4: u8) -> [u8; 4]522 fn enc_rrf_ab(opcode: u16, r1: Reg, r2: Reg, r3: Reg, m4: u8) -> [u8; 4] {
523 let mut enc: [u8; 4] = [0; 4];
524 let opcode1 = ((opcode >> 8) & 0xff) as u8;
525 let opcode2 = (opcode & 0xff) as u8;
526 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
527 let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
528 let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
529 let m4 = m4 & 0x0f;
530
531 enc[0] = opcode1;
532 enc[1] = opcode2;
533 enc[2] = r3 << 4 | m4;
534 enc[3] = r1 << 4 | r2;
535 enc
536 }
537
538 /// RRFc/d/e-type instructions.
539 ///
540 /// 31 15 11 7 3
541 /// opcode m3 m4 r1 r2
542 /// 16 12 8 4 0
543 ///
enc_rrf_cde(opcode: u16, r1: Reg, r2: Reg, m3: u8, m4: u8) -> [u8; 4]544 fn enc_rrf_cde(opcode: u16, r1: Reg, r2: Reg, m3: u8, m4: u8) -> [u8; 4] {
545 let mut enc: [u8; 4] = [0; 4];
546 let opcode1 = ((opcode >> 8) & 0xff) as u8;
547 let opcode2 = (opcode & 0xff) as u8;
548 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
549 let r2 = machreg_to_gpr_or_fpr(r2) & 0x0f;
550 let m3 = m3 & 0x0f;
551 let m4 = m4 & 0x0f;
552
553 enc[0] = opcode1;
554 enc[1] = opcode2;
555 enc[2] = m3 << 4 | m4;
556 enc[3] = r1 << 4 | r2;
557 enc
558 }
559
560 /// RS-type instructions.
561 ///
562 /// 31 23 19 15 11
563 /// opcode r1 r3 b2 d2
564 /// 24 20 16 12 0
565 ///
enc_rs(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 4]566 fn enc_rs(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 4] {
567 let opcode = (opcode & 0xff) as u8;
568 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
569 let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
570 let b2 = machreg_to_gpr(b2) & 0x0f;
571 let d2_lo = (d2 & 0xff) as u8;
572 let d2_hi = ((d2 >> 8) & 0x0f) as u8;
573
574 let mut enc: [u8; 4] = [0; 4];
575 enc[0] = opcode;
576 enc[1] = r1 << 4 | r3;
577 enc[2] = b2 << 4 | d2_hi;
578 enc[3] = d2_lo;
579 enc
580 }
581
582 /// RSY-type instructions.
583 ///
584 /// 47 39 35 31 27 15 7
585 /// opcode1 r1 r3 b2 dl2 dh2 opcode2
586 /// 40 36 32 28 16 8 0
587 ///
enc_rsy(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 6]588 fn enc_rsy(opcode: u16, r1: Reg, r3: Reg, b2: Reg, d2: u32) -> [u8; 6] {
589 let opcode1 = ((opcode >> 8) & 0xff) as u8;
590 let opcode2 = (opcode & 0xff) as u8;
591 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
592 let r3 = machreg_to_gpr_or_fpr(r3) & 0x0f;
593 let b2 = machreg_to_gpr(b2) & 0x0f;
594 let dl2_lo = (d2 & 0xff) as u8;
595 let dl2_hi = ((d2 >> 8) & 0x0f) as u8;
596 let dh2 = ((d2 >> 12) & 0xff) as u8;
597
598 let mut enc: [u8; 6] = [0; 6];
599 enc[0] = opcode1;
600 enc[1] = r1 << 4 | r3;
601 enc[2] = b2 << 4 | dl2_hi;
602 enc[3] = dl2_lo;
603 enc[4] = dh2;
604 enc[5] = opcode2;
605 enc
606 }
607
608 /// RX-type instructions.
609 ///
610 /// 31 23 19 15 11
611 /// opcode r1 x2 b2 d2
612 /// 24 20 16 12 0
613 ///
enc_rx(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 4]614 fn enc_rx(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 4] {
615 let opcode = (opcode & 0xff) as u8;
616 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
617 let b2 = machreg_to_gpr(b2) & 0x0f;
618 let x2 = machreg_to_gpr(x2) & 0x0f;
619 let d2_lo = (d2 & 0xff) as u8;
620 let d2_hi = ((d2 >> 8) & 0x0f) as u8;
621
622 let mut enc: [u8; 4] = [0; 4];
623 enc[0] = opcode;
624 enc[1] = r1 << 4 | x2;
625 enc[2] = b2 << 4 | d2_hi;
626 enc[3] = d2_lo;
627 enc
628 }
629
630 /// RXY-type instructions.
631 ///
632 /// 47 39 35 31 27 15 7
633 /// opcode1 r1 x2 b2 dl2 dh2 opcode2
634 /// 40 36 32 28 16 8 0
635 ///
enc_rxy(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 6]636 fn enc_rxy(opcode: u16, r1: Reg, b2: Reg, x2: Reg, d2: u32) -> [u8; 6] {
637 let opcode1 = ((opcode >> 8) & 0xff) as u8;
638 let opcode2 = (opcode & 0xff) as u8;
639 let r1 = machreg_to_gpr_or_fpr(r1) & 0x0f;
640 let b2 = machreg_to_gpr(b2) & 0x0f;
641 let x2 = machreg_to_gpr(x2) & 0x0f;
642 let dl2_lo = (d2 & 0xff) as u8;
643 let dl2_hi = ((d2 >> 8) & 0x0f) as u8;
644 let dh2 = ((d2 >> 12) & 0xff) as u8;
645
646 let mut enc: [u8; 6] = [0; 6];
647 enc[0] = opcode1;
648 enc[1] = r1 << 4 | x2;
649 enc[2] = b2 << 4 | dl2_hi;
650 enc[3] = dl2_lo;
651 enc[4] = dh2;
652 enc[5] = opcode2;
653 enc
654 }
655
656 /// SI-type instructions.
657 ///
658 /// 31 23 15 11
659 /// opcode i2 b1 d1
660 /// 24 16 12 0
661 ///
enc_si(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 4]662 fn enc_si(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 4] {
663 let opcode = (opcode & 0xff) as u8;
664 let b1 = machreg_to_gpr(b1) & 0x0f;
665 let d1_lo = (d1 & 0xff) as u8;
666 let d1_hi = ((d1 >> 8) & 0x0f) as u8;
667
668 let mut enc: [u8; 4] = [0; 4];
669 enc[0] = opcode;
670 enc[1] = i2;
671 enc[2] = b1 << 4 | d1_hi;
672 enc[3] = d1_lo;
673 enc
674 }
675
676 /// SIL-type instructions.
677 ///
678 /// 47 31 27 15
679 /// opcode b1 d1 i2
680 /// 32 28 16 0
681 ///
enc_sil(opcode: u16, b1: Reg, d1: u32, i2: i16) -> [u8; 6]682 fn enc_sil(opcode: u16, b1: Reg, d1: u32, i2: i16) -> [u8; 6] {
683 let opcode1 = ((opcode >> 8) & 0xff) as u8;
684 let opcode2 = (opcode & 0xff) as u8;
685 let b1 = machreg_to_gpr(b1) & 0x0f;
686 let d1_lo = (d1 & 0xff) as u8;
687 let d1_hi = ((d1 >> 8) & 0x0f) as u8;
688
689 let mut enc: [u8; 6] = [0; 6];
690 enc[0] = opcode1;
691 enc[1] = opcode2;
692 enc[2] = b1 << 4 | d1_hi;
693 enc[3] = d1_lo;
694 enc[4..].copy_from_slice(&i2.to_be_bytes());
695 enc
696 }
697
698 /// SIY-type instructions.
699 ///
700 /// 47 39 31 27 15 7
701 /// opcode1 i2 b1 dl1 dh1 opcode2
702 /// 40 32 28 16 8 0
703 ///
enc_siy(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 6]704 fn enc_siy(opcode: u16, b1: Reg, d1: u32, i2: u8) -> [u8; 6] {
705 let opcode1 = ((opcode >> 8) & 0xff) as u8;
706 let opcode2 = (opcode & 0xff) as u8;
707 let b1 = machreg_to_gpr(b1) & 0x0f;
708 let dl1_lo = (d1 & 0xff) as u8;
709 let dl1_hi = ((d1 >> 8) & 0x0f) as u8;
710 let dh1 = ((d1 >> 12) & 0xff) as u8;
711
712 let mut enc: [u8; 6] = [0; 6];
713 enc[0] = opcode1;
714 enc[1] = i2;
715 enc[2] = b1 << 4 | dl1_hi;
716 enc[3] = dl1_lo;
717 enc[4] = dh1;
718 enc[5] = opcode2;
719 enc
720 }
721
722 /// VRR-type instructions.
723 ///
724 /// 47 39 35 31 27 23 19 15 11 7
725 /// opcode1 v1 v2 v3 - m6 m5 m4 rxb opcode2
726 /// 40 36 32 28 24 20 16 12 8 0
727 ///
enc_vrr(opcode: u16, v1: Reg, v2: Reg, v3: Reg, m4: u8, m5: u8, m6: u8) -> [u8; 6]728 fn enc_vrr(opcode: u16, v1: Reg, v2: Reg, v3: Reg, m4: u8, m5: u8, m6: u8) -> [u8; 6] {
729 let opcode1 = ((opcode >> 8) & 0xff) as u8;
730 let opcode2 = (opcode & 0xff) as u8;
731 let rxb = 0; // FIXME
732 let v1 = machreg_to_fpr(v1) & 0x0f; // FIXME
733 let v2 = machreg_to_fpr(v2) & 0x0f; // FIXME
734 let v3 = machreg_to_fpr(v3) & 0x0f; // FIXME
735 let m4 = m4 & 0x0f;
736 let m5 = m5 & 0x0f;
737 let m6 = m6 & 0x0f;
738
739 let mut enc: [u8; 6] = [0; 6];
740 enc[0] = opcode1;
741 enc[1] = v1 << 4 | v2;
742 enc[2] = v3 << 4;
743 enc[3] = m6 << 4 | m5;
744 enc[4] = m4 << 4 | rxb;
745 enc[5] = opcode2;
746 enc
747 }
748
749 /// VRX-type instructions.
750 ///
751 /// 47 39 35 31 27 15 11 7
752 /// opcode1 v1 x2 b2 d2 m3 rxb opcode2
753 /// 40 36 32 28 16 12 8 0
754 ///
enc_vrx(opcode: u16, v1: Reg, b2: Reg, x2: Reg, d2: u32, m3: u8) -> [u8; 6]755 fn enc_vrx(opcode: u16, v1: Reg, b2: Reg, x2: Reg, d2: u32, m3: u8) -> [u8; 6] {
756 let opcode1 = ((opcode >> 8) & 0xff) as u8;
757 let opcode2 = (opcode & 0xff) as u8;
758 let rxb = 0; // FIXME
759 let v1 = machreg_to_fpr(v1) & 0x0f; // FIXME
760 let b2 = machreg_to_gpr(b2) & 0x0f;
761 let x2 = machreg_to_gpr(x2) & 0x0f;
762 let d2_lo = (d2 & 0xff) as u8;
763 let d2_hi = ((d2 >> 8) & 0x0f) as u8;
764 let m3 = m3 & 0x0f;
765
766 let mut enc: [u8; 6] = [0; 6];
767 enc[0] = opcode1;
768 enc[1] = v1 << 4 | x2;
769 enc[2] = b2 << 4 | d2_hi;
770 enc[3] = d2_lo;
771 enc[4] = m3 << 4 | rxb;
772 enc[5] = opcode2;
773 enc
774 }
775
776 /// Emit encoding to sink.
put(sink: &mut MachBuffer<Inst>, enc: &[u8])777 fn put(sink: &mut MachBuffer<Inst>, enc: &[u8]) {
778 for byte in enc {
779 sink.put1(*byte);
780 }
781 }
782
783 /// Emit encoding to sink, adding a trap on the last byte.
put_with_trap(sink: &mut MachBuffer<Inst>, enc: &[u8], srcloc: SourceLoc, trap_code: TrapCode)784 fn put_with_trap(sink: &mut MachBuffer<Inst>, enc: &[u8], srcloc: SourceLoc, trap_code: TrapCode) {
785 let len = enc.len();
786 for i in 0..len - 1 {
787 sink.put1(enc[i]);
788 }
789 sink.add_trap(srcloc, trap_code);
790 sink.put1(enc[len - 1]);
791 }
792
793 /// Emit encoding to sink, adding a relocation at byte offset.
put_with_reloc( sink: &mut MachBuffer<Inst>, enc: &[u8], offset: usize, ri2_srcloc: SourceLoc, ri2_reloc: Reloc, ri2_name: &ExternalName, ri2_offset: i64, )794 fn put_with_reloc(
795 sink: &mut MachBuffer<Inst>,
796 enc: &[u8],
797 offset: usize,
798 ri2_srcloc: SourceLoc,
799 ri2_reloc: Reloc,
800 ri2_name: &ExternalName,
801 ri2_offset: i64,
802 ) {
803 let len = enc.len();
804 for i in 0..offset {
805 sink.put1(enc[i]);
806 }
807 sink.add_reloc(ri2_srcloc, ri2_reloc, ri2_name, ri2_offset + offset as i64);
808 for i in offset..len {
809 sink.put1(enc[i]);
810 }
811 }
812
/// State carried between emissions of a sequence of instructions.
#[derive(Default, Clone, Debug)]
pub struct EmitState {
    /// Offset of real SP below initial SP; set from `ABICallee::frame_size`
    /// in `new` and used by `mem_finalize` to resolve
    /// `MemArg::InitialSPOffset` addressing.
    pub(crate) initial_sp_offset: i64,
    /// Additional adjustment between nominal SP and real SP; used by
    /// `mem_finalize` for both `NominalSPOffset` and `InitialSPOffset`.
    pub(crate) virtual_sp_offset: i64,
    /// Safepoint stack map for upcoming instruction, as provided to `pre_safepoint()`.
    stack_map: Option<StackMap>,
    /// Current source-code location corresponding to instruction to be emitted.
    cur_srcloc: SourceLoc,
}
823
824 impl MachInstEmitState<Inst> for EmitState {
new(abi: &dyn ABICallee<I = Inst>) -> Self825 fn new(abi: &dyn ABICallee<I = Inst>) -> Self {
826 EmitState {
827 virtual_sp_offset: 0,
828 initial_sp_offset: abi.frame_size() as i64,
829 stack_map: None,
830 cur_srcloc: SourceLoc::default(),
831 }
832 }
833
pre_safepoint(&mut self, stack_map: StackMap)834 fn pre_safepoint(&mut self, stack_map: StackMap) {
835 self.stack_map = Some(stack_map);
836 }
837
pre_sourceloc(&mut self, srcloc: SourceLoc)838 fn pre_sourceloc(&mut self, srcloc: SourceLoc) {
839 self.cur_srcloc = srcloc;
840 }
841 }
842
843 impl EmitState {
take_stack_map(&mut self) -> Option<StackMap>844 fn take_stack_map(&mut self) -> Option<StackMap> {
845 self.stack_map.take()
846 }
847
clear_post_insn(&mut self)848 fn clear_post_insn(&mut self) {
849 self.stack_map = None;
850 }
851
cur_srcloc(&self) -> SourceLoc852 fn cur_srcloc(&self) -> SourceLoc {
853 self.cur_srcloc
854 }
855 }
856
/// Constant state used during function compilation.
///
/// Currently just a newtype over the compiler settings, exposed through
/// `MachInstEmitInfo::flags`.
pub struct EmitInfo(settings::Flags);
859
860 impl EmitInfo {
new(flags: settings::Flags) -> Self861 pub(crate) fn new(flags: settings::Flags) -> Self {
862 Self(flags)
863 }
864 }
865
866 impl MachInstEmitInfo for EmitInfo {
flags(&self) -> &settings::Flags867 fn flags(&self) -> &settings::Flags {
868 &self.0
869 }
870 }
871
872 impl MachInstEmit for Inst {
873 type State = EmitState;
874 type Info = EmitInfo;
875
emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState)876 fn emit(&self, sink: &mut MachBuffer<Inst>, emit_info: &Self::Info, state: &mut EmitState) {
877 // N.B.: we *must* not exceed the "worst-case size" used to compute
878 // where to insert islands, except when islands are explicitly triggered
879 // (with an `EmitIsland`). We check this in debug builds. This is `mut`
880 // to allow disabling the check for `JTSequence`, which is always
881 // emitted following an `EmitIsland`.
882 let mut start_off = sink.cur_offset();
883
884 match self {
885 &Inst::AluRRR { alu_op, rd, rn, rm } => {
886 let (opcode, have_rr) = match alu_op {
887 ALUOp::Add32 => (0xb9f8, true), // ARK
888 ALUOp::Add64 => (0xb9e8, true), // AGRK
889 ALUOp::Sub32 => (0xb9f9, true), // SRK
890 ALUOp::Sub64 => (0xb9e9, true), // SGRK
891 ALUOp::Mul32 => (0xb9fd, true), // MSRKC
892 ALUOp::Mul64 => (0xb9ed, true), // MSGRKC
893 ALUOp::And32 => (0xb9f4, true), // NRK
894 ALUOp::And64 => (0xb9e4, true), // NGRK
895 ALUOp::Orr32 => (0xb9f6, true), // ORK
896 ALUOp::Orr64 => (0xb9e6, true), // OGRK
897 ALUOp::Xor32 => (0xb9f7, true), // XRK
898 ALUOp::Xor64 => (0xb9e7, true), // XGRK
899 ALUOp::AndNot32 => (0xb974, false), // NNRK
900 ALUOp::AndNot64 => (0xb964, false), // NNGRK
901 ALUOp::OrrNot32 => (0xb976, false), // NORK
902 ALUOp::OrrNot64 => (0xb966, false), // NOGRK
903 ALUOp::XorNot32 => (0xb977, false), // NXRK
904 ALUOp::XorNot64 => (0xb967, false), // NXGRK
905 _ => unreachable!(),
906 };
907 if have_rr && rd.to_reg() == rn {
908 let inst = Inst::AluRR { alu_op, rd, rm };
909 inst.emit(sink, emit_info, state);
910 } else {
911 put(sink, &enc_rrf_ab(opcode, rd.to_reg(), rn, rm, 0));
912 }
913 }
914 &Inst::AluRRSImm16 {
915 alu_op,
916 rd,
917 rn,
918 imm,
919 } => {
920 if rd.to_reg() == rn {
921 let inst = Inst::AluRSImm16 { alu_op, rd, imm };
922 inst.emit(sink, emit_info, state);
923 } else {
924 let opcode = match alu_op {
925 ALUOp::Add32 => 0xecd8, // AHIK
926 ALUOp::Add64 => 0xecd9, // AGHIK
927 _ => unreachable!(),
928 };
929 put(sink, &enc_rie_d(opcode, rd.to_reg(), rn, imm as u16));
930 }
931 }
932 &Inst::AluRR { alu_op, rd, rm } => {
933 let (opcode, is_rre) = match alu_op {
934 ALUOp::Add32 => (0x1a, false), // AR
935 ALUOp::Add64 => (0xb908, true), // AGR
936 ALUOp::Add64Ext32 => (0xb918, true), // AGFR
937 ALUOp::Sub32 => (0x1b, false), // SR
938 ALUOp::Sub64 => (0xb909, true), // SGR
939 ALUOp::Sub64Ext32 => (0xb919, true), // SGFR
940 ALUOp::Mul32 => (0xb252, true), // MSR
941 ALUOp::Mul64 => (0xb90c, true), // MSGR
942 ALUOp::Mul64Ext32 => (0xb91c, true), // MSGFR
943 ALUOp::And32 => (0x14, false), // NR
944 ALUOp::And64 => (0xb980, true), // NGR
945 ALUOp::Orr32 => (0x16, false), // OR
946 ALUOp::Orr64 => (0xb981, true), // OGR
947 ALUOp::Xor32 => (0x17, false), // XR
948 ALUOp::Xor64 => (0xb982, true), // XGR
949 _ => unreachable!(),
950 };
951 if is_rre {
952 put(sink, &enc_rre(opcode, rd.to_reg(), rm));
953 } else {
954 put(sink, &enc_rr(opcode, rd.to_reg(), rm));
955 }
956 }
957 &Inst::AluRX {
958 alu_op,
959 rd,
960 ref mem,
961 } => {
962 let (opcode_rx, opcode_rxy) = match alu_op {
963 ALUOp::Add32 => (Some(0x5a), Some(0xe35a)), // A(Y)
964 ALUOp::Add32Ext16 => (Some(0x4a), Some(0xe34a)), // AH(Y)
965 ALUOp::Add64 => (None, Some(0xe308)), // AG
966 ALUOp::Add64Ext16 => (None, Some(0xe338)), // AGH
967 ALUOp::Add64Ext32 => (None, Some(0xe318)), // AGF
968 ALUOp::Sub32 => (Some(0x5b), Some(0xe35b)), // S(Y)
969 ALUOp::Sub32Ext16 => (Some(0x4b), Some(0xe37b)), // SH(Y)
970 ALUOp::Sub64 => (None, Some(0xe309)), // SG
971 ALUOp::Sub64Ext16 => (None, Some(0xe339)), // SGH
972 ALUOp::Sub64Ext32 => (None, Some(0xe319)), // SGF
973 ALUOp::Mul32 => (Some(0x71), Some(0xe351)), // MS(Y)
974 ALUOp::Mul32Ext16 => (Some(0x4c), Some(0xe37c)), // MH(Y)
975 ALUOp::Mul64 => (None, Some(0xe30c)), // MSG
976 ALUOp::Mul64Ext16 => (None, Some(0xe33c)), // MSH
977 ALUOp::Mul64Ext32 => (None, Some(0xe31c)), // MSGF
978 ALUOp::And32 => (Some(0x54), Some(0xe354)), // N(Y)
979 ALUOp::And64 => (None, Some(0xe380)), // NG
980 ALUOp::Orr32 => (Some(0x56), Some(0xe356)), // O(Y)
981 ALUOp::Orr64 => (None, Some(0xe381)), // OG
982 ALUOp::Xor32 => (Some(0x57), Some(0xe357)), // X(Y)
983 ALUOp::Xor64 => (None, Some(0xe382)), // XG
984 _ => unreachable!(),
985 };
986 let rd = rd.to_reg();
987 mem_emit(
988 rd, mem, opcode_rx, opcode_rxy, None, true, sink, emit_info, state,
989 );
990 }
991 &Inst::AluRSImm16 { alu_op, rd, imm } => {
992 let opcode = match alu_op {
993 ALUOp::Add32 => 0xa7a, // AHI
994 ALUOp::Add64 => 0xa7b, // AGHI
995 ALUOp::Mul32 => 0xa7c, // MHI
996 ALUOp::Mul64 => 0xa7d, // MGHI
997 _ => unreachable!(),
998 };
999 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
1000 }
1001 &Inst::AluRSImm32 { alu_op, rd, imm } => {
1002 let opcode = match alu_op {
1003 ALUOp::Add32 => 0xc29, // AFI
1004 ALUOp::Add64 => 0xc28, // AGFI
1005 ALUOp::Mul32 => 0xc21, // MSFI
1006 ALUOp::Mul64 => 0xc20, // MSGFI
1007 _ => unreachable!(),
1008 };
1009 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm as u32));
1010 }
1011 &Inst::AluRUImm32 { alu_op, rd, imm } => {
1012 let opcode = match alu_op {
1013 ALUOp::Add32 => 0xc2b, // ALFI
1014 ALUOp::Add64 => 0xc2a, // ALGFI
1015 ALUOp::Sub32 => 0xc25, // SLFI
1016 ALUOp::Sub64 => 0xc24, // SLGFI
1017 _ => unreachable!(),
1018 };
1019 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm));
1020 }
1021 &Inst::AluRUImm16Shifted { alu_op, rd, imm } => {
1022 let opcode = match (alu_op, imm.shift) {
1023 (ALUOp::And32, 0) => 0xa57, // NILL
1024 (ALUOp::And32, 1) => 0xa56, // NILH
1025 (ALUOp::And64, 0) => 0xa57, // NILL
1026 (ALUOp::And64, 1) => 0xa56, // NILH
1027 (ALUOp::And64, 2) => 0xa55, // NIHL
1028 (ALUOp::And64, 3) => 0xa54, // NIHL
1029 (ALUOp::Orr32, 0) => 0xa5b, // OILL
1030 (ALUOp::Orr32, 1) => 0xa5a, // OILH
1031 (ALUOp::Orr64, 0) => 0xa5b, // OILL
1032 (ALUOp::Orr64, 1) => 0xa5a, // OILH
1033 (ALUOp::Orr64, 2) => 0xa59, // OIHL
1034 (ALUOp::Orr64, 3) => 0xa58, // OIHH
1035 _ => unreachable!(),
1036 };
1037 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
1038 }
1039 &Inst::AluRUImm32Shifted { alu_op, rd, imm } => {
1040 let opcode = match (alu_op, imm.shift) {
1041 (ALUOp::And32, 0) => 0xc0b, // NILF
1042 (ALUOp::And64, 0) => 0xc0b, // NILF
1043 (ALUOp::And64, 1) => 0xc0a, // NIHF
1044 (ALUOp::Orr32, 0) => 0xc0d, // OILF
1045 (ALUOp::Orr64, 0) => 0xc0d, // OILF
1046 (ALUOp::Orr64, 1) => 0xc0c, // OILF
1047 (ALUOp::Xor32, 0) => 0xc07, // XILF
1048 (ALUOp::Xor64, 0) => 0xc07, // XILF
1049 (ALUOp::Xor64, 1) => 0xc06, // XILH
1050 _ => unreachable!(),
1051 };
1052 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
1053 }
1054
1055 &Inst::SMulWide { rn, rm } => {
1056 let opcode = 0xb9ec; // MGRK
1057 put(sink, &enc_rrf_ab(opcode, gpr(0), rn, rm, 0));
1058 }
1059 &Inst::UMulWide { rn } => {
1060 let opcode = 0xb986; // MLGR
1061 put(sink, &enc_rre(opcode, gpr(0), rn));
1062 }
1063 &Inst::SDivMod32 { rn } => {
1064 let opcode = 0xb91d; // DSGFR
1065 let srcloc = state.cur_srcloc();
1066 let trap_code = TrapCode::IntegerDivisionByZero;
1067 put_with_trap(sink, &enc_rre(opcode, gpr(0), rn), srcloc, trap_code);
1068 }
1069 &Inst::SDivMod64 { rn } => {
1070 let opcode = 0xb90d; // DSGR
1071 let srcloc = state.cur_srcloc();
1072 let trap_code = TrapCode::IntegerDivisionByZero;
1073 put_with_trap(sink, &enc_rre(opcode, gpr(0), rn), srcloc, trap_code);
1074 }
1075 &Inst::UDivMod32 { rn } => {
1076 let opcode = 0xb997; // DLR
1077 let srcloc = state.cur_srcloc();
1078 let trap_code = TrapCode::IntegerDivisionByZero;
1079 put_with_trap(sink, &enc_rre(opcode, gpr(0), rn), srcloc, trap_code);
1080 }
1081 &Inst::UDivMod64 { rn } => {
1082 let opcode = 0xb987; // DLGR
1083 let srcloc = state.cur_srcloc();
1084 let trap_code = TrapCode::IntegerDivisionByZero;
1085 put_with_trap(sink, &enc_rre(opcode, gpr(0), rn), srcloc, trap_code);
1086 }
1087 &Inst::Flogr { rn } => {
1088 let opcode = 0xb983; // FLOGR
1089 put(sink, &enc_rre(opcode, gpr(0), rn));
1090 }
1091
1092 &Inst::ShiftRR {
1093 shift_op,
1094 rd,
1095 rn,
1096 shift_imm,
1097 shift_reg,
1098 } => {
1099 let opcode = match shift_op {
1100 ShiftOp::RotL32 => 0xeb1d, // RLL
1101 ShiftOp::RotL64 => 0xeb1c, // RLLG
1102 ShiftOp::LShL32 => 0xebdf, // SLLK (SLL ?)
1103 ShiftOp::LShL64 => 0xeb0d, // SLLG
1104 ShiftOp::LShR32 => 0xebde, // SRLK (SRL ?)
1105 ShiftOp::LShR64 => 0xeb0c, // SRLG
1106 ShiftOp::AShR32 => 0xebdc, // SRAK (SRA ?)
1107 ShiftOp::AShR64 => 0xeb0a, // SRAG
1108 };
1109 let shift_reg = match shift_reg {
1110 Some(reg) => reg,
1111 None => zero_reg(),
1112 };
1113 put(
1114 sink,
1115 &enc_rsy(opcode, rd.to_reg(), rn, shift_reg, shift_imm.bits()),
1116 );
1117 }
1118
1119 &Inst::UnaryRR { op, rd, rn } => {
1120 match op {
1121 UnaryOp::Abs32 => {
1122 let opcode = 0x10; // LPR
1123 put(sink, &enc_rr(opcode, rd.to_reg(), rn));
1124 }
1125 UnaryOp::Abs64 => {
1126 let opcode = 0xb900; // LPGR
1127 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1128 }
1129 UnaryOp::Abs64Ext32 => {
1130 let opcode = 0xb910; // LPGFR
1131 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1132 }
1133 UnaryOp::Neg32 => {
1134 let opcode = 0x13; // LCR
1135 put(sink, &enc_rr(opcode, rd.to_reg(), rn));
1136 }
1137 UnaryOp::Neg64 => {
1138 let opcode = 0xb903; // LCGR
1139 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1140 }
1141 UnaryOp::Neg64Ext32 => {
1142 let opcode = 0xb913; // LCGFR
1143 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1144 }
1145 UnaryOp::PopcntByte => {
1146 let opcode = 0xb9e1; // POPCNT
1147 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 0, 0));
1148 }
1149 UnaryOp::PopcntReg => {
1150 let opcode = 0xb9e1; // POPCNT
1151 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 8, 0));
1152 }
1153 }
1154 }
1155
1156 &Inst::Extend {
1157 rd,
1158 rn,
1159 signed,
1160 from_bits,
1161 to_bits,
1162 } => {
1163 let opcode = match (signed, from_bits, to_bits) {
1164 (_, 1, 32) => 0xb926, // LBR
1165 (_, 1, 64) => 0xb906, // LGBR
1166 (false, 8, 32) => 0xb994, // LLCR
1167 (false, 8, 64) => 0xb984, // LLGCR
1168 (true, 8, 32) => 0xb926, // LBR
1169 (true, 8, 64) => 0xb906, // LGBR
1170 (false, 16, 32) => 0xb995, // LLHR
1171 (false, 16, 64) => 0xb985, // LLGHR
1172 (true, 16, 32) => 0xb927, // LHR
1173 (true, 16, 64) => 0xb907, // LGHR
1174 (false, 32, 64) => 0xb916, // LLGFR
1175 (true, 32, 64) => 0xb914, // LGFR
1176 _ => panic!(
1177 "Unsupported extend combination: signed = {}, from_bits = {}, to_bits = {}",
1178 signed, from_bits, to_bits
1179 ),
1180 };
1181 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1182 }
1183
1184 &Inst::CmpRR { op, rn, rm } => {
1185 let (opcode, is_rre) = match op {
1186 CmpOp::CmpS32 => (0x19, false), // CR
1187 CmpOp::CmpS64 => (0xb920, true), // CGR
1188 CmpOp::CmpS64Ext32 => (0xb930, true), // CGFR
1189 CmpOp::CmpL32 => (0x15, false), // CLR
1190 CmpOp::CmpL64 => (0xb921, true), // CLGR
1191 CmpOp::CmpL64Ext32 => (0xb931, true), // CLGFR
1192 _ => unreachable!(),
1193 };
1194 if is_rre {
1195 put(sink, &enc_rre(opcode, rn, rm));
1196 } else {
1197 put(sink, &enc_rr(opcode, rn, rm));
1198 }
1199 }
1200 &Inst::CmpRX { op, rn, ref mem } => {
1201 let (opcode_rx, opcode_rxy, opcode_ril) = match op {
1202 CmpOp::CmpS32 => (Some(0x59), Some(0xe359), Some(0xc6d)), // C(Y), CRL
1203 CmpOp::CmpS32Ext16 => (Some(0x49), Some(0xe379), Some(0xc65)), // CH(Y), CHRL
1204 CmpOp::CmpS64 => (None, Some(0xe320), Some(0xc68)), // CG, CGRL
1205 CmpOp::CmpS64Ext16 => (None, Some(0xe334), Some(0xc64)), // CGH, CGHRL
1206 CmpOp::CmpS64Ext32 => (None, Some(0xe330), Some(0xc6c)), // CGF, CGFRL
1207 CmpOp::CmpL32 => (Some(0x55), Some(0xe355), Some(0xc6f)), // CL(Y), CLRL
1208 CmpOp::CmpL32Ext16 => (None, None, Some(0xc67)), // CLHRL
1209 CmpOp::CmpL64 => (None, Some(0xe321), Some(0xc6a)), // CLG, CLGRL
1210 CmpOp::CmpL64Ext16 => (None, None, Some(0xc66)), // CLGHRL
1211 CmpOp::CmpL64Ext32 => (None, Some(0xe331), Some(0xc6e)), // CLGF, CLGFRL
1212 };
1213 mem_emit(
1214 rn, mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
1215 );
1216 }
1217 &Inst::CmpRSImm16 { op, rn, imm } => {
1218 let opcode = match op {
1219 CmpOp::CmpS32 => 0xa7e, // CHI
1220 CmpOp::CmpS64 => 0xa7f, // CGHI
1221 _ => unreachable!(),
1222 };
1223 put(sink, &enc_ri_a(opcode, rn, imm as u16));
1224 }
1225 &Inst::CmpRSImm32 { op, rn, imm } => {
1226 let opcode = match op {
1227 CmpOp::CmpS32 => 0xc2d, // CFI
1228 CmpOp::CmpS64 => 0xc2c, // CGFI
1229 _ => unreachable!(),
1230 };
1231 put(sink, &enc_ril_a(opcode, rn, imm as u32));
1232 }
1233 &Inst::CmpRUImm32 { op, rn, imm } => {
1234 let opcode = match op {
1235 CmpOp::CmpL32 => 0xc2f, // CLFI
1236 CmpOp::CmpL64 => 0xc2e, // CLGFI
1237 _ => unreachable!(),
1238 };
1239 put(sink, &enc_ril_a(opcode, rn, imm));
1240 }
1241 &Inst::CmpTrapRR {
1242 op,
1243 rn,
1244 rm,
1245 cond,
1246 trap_code,
1247 } => {
1248 let opcode = match op {
1249 CmpOp::CmpS32 => 0xb972, // CRT
1250 CmpOp::CmpS64 => 0xb960, // CGRT
1251 CmpOp::CmpL32 => 0xb973, // CLRT
1252 CmpOp::CmpL64 => 0xb961, // CLGRT
1253 _ => unreachable!(),
1254 };
1255 let srcloc = state.cur_srcloc();
1256 put_with_trap(
1257 sink,
1258 &enc_rrf_cde(opcode, rn, rm, cond.bits(), 0),
1259 srcloc,
1260 trap_code,
1261 );
1262 }
1263 &Inst::CmpTrapRSImm16 {
1264 op,
1265 rn,
1266 imm,
1267 cond,
1268 trap_code,
1269 } => {
1270 let opcode = match op {
1271 CmpOp::CmpS32 => 0xec72, // CIT
1272 CmpOp::CmpS64 => 0xec70, // CGIT
1273 _ => unreachable!(),
1274 };
1275 let srcloc = state.cur_srcloc();
1276 put_with_trap(
1277 sink,
1278 &enc_rie_a(opcode, rn, imm as u16, cond.bits()),
1279 srcloc,
1280 trap_code,
1281 );
1282 }
1283 &Inst::CmpTrapRUImm16 {
1284 op,
1285 rn,
1286 imm,
1287 cond,
1288 trap_code,
1289 } => {
1290 let opcode = match op {
1291 CmpOp::CmpL32 => 0xec73, // CLFIT
1292 CmpOp::CmpL64 => 0xec71, // CLGIT
1293 _ => unreachable!(),
1294 };
1295 let srcloc = state.cur_srcloc();
1296 put_with_trap(
1297 sink,
1298 &enc_rie_a(opcode, rn, imm, cond.bits()),
1299 srcloc,
1300 trap_code,
1301 );
1302 }
1303
1304 &Inst::Load32 { rd, ref mem }
1305 | &Inst::Load32ZExt8 { rd, ref mem }
1306 | &Inst::Load32SExt8 { rd, ref mem }
1307 | &Inst::Load32ZExt16 { rd, ref mem }
1308 | &Inst::Load32SExt16 { rd, ref mem }
1309 | &Inst::Load64 { rd, ref mem }
1310 | &Inst::Load64ZExt8 { rd, ref mem }
1311 | &Inst::Load64SExt8 { rd, ref mem }
1312 | &Inst::Load64ZExt16 { rd, ref mem }
1313 | &Inst::Load64SExt16 { rd, ref mem }
1314 | &Inst::Load64ZExt32 { rd, ref mem }
1315 | &Inst::Load64SExt32 { rd, ref mem }
1316 | &Inst::LoadRev16 { rd, ref mem }
1317 | &Inst::LoadRev32 { rd, ref mem }
1318 | &Inst::LoadRev64 { rd, ref mem }
1319 | &Inst::FpuLoad32 { rd, ref mem }
1320 | &Inst::FpuLoad64 { rd, ref mem } => {
1321 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1322 &Inst::Load32 { .. } => (Some(0x58), Some(0xe358), Some(0xc4d)), // L(Y), LRL
1323 &Inst::Load32ZExt8 { .. } => (None, Some(0xe394), None), // LLC
1324 &Inst::Load32SExt8 { .. } => (None, Some(0xe376), None), // LB
1325 &Inst::Load32ZExt16 { .. } => (None, Some(0xe395), Some(0xc42)), // LLH, LLHRL
1326 &Inst::Load32SExt16 { .. } => (Some(0x48), Some(0xe378), Some(0xc45)), // LH(Y), LHRL
1327 &Inst::Load64 { .. } => (None, Some(0xe304), Some(0xc48)), // LG, LGRL
1328 &Inst::Load64ZExt8 { .. } => (None, Some(0xe390), None), // LLGC
1329 &Inst::Load64SExt8 { .. } => (None, Some(0xe377), None), // LGB
1330 &Inst::Load64ZExt16 { .. } => (None, Some(0xe391), Some(0xc46)), // LLGH, LLGHRL
1331 &Inst::Load64SExt16 { .. } => (None, Some(0xe315), Some(0xc44)), // LGH, LGHRL
1332 &Inst::Load64ZExt32 { .. } => (None, Some(0xe316), Some(0xc4e)), // LLGF, LLGFRL
1333 &Inst::Load64SExt32 { .. } => (None, Some(0xe314), Some(0xc4c)), // LGF, LGFRL
1334 &Inst::LoadRev16 { .. } => (None, Some(0xe31f), None), // LRVH
1335 &Inst::LoadRev32 { .. } => (None, Some(0xe31e), None), // LRV
1336 &Inst::LoadRev64 { .. } => (None, Some(0xe30f), None), // LRVG
1337 &Inst::FpuLoad32 { .. } => (Some(0x78), Some(0xed64), None), // LE(Y)
1338 &Inst::FpuLoad64 { .. } => (Some(0x68), Some(0xed65), None), // LD(Y)
1339 _ => unreachable!(),
1340 };
1341 let rd = rd.to_reg();
1342 mem_emit(
1343 rd, mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
1344 );
1345 }
1346 &Inst::FpuLoadRev32 { rd, ref mem } | &Inst::FpuLoadRev64 { rd, ref mem } => {
1347 let opcode = match self {
1348 &Inst::FpuLoadRev32 { .. } => 0xe603, // VLEBRF
1349 &Inst::FpuLoadRev64 { .. } => 0xe602, // VLEBRG
1350 _ => unreachable!(),
1351 };
1352
1353 let (mem_insts, mem) = mem_finalize(mem, state, true, false, false, true);
1354 for inst in mem_insts.into_iter() {
1355 inst.emit(sink, emit_info, state);
1356 }
1357
1358 let srcloc = state.cur_srcloc();
1359 if srcloc != SourceLoc::default() && mem.can_trap() {
1360 sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
1361 }
1362
1363 match &mem {
1364 &MemArg::BXD12 {
1365 base, index, disp, ..
1366 } => {
1367 put(
1368 sink,
1369 &enc_vrx(opcode, rd.to_reg(), base, index, disp.bits(), 0),
1370 );
1371 }
1372 _ => unreachable!(),
1373 }
1374 }
1375
1376 &Inst::Store8 { rd, ref mem }
1377 | &Inst::Store16 { rd, ref mem }
1378 | &Inst::Store32 { rd, ref mem }
1379 | &Inst::Store64 { rd, ref mem }
1380 | &Inst::StoreRev16 { rd, ref mem }
1381 | &Inst::StoreRev32 { rd, ref mem }
1382 | &Inst::StoreRev64 { rd, ref mem }
1383 | &Inst::FpuStore32 { rd, ref mem }
1384 | &Inst::FpuStore64 { rd, ref mem } => {
1385 let (opcode_rx, opcode_rxy, opcode_ril) = match self {
1386 &Inst::Store8 { .. } => (Some(0x42), Some(0xe372), None), // STC(Y)
1387 &Inst::Store16 { .. } => (Some(0x40), Some(0xe370), Some(0xc47)), // STH(Y), STHRL
1388 &Inst::Store32 { .. } => (Some(0x50), Some(0xe350), Some(0xc4f)), // ST(Y), STRL
1389 &Inst::Store64 { .. } => (None, Some(0xe324), Some(0xc4b)), // STG, STGRL
1390 &Inst::StoreRev16 { .. } => (None, Some(0xe33f), None), // STRVH
1391 &Inst::StoreRev32 { .. } => (None, Some(0xe33e), None), // STRV
1392 &Inst::StoreRev64 { .. } => (None, Some(0xe32f), None), // STRVG
1393 &Inst::FpuStore32 { .. } => (Some(0x70), Some(0xed66), None), // STE(Y)
1394 &Inst::FpuStore64 { .. } => (Some(0x60), Some(0xed67), None), // STD(Y)
1395 _ => unreachable!(),
1396 };
1397 mem_emit(
1398 rd, mem, opcode_rx, opcode_rxy, opcode_ril, true, sink, emit_info, state,
1399 );
1400 }
1401 &Inst::StoreImm8 { imm, ref mem } => {
1402 let opcode_si = 0x92; // MVI
1403 let opcode_siy = 0xeb52; // MVIY
1404 mem_imm8_emit(
1405 imm, mem, opcode_si, opcode_siy, true, sink, emit_info, state,
1406 );
1407 }
1408 &Inst::StoreImm16 { imm, ref mem }
1409 | &Inst::StoreImm32SExt16 { imm, ref mem }
1410 | &Inst::StoreImm64SExt16 { imm, ref mem } => {
1411 let opcode = match self {
1412 &Inst::StoreImm16 { .. } => 0xe544, // MVHHI
1413 &Inst::StoreImm32SExt16 { .. } => 0xe54c, // MVHI
1414 &Inst::StoreImm64SExt16 { .. } => 0xe548, // MVGHI
1415 _ => unreachable!(),
1416 };
1417 mem_imm16_emit(imm, mem, opcode, true, sink, emit_info, state);
1418 }
1419 &Inst::FpuStoreRev32 { rd, ref mem } | &Inst::FpuStoreRev64 { rd, ref mem } => {
1420 let opcode = match self {
1421 &Inst::FpuStoreRev32 { .. } => 0xe60b, // VSTEBRF
1422 &Inst::FpuStoreRev64 { .. } => 0xe60a, // VSTEBRG
1423 _ => unreachable!(),
1424 };
1425
1426 let (mem_insts, mem) = mem_finalize(mem, state, true, false, false, true);
1427 for inst in mem_insts.into_iter() {
1428 inst.emit(sink, emit_info, state);
1429 }
1430
1431 let srcloc = state.cur_srcloc();
1432 if srcloc != SourceLoc::default() && mem.can_trap() {
1433 sink.add_trap(srcloc, TrapCode::HeapOutOfBounds);
1434 }
1435
1436 match &mem {
1437 &MemArg::BXD12 {
1438 base, index, disp, ..
1439 } => {
1440 put(sink, &enc_vrx(opcode, rd, base, index, disp.bits(), 0));
1441 }
1442 _ => unreachable!(),
1443 }
1444 }
1445
1446 &Inst::LoadMultiple64 {
1447 rt,
1448 rt2,
1449 addr_reg,
1450 addr_off,
1451 } => {
1452 let opcode = 0xeb04; // LMG
1453 let rt = rt.to_reg();
1454 let rt2 = rt2.to_reg();
1455 put(sink, &enc_rsy(opcode, rt, rt2, addr_reg, addr_off.bits()));
1456 }
1457 &Inst::StoreMultiple64 {
1458 rt,
1459 rt2,
1460 addr_reg,
1461 addr_off,
1462 } => {
1463 let opcode = 0xeb24; // STMG
1464 put(sink, &enc_rsy(opcode, rt, rt2, addr_reg, addr_off.bits()));
1465 }
1466
1467 &Inst::LoadAddr { rd, ref mem } => {
1468 let opcode_rx = Some(0x41); // LA
1469 let opcode_rxy = Some(0xe371); // LAY
1470 let opcode_ril = Some(0xc00); // LARL
1471 let rd = rd.to_reg();
1472 mem_emit(
1473 rd, mem, opcode_rx, opcode_rxy, opcode_ril, false, sink, emit_info, state,
1474 );
1475 }
1476
1477 &Inst::Mov64 { rd, rm } => {
1478 let opcode = 0xb904; // LGR
1479 put(sink, &enc_rre(opcode, rd.to_reg(), rm));
1480 }
1481 &Inst::Mov32 { rd, rm } => {
1482 let opcode = 0x18; // LR
1483 put(sink, &enc_rr(opcode, rd.to_reg(), rm));
1484 }
1485 &Inst::Mov32Imm { rd, imm } => {
1486 let opcode = 0xc09; // IILF
1487 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm));
1488 }
1489 &Inst::Mov32SImm16 { rd, imm } => {
1490 let opcode = 0xa78; // LHI
1491 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
1492 }
1493 &Inst::Mov64SImm16 { rd, imm } => {
1494 let opcode = 0xa79; // LGHI
1495 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm as u16));
1496 }
1497 &Inst::Mov64SImm32 { rd, imm } => {
1498 let opcode = 0xc01; // LGFI
1499 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm as u32));
1500 }
1501 &Inst::CMov32 { rd, cond, rm } => {
1502 let opcode = 0xb9f2; // LOCR
1503 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rm, cond.bits(), 0));
1504 }
1505 &Inst::CMov64 { rd, cond, rm } => {
1506 let opcode = 0xb9e2; // LOCGR
1507 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rm, cond.bits(), 0));
1508 }
1509 &Inst::CMov32SImm16 { rd, cond, imm } => {
1510 let opcode = 0xec42; // LOCHI
1511 put(
1512 sink,
1513 &enc_rie_g(opcode, rd.to_reg(), imm as u16, cond.bits()),
1514 );
1515 }
1516 &Inst::CMov64SImm16 { rd, cond, imm } => {
1517 let opcode = 0xec46; // LOCGHI
1518 put(
1519 sink,
1520 &enc_rie_g(opcode, rd.to_reg(), imm as u16, cond.bits()),
1521 );
1522 }
1523 &Inst::Mov64UImm16Shifted { rd, imm } => {
1524 let opcode = match imm.shift {
1525 0 => 0xa5f, // LLILL
1526 1 => 0xa5e, // LLILH
1527 2 => 0xa5d, // LLIHL
1528 3 => 0xa5c, // LLIHH
1529 _ => unreachable!(),
1530 };
1531 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
1532 }
1533 &Inst::Mov64UImm32Shifted { rd, imm } => {
1534 let opcode = match imm.shift {
1535 0 => 0xc0f, // LLILF
1536 1 => 0xc0e, // LLIHF
1537 _ => unreachable!(),
1538 };
1539 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
1540 }
1541 &Inst::Insert64UImm16Shifted { rd, imm } => {
1542 let opcode = match imm.shift {
1543 0 => 0xa53, // IILL
1544 1 => 0xa52, // IILH
1545 2 => 0xa51, // IIHL
1546 3 => 0xa50, // IIHH
1547 _ => unreachable!(),
1548 };
1549 put(sink, &enc_ri_a(opcode, rd.to_reg(), imm.bits));
1550 }
1551 &Inst::Insert64UImm32Shifted { rd, imm } => {
1552 let opcode = match imm.shift {
1553 0 => 0xc09, // IILF
1554 1 => 0xc08, // IIHF
1555 _ => unreachable!(),
1556 };
1557 put(sink, &enc_ril_a(opcode, rd.to_reg(), imm.bits));
1558 }
1559 &Inst::LoadExtNameFar {
1560 rd,
1561 ref name,
1562 offset,
1563 } => {
1564 let opcode = 0xa75; // BRAS
1565 let srcloc = state.cur_srcloc();
1566 let reg = writable_spilltmp_reg().to_reg();
1567 put(sink, &enc_ri_b(opcode, reg, 12));
1568 sink.add_reloc(srcloc, Reloc::Abs8, name, offset);
1569 if emit_info.flags().emit_all_ones_funcaddrs() {
1570 sink.put8(u64::max_value());
1571 } else {
1572 sink.put8(0);
1573 }
1574 let inst = Inst::Load64 {
1575 rd,
1576 mem: MemArg::reg(reg, MemFlags::trusted()),
1577 };
1578 inst.emit(sink, emit_info, state);
1579 }
1580
1581 &Inst::FpuMove32 { rd, rn } => {
1582 let opcode = 0x38; // LER
1583 put(sink, &enc_rr(opcode, rd.to_reg(), rn));
1584 }
1585 &Inst::FpuMove64 { rd, rn } => {
1586 let opcode = 0x28; // LDR
1587 put(sink, &enc_rr(opcode, rd.to_reg(), rn));
1588 }
1589 &Inst::FpuCMov32 { rd, cond, rm } => {
1590 let opcode = 0xa74; // BCR
1591 put(sink, &enc_ri_c(opcode, cond.invert().bits(), 4 + 2));
1592 let opcode = 0x38; // LER
1593 put(sink, &enc_rr(opcode, rd.to_reg(), rm));
1594 }
1595 &Inst::FpuCMov64 { rd, cond, rm } => {
1596 let opcode = 0xa74; // BCR
1597 put(sink, &enc_ri_c(opcode, cond.invert().bits(), 4 + 2));
1598 let opcode = 0x28; // LDR
1599 put(sink, &enc_rr(opcode, rd.to_reg(), rm));
1600 }
1601 &Inst::MovToFpr { rd, rn } => {
1602 let opcode = 0xb3c1; // LDGR
1603 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1604 }
1605 &Inst::MovFromFpr { rd, rn } => {
1606 let opcode = 0xb3cd; // LGDR
1607 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1608 }
1609 &Inst::LoadFpuConst32 { rd, const_data } => {
1610 let opcode = 0xa75; // BRAS
1611 let reg = writable_spilltmp_reg().to_reg();
1612 put(sink, &enc_ri_b(opcode, reg, 8));
1613 sink.put4(const_data.to_bits().swap_bytes());
1614 let inst = Inst::FpuLoad32 {
1615 rd,
1616 mem: MemArg::reg(reg, MemFlags::trusted()),
1617 };
1618 inst.emit(sink, emit_info, state);
1619 }
1620 &Inst::LoadFpuConst64 { rd, const_data } => {
1621 let opcode = 0xa75; // BRAS
1622 let reg = writable_spilltmp_reg().to_reg();
1623 put(sink, &enc_ri_b(opcode, reg, 12));
1624 sink.put8(const_data.to_bits().swap_bytes());
1625 let inst = Inst::FpuLoad64 {
1626 rd,
1627 mem: MemArg::reg(reg, MemFlags::trusted()),
1628 };
1629 inst.emit(sink, emit_info, state);
1630 }
1631
1632 &Inst::FpuCopysign { rd, rn, rm } => {
1633 let opcode = 0xb372; // CPSDR
1634 put(sink, &enc_rrf_ab(opcode, rd.to_reg(), rn, rm, 0));
1635 }
1636 &Inst::FpuRR { fpu_op, rd, rn } => {
1637 let opcode = match fpu_op {
1638 FPUOp1::Abs32 => 0xb300, // LPEBR
1639 FPUOp1::Abs64 => 0xb310, // LPDBR
1640 FPUOp1::Neg32 => 0xb303, // LCEBR
1641 FPUOp1::Neg64 => 0xb313, // LCDBR
1642 FPUOp1::NegAbs32 => 0xb301, // LNEBR
1643 FPUOp1::NegAbs64 => 0xb311, // LNDBR
1644 FPUOp1::Sqrt32 => 0xb314, // SQEBR
1645 FPUOp1::Sqrt64 => 0xb315, // SQDBR
1646 FPUOp1::Cvt32To64 => 0xb304, // LDEBR
1647 FPUOp1::Cvt64To32 => 0xb344, // LEDBR
1648 };
1649 put(sink, &enc_rre(opcode, rd.to_reg(), rn));
1650 }
1651 &Inst::FpuRRR { fpu_op, rd, rm } => {
1652 let opcode = match fpu_op {
1653 FPUOp2::Add32 => 0xb30a, // AEBR
1654 FPUOp2::Add64 => 0xb31a, // ADBR
1655 FPUOp2::Sub32 => 0xb30b, // SEBR
1656 FPUOp2::Sub64 => 0xb31b, // SDBR
1657 FPUOp2::Mul32 => 0xb317, // MEEBR
1658 FPUOp2::Mul64 => 0xb31c, // MDBR
1659 FPUOp2::Div32 => 0xb30d, // DEBR
1660 FPUOp2::Div64 => 0xb31d, // DDBR
1661 _ => unimplemented!(),
1662 };
1663 put(sink, &enc_rre(opcode, rd.to_reg(), rm));
1664 }
1665 &Inst::FpuRRRR { fpu_op, rd, rn, rm } => {
1666 let opcode = match fpu_op {
1667 FPUOp3::MAdd32 => 0xb30e, // MAEBR
1668 FPUOp3::MAdd64 => 0xb31e, // MADBR
1669 FPUOp3::MSub32 => 0xb30f, // MSEBR
1670 FPUOp3::MSub64 => 0xb31f, // MSDBR
1671 };
1672 put(sink, &enc_rrd(opcode, rd.to_reg(), rm, rn));
1673 }
1674 &Inst::FpuToInt { op, rd, rn } => {
1675 let opcode = match op {
1676 FpuToIntOp::F32ToI32 => 0xb398, // CFEBRA
1677 FpuToIntOp::F32ToU32 => 0xb39c, // CLFEBR
1678 FpuToIntOp::F32ToI64 => 0xb3a8, // CGEBRA
1679 FpuToIntOp::F32ToU64 => 0xb3ac, // CLGEBR
1680 FpuToIntOp::F64ToI32 => 0xb399, // CFDBRA
1681 FpuToIntOp::F64ToU32 => 0xb39d, // CLFDBR
1682 FpuToIntOp::F64ToI64 => 0xb3a9, // CGDBRA
1683 FpuToIntOp::F64ToU64 => 0xb3ad, // CLGDBR
1684 };
1685 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 5, 0));
1686 }
1687 &Inst::IntToFpu { op, rd, rn } => {
1688 let opcode = match op {
1689 IntToFpuOp::I32ToF32 => 0xb394, // CEFBRA
1690 IntToFpuOp::U32ToF32 => 0xb390, // CELFBR
1691 IntToFpuOp::I64ToF32 => 0xb3a4, // CEGBRA
1692 IntToFpuOp::U64ToF32 => 0xb3a0, // CELGBR
1693 IntToFpuOp::I32ToF64 => 0xb395, // CDFBRA
1694 IntToFpuOp::U32ToF64 => 0xb391, // CDLFBR
1695 IntToFpuOp::I64ToF64 => 0xb3a5, // CDGBRA
1696 IntToFpuOp::U64ToF64 => 0xb3a1, // CDLGBR
1697 };
1698 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, 0, 0));
1699 }
1700 &Inst::FpuRound { op, rd, rn } => {
1701 let (opcode, m3) = match op {
1702 FpuRoundMode::Minus32 => (0xb357, 7), // FIEBR
1703 FpuRoundMode::Minus64 => (0xb35f, 7), // FIDBR
1704 FpuRoundMode::Plus32 => (0xb357, 6), // FIEBR
1705 FpuRoundMode::Plus64 => (0xb35f, 6), // FIDBR
1706 FpuRoundMode::Zero32 => (0xb357, 5), // FIEBR
1707 FpuRoundMode::Zero64 => (0xb35f, 5), // FIDBR
1708 FpuRoundMode::Nearest32 => (0xb357, 4), // FIEBR
1709 FpuRoundMode::Nearest64 => (0xb35f, 4), // FIDBR
1710 };
1711 put(sink, &enc_rrf_cde(opcode, rd.to_reg(), rn, m3, 0));
1712 }
1713 &Inst::FpuVecRRR { fpu_op, rd, rn, rm } => {
1714 let (opcode, m4) = match fpu_op {
1715 FPUOp2::Max32 => (0xe7ef, 2), // VFMAX
1716 FPUOp2::Max64 => (0xe7ef, 3), // VFMAX
1717 FPUOp2::Min32 => (0xe7ee, 2), // VFMIN
1718 FPUOp2::Min64 => (0xe7ee, 3), // VFMIN
1719 _ => unimplemented!(),
1720 };
1721 put(sink, &enc_vrr(opcode, rd.to_reg(), rn, rm, m4, 8, 1));
1722 }
1723 &Inst::FpuCmp32 { rn, rm } => {
1724 let opcode = 0xb309; // CEBR
1725 put(sink, &enc_rre(opcode, rn, rm));
1726 }
1727 &Inst::FpuCmp64 { rn, rm } => {
1728 let opcode = 0xb319; // CDBR
1729 put(sink, &enc_rre(opcode, rn, rm));
1730 }
1731
1732 &Inst::Call { link, ref info } => {
1733 let opcode = 0xc05; // BRASL
1734 let reloc = Reloc::S390xPCRel32Dbl;
1735 let srcloc = state.cur_srcloc();
1736 if let Some(s) = state.take_stack_map() {
1737 sink.add_stack_map(StackMapExtent::UpcomingBytes(6), s);
1738 }
1739 put_with_reloc(
1740 sink,
1741 &enc_ril_b(opcode, link.to_reg(), 0),
1742 2,
1743 srcloc,
1744 reloc,
1745 &info.dest,
1746 0,
1747 );
1748 if info.opcode.is_call() {
1749 sink.add_call_site(srcloc, info.opcode);
1750 }
1751 }
1752 &Inst::CallInd { link, ref info } => {
1753 let opcode = 0x0d; // BASR
1754 let srcloc = state.cur_srcloc();
1755 if let Some(s) = state.take_stack_map() {
1756 sink.add_stack_map(StackMapExtent::UpcomingBytes(2), s);
1757 }
1758 put(sink, &enc_rr(opcode, link.to_reg(), info.rn));
1759 if info.opcode.is_call() {
1760 sink.add_call_site(srcloc, info.opcode);
1761 }
1762 }
1763 &Inst::Ret { link } => {
1764 let opcode = 0x07; // BCR
1765 put(sink, &enc_rr(opcode, gpr(15), link));
1766 }
1767 &Inst::EpiloguePlaceholder => {
1768 // Noop; this is just a placeholder for epilogues.
1769 }
1770 &Inst::Jump { ref dest } => {
1771 let off = sink.cur_offset();
1772 // Indicate that the jump uses a label, if so, so that a fixup can occur later.
1773 if let Some(l) = dest.as_label() {
1774 sink.use_label_at_offset(off, l, LabelUse::BranchRIL);
1775 sink.add_uncond_branch(off, off + 6, l);
1776 }
1777 // Emit the jump itself.
1778 let opcode = 0xc04; // BCRL
1779 put(sink, &enc_ril_c(opcode, 15, dest.as_ril_offset_or_zero()));
1780 }
1781 &Inst::IndirectBr { rn, .. } => {
1782 let opcode = 0x07; // BCR
1783 put(sink, &enc_rr(opcode, gpr(15), rn));
1784 }
1785 &Inst::CondBr {
1786 ref taken,
1787 ref not_taken,
1788 cond,
1789 } => {
1790 let opcode = 0xc04; // BCRL
1791
1792 // Conditional part first.
1793 let cond_off = sink.cur_offset();
1794 if let Some(l) = taken.as_label() {
1795 sink.use_label_at_offset(cond_off, l, LabelUse::BranchRIL);
1796 let inverted = &enc_ril_c(opcode, cond.invert().bits(), 0);
1797 sink.add_cond_branch(cond_off, cond_off + 6, l, inverted);
1798 }
1799 put(
1800 sink,
1801 &enc_ril_c(opcode, cond.bits(), taken.as_ril_offset_or_zero()),
1802 );
1803
1804 // Unconditional part next.
1805 let uncond_off = sink.cur_offset();
1806 if let Some(l) = not_taken.as_label() {
1807 sink.use_label_at_offset(uncond_off, l, LabelUse::BranchRIL);
1808 sink.add_uncond_branch(uncond_off, uncond_off + 6, l);
1809 }
1810 put(
1811 sink,
1812 &enc_ril_c(opcode, 15, not_taken.as_ril_offset_or_zero()),
1813 );
1814 }
1815 &Inst::OneWayCondBr { ref target, cond } => {
1816 let opcode = 0xc04; // BCRL
1817 if let Some(l) = target.as_label() {
1818 sink.use_label_at_offset(sink.cur_offset(), l, LabelUse::BranchRIL);
1819 }
1820 put(
1821 sink,
1822 &enc_ril_c(opcode, cond.bits(), target.as_ril_offset_or_zero()),
1823 );
1824 }
1825 &Inst::Nop0 => {}
1826 &Inst::Nop2 => {
1827 put(sink, &enc_e(0x0707));
1828 }
1829 &Inst::Debugtrap => {
1830 put(sink, &enc_e(0x0001));
1831 }
1832 &Inst::Trap { trap_code } => {
1833 if let Some(s) = state.take_stack_map() {
1834 sink.add_stack_map(StackMapExtent::UpcomingBytes(2), s);
1835 }
1836 let srcloc = state.cur_srcloc();
1837 put_with_trap(sink, &enc_e(0x0000), srcloc, trap_code);
1838 }
1839 &Inst::TrapIf { cond, trap_code } => {
1840 // Branch over trap if condition is false.
1841 let opcode = 0xa74; // BCR
1842 put(sink, &enc_ri_c(opcode, cond.invert().bits(), 4 + 2));
1843 // Now emit the actual trap.
1844 if let Some(s) = state.take_stack_map() {
1845 sink.add_stack_map(StackMapExtent::UpcomingBytes(2), s);
1846 }
1847 let srcloc = state.cur_srcloc();
1848 put_with_trap(sink, &enc_e(0x0000), srcloc, trap_code);
1849 }
1850 &Inst::JTSequence {
1851 ridx,
1852 rtmp1,
1853 rtmp2,
1854 ref info,
1855 ..
1856 } => {
1857 let table_label = sink.get_label();
1858
1859 // This sequence is *one* instruction in the vcode, and is expanded only here at
1860 // emission time, because we cannot allow the regalloc to insert spills/reloads in
1861 // the middle; we depend on hardcoded PC-rel addressing below.
1862
1863 // Bounds-check index and branch to default.
1864 let inst = Inst::CmpRUImm32 {
1865 op: CmpOp::CmpL64,
1866 rn: ridx,
1867 imm: info.targets.len() as u32,
1868 };
1869 inst.emit(sink, emit_info, state);
1870 let inst = Inst::OneWayCondBr {
1871 target: info.default_target,
1872 cond: Cond::from_intcc(IntCC::UnsignedGreaterThanOrEqual),
1873 };
1874 inst.emit(sink, emit_info, state);
1875
1876 // Set rtmp2 to index scaled by entry size.
1877 let inst = Inst::ShiftRR {
1878 shift_op: ShiftOp::LShL64,
1879 rd: rtmp2,
1880 rn: ridx,
1881 shift_imm: SImm20::maybe_from_i64(2).unwrap(),
1882 shift_reg: None,
1883 };
1884 inst.emit(sink, emit_info, state);
1885
1886 // Set rtmp1 to address of jump table.
1887 let inst = Inst::LoadAddr {
1888 rd: rtmp1,
1889 mem: MemArg::Label {
1890 target: BranchTarget::Label(table_label),
1891 },
1892 };
1893 inst.emit(sink, emit_info, state);
1894
1895 // Set rtmp2 to value loaded out of jump table.
1896 let inst = Inst::Load64SExt32 {
1897 rd: rtmp2,
1898 mem: MemArg::reg_plus_reg(rtmp1.to_reg(), rtmp2.to_reg(), MemFlags::trusted()),
1899 };
1900 inst.emit(sink, emit_info, state);
1901
1902 // Set rtmp1 to target address (rtmp1 + rtmp2).
1903 let inst = Inst::AluRRR {
1904 alu_op: ALUOp::Add64,
1905 rd: rtmp1,
1906 rn: rtmp1.to_reg(),
1907 rm: rtmp2.to_reg(),
1908 };
1909 inst.emit(sink, emit_info, state);
1910
1911 // Branch to computed address. (`targets` here is only used for successor queries
1912 // and is not needed for emission.)
1913 let inst = Inst::IndirectBr {
1914 rn: rtmp1.to_reg(),
1915 targets: vec![],
1916 };
1917 inst.emit(sink, emit_info, state);
1918
1919 // Emit jump table (table of 32-bit offsets).
1920 sink.bind_label(table_label);
1921 let jt_off = sink.cur_offset();
1922 for &target in info.targets.iter() {
1923 let word_off = sink.cur_offset();
1924 let off_into_table = word_off - jt_off;
1925 sink.use_label_at_offset(
1926 word_off,
1927 target.as_label().unwrap(),
1928 LabelUse::PCRel32,
1929 );
1930 sink.put4(off_into_table.swap_bytes());
1931 }
1932
1933 // Lowering produces an EmitIsland before using a JTSequence, so we can safely
1934 // disable the worst-case-size check in this case.
1935 start_off = sink.cur_offset();
1936 }
1937
1938 &Inst::VirtualSPOffsetAdj { offset } => {
1939 debug!(
1940 "virtual sp offset adjusted by {} -> {}",
1941 offset,
1942 state.virtual_sp_offset + offset
1943 );
1944 state.virtual_sp_offset += offset;
1945 }
1946
1947 &Inst::ValueLabelMarker { .. } => {
1948 // Nothing; this is only used to compute debug info.
1949 }
1950
1951 &Inst::Unwind { ref inst } => {
1952 sink.add_unwind(inst.clone());
1953 }
1954 }
1955
1956 let end_off = sink.cur_offset();
1957 debug_assert!((end_off - start_off) <= Inst::worst_case_size());
1958
1959 state.clear_post_insn();
1960 }
1961
    /// Pretty-print this instruction as a human-readable assembly-like string.
    ///
    /// Delegates to `print_with_state`, passing through the optional real
    /// register universe `mb_rru` and the mutable emission state `state`
    /// unchanged.
    fn pretty_print(&self, mb_rru: Option<&RealRegUniverse>, state: &mut EmitState) -> String {
        self.print_with_state(mb_rru, state)
    }
1965 }
1966