1 //! Implementation of a standard S390x ABI.
2 //!
3 //! This machine uses the "vanilla" ABI implementation from abi_impl.rs,
4 //! however a few details are different from the description there:
5 //!
6 //! - On s390x, the caller must provide a "register save area" of 160
7 //!   bytes to any function it calls.  The called function is free to use
8 //!   this space for any purpose; usually to save callee-saved GPRs.
9 //!   (Note that while this area is allocated by the caller, it is counted
10 //!   as part of the callee's stack frame; in particular, the callee's CFA
11 //!   is the top of the register save area, not the incoming SP value.)
12 //!
13 //! - Overflow arguments are passed on the stack starting immediately
14 //!   above the register save area.  On s390x, this space is allocated
15 //!   only once directly in the prologue, using a size large enough to
16 //!   hold overflow arguments for every call in the function.
17 //!
18 //! - On s390x we do not use a frame pointer register; instead, every
19 //!   element of the stack frame is addressed via (constant) offsets
20 //!   from the stack pointer.  Note that due to the above (and because
21 //!   there are no variable-sized stack allocations in cranelift), the
22 //!   value of the stack pointer register never changes after the
23 //!   initial allocation in the function prologue.
24 //!
25 //! Overall, the stack frame layout on s390x is as follows:
26 //!
27 //! ```plain
28 //!   (high address)
29 //!
30 //!                              +---------------------------+
31 //!                              |          ...              |
32 //! CFA                  ----->  | stack args                |
33 //!                              +---------------------------+
34 //!                              |          ...              |
35 //!                              | 160 bytes reg save area   |
36 //! SP at function entry ----->  | (used to save GPRs)       |
37 //!                              +---------------------------+
38 //!                              |          ...              |
39 //!                              | clobbered callee-saves    |
40 //!                              | (used to save FPRs)       |
41 //! unwind-frame base     ---->  | (alloc'd by prologue)     |
42 //!                              +---------------------------+
43 //!                              |          ...              |
44 //!                              | spill slots               |
45 //!                              | (accessed via nominal SP) |
46 //!                              |          ...              |
47 //!                              | stack slots               |
48 //!                              | (accessed via nominal SP) |
49 //! nominal SP --------------->  | (alloc'd by prologue)     |
50 //!                              +---------------------------+
51 //!                              |          ...              |
52 //!                              | args for call             |
53 //!                              | outgoing reg save area    |
54 //! SP during function  ------>  | (alloc'd by prologue)     |
55 //!                              +---------------------------+
56 //!
57 //!   (low address)
58 //! ```
59 
60 use crate::ir;
61 use crate::ir::condcodes::IntCC;
62 use crate::ir::types;
63 use crate::ir::MemFlags;
64 use crate::ir::Type;
65 use crate::isa;
66 use crate::isa::s390x::inst::*;
67 use crate::isa::unwind::UnwindInst;
68 use crate::machinst::*;
69 use crate::settings;
70 use crate::{CodegenError, CodegenResult};
71 use alloc::boxed::Box;
72 use alloc::vec::Vec;
73 use regalloc::{RealReg, Reg, RegClass, Set, Writable};
74 use smallvec::{smallvec, SmallVec};
75 use std::convert::TryFrom;
76 
77 // We use a generic implementation that factors out ABI commonalities.
78 
/// Support for the S390x ABI from the callee side (within a function body).
/// This is the generic `ABICalleeImpl` specialized with s390x behavior.
pub type S390xABICallee = ABICalleeImpl<S390xMachineDeps>;

/// Support for the S390x ABI from the caller side (at a callsite).
/// This is the generic `ABICallerImpl` specialized with s390x behavior.
pub type S390xABICaller = ABICallerImpl<S390xMachineDeps>;
84 
85 /// ABI Register usage
86 
in_int_reg(ty: Type) -> bool87 fn in_int_reg(ty: Type) -> bool {
88     match ty {
89         types::I8 | types::I16 | types::I32 | types::I64 | types::R64 => true,
90         types::B1 | types::B8 | types::B16 | types::B32 | types::B64 => true,
91         _ => false,
92     }
93 }
94 
in_flt_reg(ty: Type) -> bool95 fn in_flt_reg(ty: Type) -> bool {
96     match ty {
97         types::F32 | types::F64 => true,
98         _ => false,
99     }
100 }
101 
get_intreg_for_arg(idx: usize) -> Option<Reg>102 fn get_intreg_for_arg(idx: usize) -> Option<Reg> {
103     match idx {
104         0 => Some(regs::gpr(2)),
105         1 => Some(regs::gpr(3)),
106         2 => Some(regs::gpr(4)),
107         3 => Some(regs::gpr(5)),
108         4 => Some(regs::gpr(6)),
109         _ => None,
110     }
111 }
112 
get_fltreg_for_arg(idx: usize) -> Option<Reg>113 fn get_fltreg_for_arg(idx: usize) -> Option<Reg> {
114     match idx {
115         0 => Some(regs::fpr(0)),
116         1 => Some(regs::fpr(2)),
117         2 => Some(regs::fpr(4)),
118         3 => Some(regs::fpr(6)),
119         _ => None,
120     }
121 }
122 
get_intreg_for_ret(idx: usize) -> Option<Reg>123 fn get_intreg_for_ret(idx: usize) -> Option<Reg> {
124     match idx {
125         0 => Some(regs::gpr(2)),
126         // ABI extension to support multi-value returns:
127         1 => Some(regs::gpr(3)),
128         2 => Some(regs::gpr(4)),
129         3 => Some(regs::gpr(5)),
130         _ => None,
131     }
132 }
133 
get_fltreg_for_ret(idx: usize) -> Option<Reg>134 fn get_fltreg_for_ret(idx: usize) -> Option<Reg> {
135     match idx {
136         0 => Some(regs::fpr(0)),
137         // ABI extension to support multi-value returns:
138         1 => Some(regs::fpr(2)),
139         2 => Some(regs::fpr(4)),
140         3 => Some(regs::fpr(6)),
141         _ => None,
142     }
143 }
144 
/// This is the limit for the size of argument and return-value areas on the
/// stack. We place a reasonable limit here to avoid integer overflow issues
/// with 32-bit arithmetic: for now, 128 MB. Checked in `compute_arg_locs`,
/// which returns `CodegenError::ImplLimitExceeded` when exceeded.
static STACK_ARG_RET_SIZE_LIMIT: u64 = 128 * 1024 * 1024;
149 
150 impl Into<MemArg> for StackAMode {
into(self) -> MemArg151     fn into(self) -> MemArg {
152         match self {
153             StackAMode::FPOffset(off, _ty) => MemArg::InitialSPOffset { off },
154             StackAMode::NominalSPOffset(off, _ty) => MemArg::NominalSPOffset { off },
155             StackAMode::SPOffset(off, _ty) => {
156                 MemArg::reg_plus_off(stack_reg(), off, MemFlags::trusted())
157             }
158         }
159     }
160 }
161 
/// S390x-specific ABI behavior. This struct just serves as an implementation
/// point for the `ABIMachineSpec` trait; it is a zero-sized unit struct and
/// is never actually instantiated.
pub struct S390xMachineDeps;
165 
impl ABIMachineSpec for S390xMachineDeps {
    type I = Inst;

    /// Machine word size in bits: s390x is a 64-bit target.
    fn word_bits() -> u32 {
        64
    }

    /// Return required stack alignment in bytes.
    fn stack_align(_call_conv: isa::CallConv) -> u32 {
        8
    }

    /// Compute locations (register or stack slot) for a signature's
    /// parameters or return values.
    ///
    /// Returns the per-value locations, the total stack space consumed by
    /// stack-passed values, and (if `add_ret_area_ptr`) the index of the
    /// synthetic return-area-pointer argument appended at the end.
    fn compute_arg_locs(
        call_conv: isa::CallConv,
        _flags: &settings::Flags,
        params: &[ir::AbiParam],
        args_or_rets: ArgsOrRets,
        add_ret_area_ptr: bool,
    ) -> CodegenResult<(Vec<ABIArg>, i64, Option<usize>)> {
        let mut next_gpr = 0;
        let mut next_fpr = 0;
        let mut next_stack: u64 = 0;
        let mut ret = vec![];

        // Overflow arguments start immediately above the 160-byte register
        // save area the caller provides (see module comment).
        if args_or_rets == ArgsOrRets::Args {
            next_stack = 160;
        }

        for i in 0..params.len() {
            let param = &params[i];

            // Validate "purpose".
            match &param.purpose {
                &ir::ArgumentPurpose::VMContext
                | &ir::ArgumentPurpose::Normal
                | &ir::ArgumentPurpose::StackLimit
                | &ir::ArgumentPurpose::SignatureId => {}
                _ => panic!(
                    "Unsupported argument purpose {:?} in signature: {:?}",
                    param.purpose, params
                ),
            }

            // Each value must belong to exactly one register class.
            let intreg = in_int_reg(param.value_type);
            let fltreg = in_flt_reg(param.value_type);
            debug_assert!(intreg || fltreg);
            debug_assert!(!(intreg && fltreg));

            // Pick the next candidate register of the appropriate class;
            // the available register sets differ between args and returns.
            let (next_reg, candidate) = if intreg {
                let candidate = match args_or_rets {
                    ArgsOrRets::Args => get_intreg_for_arg(next_gpr),
                    ArgsOrRets::Rets => get_intreg_for_ret(next_gpr),
                };
                (&mut next_gpr, candidate)
            } else {
                let candidate = match args_or_rets {
                    ArgsOrRets::Args => get_fltreg_for_arg(next_fpr),
                    ArgsOrRets::Rets => get_fltreg_for_ret(next_fpr),
                };
                (&mut next_fpr, candidate)
            };

            // In the Wasmtime ABI only the first return value can be in a register.
            let candidate =
                if call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets && i > 0 {
                    None
                } else {
                    candidate
                };

            if let Some(reg) = candidate {
                ret.push(ABIArg::reg(
                    reg.to_real_reg(),
                    param.value_type,
                    param.extension,
                    param.purpose,
                ));
                *next_reg += 1;
            } else {
                // Compute size. Every argument or return value takes a slot of
                // at least 8 bytes, except for return values in the Wasmtime ABI.
                let size = (ty_bits(param.value_type) / 8) as u64;
                let slot_size = if call_conv.extends_wasmtime() && args_or_rets == ArgsOrRets::Rets
                {
                    size
                } else {
                    std::cmp::max(size, 8)
                };

                // Align the stack slot.
                debug_assert!(slot_size.is_power_of_two());
                next_stack = align_to(next_stack, slot_size);

                // If the type is actually of smaller size (and the argument
                // was not extended), it is passed right-aligned.
                let offset = if size < slot_size && param.extension == ir::ArgumentExtension::None {
                    slot_size - size
                } else {
                    0
                };
                ret.push(ABIArg::stack(
                    (next_stack + offset) as i64,
                    param.value_type,
                    param.extension,
                    param.purpose,
                ));
                next_stack += slot_size;
            }
        }

        next_stack = align_to(next_stack, 8);

        // If requested, append a hidden pointer to the return-value area:
        // in the next free argument GPR if one remains, else on the stack.
        let extra_arg = if add_ret_area_ptr {
            debug_assert!(args_or_rets == ArgsOrRets::Args);
            if let Some(reg) = get_intreg_for_arg(next_gpr) {
                ret.push(ABIArg::reg(
                    reg.to_real_reg(),
                    types::I64,
                    ir::ArgumentExtension::None,
                    ir::ArgumentPurpose::Normal,
                ));
            } else {
                ret.push(ABIArg::stack(
                    next_stack as i64,
                    types::I64,
                    ir::ArgumentExtension::None,
                    ir::ArgumentPurpose::Normal,
                ));
                next_stack += 8;
            }
            Some(ret.len() - 1)
        } else {
            None
        };

        // To avoid overflow issues, limit the arg/return size to something
        // reasonable -- here, 128 MB.
        if next_stack > STACK_ARG_RET_SIZE_LIMIT {
            return Err(CodegenError::ImplLimitExceeded);
        }

        Ok((ret, next_stack as i64, extra_arg))
    }

    /// Offset added when translating FP-relative argument offsets: none
    /// needed on s390x (stack args are resolved via `InitialSPOffset`).
    fn fp_to_arg_offset(_call_conv: isa::CallConv, _flags: &settings::Flags) -> i64 {
        0
    }

    /// Generate a load of `ty` from a stack location into `into_reg`.
    fn gen_load_stack(mem: StackAMode, into_reg: Writable<Reg>, ty: Type) -> Inst {
        Inst::gen_load(into_reg, mem.into(), ty)
    }

    /// Generate a store of `ty` from `from_reg` to a stack location.
    fn gen_store_stack(mem: StackAMode, from_reg: Reg, ty: Type) -> Inst {
        Inst::gen_store(mem.into(), from_reg, ty)
    }

    /// Generate a register-to-register move of type `ty`.
    fn gen_move(to_reg: Writable<Reg>, from_reg: Reg, ty: Type) -> Inst {
        Inst::gen_move(to_reg, from_reg, ty)
    }

    /// Generate a sign- or zero-extension from `from_bits` to `to_bits`.
    fn gen_extend(
        to_reg: Writable<Reg>,
        from_reg: Reg,
        signed: bool,
        from_bits: u8,
        to_bits: u8,
    ) -> Inst {
        // A "widening" to the same or smaller width is a caller bug.
        assert!(from_bits < to_bits);
        Inst::Extend {
            rd: to_reg,
            rn: from_reg,
            signed,
            from_bits,
            to_bits,
        }
    }

    /// Generate a return. %r14 is the link register holding the return
    /// address (it is also used as the link register in `gen_call` below).
    fn gen_ret() -> Inst {
        Inst::Ret { link: gpr(14) }
    }

    /// Generate `into_reg = from_reg + imm`, choosing the shortest
    /// encoding that fits the immediate.
    fn gen_add_imm(into_reg: Writable<Reg>, from_reg: Reg, imm: u32) -> SmallInstVec<Inst> {
        let mut insts = SmallVec::new();
        // Prefer a load-address with a 12-bit unsigned displacement, then
        // one with a 20-bit signed displacement; otherwise fall back to a
        // move (if needed) plus a 32-bit immediate add.
        if let Some(imm) = UImm12::maybe_from_u64(imm as u64) {
            insts.push(Inst::LoadAddr {
                rd: into_reg,
                mem: MemArg::BXD12 {
                    base: from_reg,
                    index: zero_reg(),
                    disp: imm,
                    flags: MemFlags::trusted(),
                },
            });
        } else if let Some(imm) = SImm20::maybe_from_i64(imm as i64) {
            insts.push(Inst::LoadAddr {
                rd: into_reg,
                mem: MemArg::BXD20 {
                    base: from_reg,
                    index: zero_reg(),
                    disp: imm,
                    flags: MemFlags::trusted(),
                },
            });
        } else {
            if from_reg != into_reg.to_reg() {
                insts.push(Inst::mov64(into_reg, from_reg));
            }
            insts.push(Inst::AluRUImm32 {
                alu_op: ALUOp::Add64,
                rd: into_reg,
                imm,
            });
        }
        insts
    }

    /// Generate a stack-overflow trap if SP is at or below `limit_reg`
    /// (unsigned comparison).
    fn gen_stack_lower_bound_trap(limit_reg: Reg) -> SmallInstVec<Inst> {
        let mut insts = SmallVec::new();
        insts.push(Inst::CmpTrapRR {
            op: CmpOp::CmpL64,
            rn: stack_reg(),
            rm: limit_reg,
            cond: Cond::from_intcc(IntCC::UnsignedLessThanOrEqual),
            trap_code: ir::TrapCode::StackOverflow,
        });
        insts
    }

    /// Placeholder instruction standing in for the epilogue; expanded
    /// later during emission.
    fn gen_epilogue_placeholder() -> Inst {
        Inst::EpiloguePlaceholder
    }

    /// Materialize the address of a stack location into `into_reg`.
    fn gen_get_stack_addr(mem: StackAMode, into_reg: Writable<Reg>, _ty: Type) -> Inst {
        let mem = mem.into();
        Inst::LoadAddr { rd: into_reg, mem }
    }

    /// Scratch register used for stack-limit computations.
    fn get_stacklimit_reg() -> Reg {
        spilltmp_reg()
    }

    /// Load `ty` from `base + offset` into `into_reg`.
    fn gen_load_base_offset(into_reg: Writable<Reg>, base: Reg, offset: i32, ty: Type) -> Inst {
        let mem = MemArg::reg_plus_off(base, offset.into(), MemFlags::trusted());
        Inst::gen_load(into_reg, mem, ty)
    }

    /// Store `ty` from `from_reg` to `base + offset`.
    fn gen_store_base_offset(base: Reg, offset: i32, from_reg: Reg, ty: Type) -> Inst {
        let mem = MemArg::reg_plus_off(base, offset.into(), MemFlags::trusted());
        Inst::gen_store(mem, from_reg, ty)
    }

    /// Adjust SP by `imm` bytes (no-op for zero), using the 16-bit
    /// immediate add form when the value fits.
    fn gen_sp_reg_adjust(imm: i32) -> SmallInstVec<Inst> {
        if imm == 0 {
            return SmallVec::new();
        }

        let mut insts = SmallVec::new();
        if let Ok(imm) = i16::try_from(imm) {
            insts.push(Inst::AluRSImm16 {
                alu_op: ALUOp::Add64,
                rd: writable_stack_reg(),
                imm,
            });
        } else {
            insts.push(Inst::AluRSImm32 {
                alu_op: ALUOp::Add64,
                rd: writable_stack_reg(),
                imm,
            });
        }
        insts
    }

    /// Record a change to the nominal-SP offset. This is bookkeeping only;
    /// it emits no machine code.
    fn gen_nominal_sp_adj(offset: i32) -> Inst {
        Inst::VirtualSPOffsetAdj {
            offset: offset.into(),
        }
    }

    /// No frame-pointer setup is needed: s390x addresses everything off SP
    /// (see module comment), so the prologue frame setup is empty.
    fn gen_prologue_frame_setup(_flags: &settings::Flags) -> SmallInstVec<Inst> {
        SmallVec::new()
    }

    /// Mirror of `gen_prologue_frame_setup`: nothing to restore.
    fn gen_epilogue_frame_restore(_flags: &settings::Flags) -> SmallInstVec<Inst> {
        SmallVec::new()
    }

    fn gen_probestack(_: u32) -> SmallInstVec<Self::I> {
        // TODO: implement if we ever require stack probes on an s390x host
        // (unlikely unless Lucet is ported)
        smallvec![]
    }

    // Returns stack bytes used as well as instructions. Does not adjust
    // nominal SP offset; abi_impl generic code will do that.
    fn gen_clobber_save(
        call_conv: isa::CallConv,
        flags: &settings::Flags,
        clobbers: &Set<Writable<RealReg>>,
        fixed_frame_storage_size: u32,
        outgoing_args_size: u32,
    ) -> (u64, SmallVec<[Inst; 16]>) {
        let mut insts = SmallVec::new();

        // Collect clobbered registers.
        let (clobbered_gpr, clobbered_fpr) = get_regs_saved_in_prologue(call_conv, clobbers);
        // Find the lowest-numbered clobbered GPR: STMG below saves the
        // contiguous range [first_clobbered_gpr .. r15]. 16 means "none".
        let mut first_clobbered_gpr = 16;
        for reg in clobbered_gpr {
            let enc = reg.to_reg().get_hw_encoding();
            if enc < first_clobbered_gpr {
                first_clobbered_gpr = enc;
            }
        }
        // FPRs are saved in an 8-byte slot each, below the save area.
        let clobber_size = clobbered_fpr.len() * 8;
        if flags.unwind_info() {
            insts.push(Inst::Unwind {
                inst: UnwindInst::DefineNewFrame {
                    // CFA is the top of the 160-byte register save area.
                    offset_upward_to_caller_sp: 160,
                    offset_downward_to_clobbers: clobber_size as u32,
                },
            });
        }

        // Use STMG to save clobbered GPRs into save area.
        // The save area slot for GPR n is at entry-SP + 8*n.
        if first_clobbered_gpr < 16 {
            let offset = 8 * first_clobbered_gpr as i64;
            insts.push(Inst::StoreMultiple64 {
                rt: gpr(first_clobbered_gpr as u8),
                rt2: gpr(15),
                addr_reg: stack_reg(),
                addr_off: SImm20::maybe_from_i64(offset).unwrap(),
            });
        }
        if flags.unwind_info() {
            // Describe every GPR the STMG stored, for the unwinder.
            for i in first_clobbered_gpr..16 {
                insts.push(Inst::Unwind {
                    inst: UnwindInst::SaveReg {
                        clobber_offset: clobber_size as u32 + (i * 8) as u32,
                        reg: gpr(i as u8).to_real_reg(),
                    },
                });
            }
        }

        // Decrement stack pointer.
        let stack_size =
            outgoing_args_size as i32 + clobber_size as i32 + fixed_frame_storage_size as i32;
        insts.extend(Self::gen_sp_reg_adjust(-stack_size));
        if flags.unwind_info() {
            insts.push(Inst::Unwind {
                inst: UnwindInst::StackAlloc {
                    size: stack_size as u32,
                },
            });
        }

        // Nominal SP sits above the outgoing-argument area (see module
        // comment's frame diagram).
        let sp_adj = outgoing_args_size as i32;
        if sp_adj > 0 {
            insts.push(Self::gen_nominal_sp_adj(sp_adj));
        }

        // Save FPRs, one 8-byte slot each, above the fixed frame storage.
        for (i, reg) in clobbered_fpr.iter().enumerate() {
            insts.push(Inst::FpuStore64 {
                rd: reg.to_reg().to_reg(),
                mem: MemArg::reg_plus_off(
                    stack_reg(),
                    (i * 8) as i64 + outgoing_args_size as i64 + fixed_frame_storage_size as i64,
                    MemFlags::trusted(),
                ),
            });
            if flags.unwind_info() {
                insts.push(Inst::Unwind {
                    inst: UnwindInst::SaveReg {
                        clobber_offset: (i * 8) as u32,
                        reg: reg.to_reg(),
                    },
                });
            }
        }

        (clobber_size as u64, insts)
    }

    /// Generate the epilogue clobber-restore sequence; mirrors
    /// `gen_clobber_save` above.
    fn gen_clobber_restore(
        call_conv: isa::CallConv,
        _: &settings::Flags,
        clobbers: &Set<Writable<RealReg>>,
        fixed_frame_storage_size: u32,
        outgoing_args_size: u32,
    ) -> SmallVec<[Inst; 16]> {
        let mut insts = SmallVec::new();

        // Collect clobbered registers (must match gen_clobber_save).
        let (clobbered_gpr, clobbered_fpr) = get_regs_saved_in_prologue(call_conv, clobbers);
        let mut first_clobbered_gpr = 16;
        for reg in clobbered_gpr {
            let enc = reg.to_reg().get_hw_encoding();
            if enc < first_clobbered_gpr {
                first_clobbered_gpr = enc;
            }
        }
        let clobber_size = clobbered_fpr.len() * 8;

        // Restore FPRs.
        for (i, reg) in clobbered_fpr.iter().enumerate() {
            insts.push(Inst::FpuLoad64 {
                rd: Writable::from_reg(reg.to_reg().to_reg()),
                mem: MemArg::reg_plus_off(
                    stack_reg(),
                    (i * 8) as i64 + outgoing_args_size as i64 + fixed_frame_storage_size as i64,
                    MemFlags::trusted(),
                ),
            });
        }

        // Increment stack pointer unless it will be restored implicitly.
        // (The LMG below reloads r15 == SP itself, so the explicit add can
        // be skipped when the folded-in offset still fits in 20 bits.)
        let stack_size =
            outgoing_args_size as i32 + clobber_size as i32 + fixed_frame_storage_size as i32;
        let implicit_sp_restore = first_clobbered_gpr < 16
            && SImm20::maybe_from_i64(8 * first_clobbered_gpr as i64 + stack_size as i64).is_some();
        if !implicit_sp_restore {
            insts.extend(Self::gen_sp_reg_adjust(stack_size));
        }

        // Use LMG to restore clobbered GPRs from save area.
        if first_clobbered_gpr < 16 {
            let mut offset = 8 * first_clobbered_gpr as i64;
            if implicit_sp_restore {
                // SP not yet adjusted: address the save area through the
                // still-decremented SP by folding stack_size into the offset.
                offset += stack_size as i64;
            }
            insts.push(Inst::LoadMultiple64 {
                rt: writable_gpr(first_clobbered_gpr as u8),
                rt2: writable_gpr(15),
                addr_reg: stack_reg(),
                addr_off: SImm20::maybe_from_i64(offset).unwrap(),
            });
        }

        insts
    }

    /// Generate a call sequence to `dest`. A far external name requires
    /// two instructions: load the target address into `tmp`, then call
    /// indirectly through it. %r14 is the link register.
    fn gen_call(
        dest: &CallDest,
        uses: Vec<Reg>,
        defs: Vec<Writable<Reg>>,
        opcode: ir::Opcode,
        tmp: Writable<Reg>,
        _callee_conv: isa::CallConv,
        _caller_conv: isa::CallConv,
    ) -> SmallVec<[(InstIsSafepoint, Inst); 2]> {
        let mut insts = SmallVec::new();
        match &dest {
            &CallDest::ExtName(ref name, RelocDistance::Near) => insts.push((
                InstIsSafepoint::Yes,
                Inst::Call {
                    link: writable_gpr(14),
                    info: Box::new(CallInfo {
                        dest: name.clone(),
                        uses,
                        defs,
                        opcode,
                    }),
                },
            )),
            &CallDest::ExtName(ref name, RelocDistance::Far) => {
                // Only the actual call is a safepoint; the address load is not.
                insts.push((
                    InstIsSafepoint::No,
                    Inst::LoadExtNameFar {
                        rd: tmp,
                        name: Box::new(name.clone()),
                        offset: 0,
                    },
                ));
                insts.push((
                    InstIsSafepoint::Yes,
                    Inst::CallInd {
                        link: writable_gpr(14),
                        info: Box::new(CallIndInfo {
                            rn: tmp.to_reg(),
                            uses,
                            defs,
                            opcode,
                        }),
                    },
                ));
            }
            &CallDest::Reg(reg) => insts.push((
                InstIsSafepoint::Yes,
                Inst::CallInd {
                    link: writable_gpr(14),
                    info: Box::new(CallIndInfo {
                        rn: *reg,
                        uses,
                        defs,
                        opcode,
                    }),
                },
            )),
        }

        insts
    }

    /// Inline memory copy for struct arguments: not yet supported.
    fn gen_memcpy(
        _call_conv: isa::CallConv,
        _dst: Reg,
        _src: Reg,
        _size: usize,
    ) -> SmallVec<[Self::I; 8]> {
        unimplemented!("StructArgs not implemented for S390X yet");
    }

    /// Number of spill slots needed for a value of register class `rc`.
    fn get_number_of_spillslots_for_value(rc: RegClass, ty: Type) -> u32 {
        // We allocate in terms of 8-byte slots.
        match (rc, ty) {
            (RegClass::I64, _) => 1,
            (RegClass::F64, _) => 1,
            _ => panic!("Unexpected register class!"),
        }
    }

    /// Get the current virtual-SP offset from an instruction-emission state.
    fn get_virtual_sp_offset_from_state(s: &EmitState) -> i64 {
        s.virtual_sp_offset
    }

    /// Get the nominal-SP-to-FP offset from an instruction-emission state.
    fn get_nominal_sp_to_fp(s: &EmitState) -> i64 {
        s.initial_sp_offset
    }

    /// Compute the full set of registers clobbered by a call to a function
    /// with the given calling convention (i.e. the caller-saved set).
    fn get_regs_clobbered_by_call(call_conv_of_callee: isa::CallConv) -> Vec<Writable<Reg>> {
        let mut caller_saved = Vec::new();
        // NOTE(review): both loops stop at encoding 14. This is harmless --
        // r15 and f15 are callee-saved per is_reg_clobbered_by_call and
        // would be filtered out anyway -- but `0..16` would state the
        // intent more clearly.
        for i in 0..15 {
            let x = writable_gpr(i);
            if is_reg_clobbered_by_call(call_conv_of_callee, x.to_reg().to_real_reg()) {
                caller_saved.push(x);
            }
        }
        for i in 0..15 {
            let v = writable_fpr(i);
            if is_reg_clobbered_by_call(call_conv_of_callee, v.to_reg().to_real_reg()) {
                caller_saved.push(v);
            }
        }
        caller_saved
    }

    /// Use the argument-extension mode exactly as specified in the
    /// signature; no calling convention on s390x overrides it here.
    fn get_ext_mode(
        _call_conv: isa::CallConv,
        specified: ir::ArgumentExtension,
    ) -> ir::ArgumentExtension {
        specified
    }
}
722 
is_reg_saved_in_prologue(_call_conv: isa::CallConv, r: RealReg) -> bool723 fn is_reg_saved_in_prologue(_call_conv: isa::CallConv, r: RealReg) -> bool {
724     match r.get_class() {
725         RegClass::I64 => {
726             // r6 - r15 inclusive are callee-saves.
727             r.get_hw_encoding() >= 6 && r.get_hw_encoding() <= 15
728         }
729         RegClass::F64 => {
730             // f8 - f15 inclusive are callee-saves.
731             r.get_hw_encoding() >= 8 && r.get_hw_encoding() <= 15
732         }
733         _ => panic!("Unexpected RegClass"),
734     }
735 }
736 
get_regs_saved_in_prologue( call_conv: isa::CallConv, regs: &Set<Writable<RealReg>>, ) -> (Vec<Writable<RealReg>>, Vec<Writable<RealReg>>)737 fn get_regs_saved_in_prologue(
738     call_conv: isa::CallConv,
739     regs: &Set<Writable<RealReg>>,
740 ) -> (Vec<Writable<RealReg>>, Vec<Writable<RealReg>>) {
741     let mut int_saves = vec![];
742     let mut fpr_saves = vec![];
743     for &reg in regs.iter() {
744         if is_reg_saved_in_prologue(call_conv, reg.to_reg()) {
745             match reg.to_reg().get_class() {
746                 RegClass::I64 => int_saves.push(reg),
747                 RegClass::F64 => fpr_saves.push(reg),
748                 _ => panic!("Unexpected RegClass"),
749             }
750         }
751     }
752     // Sort registers for deterministic code output.
753     int_saves.sort_by_key(|r| r.to_reg().get_index());
754     fpr_saves.sort_by_key(|r| r.to_reg().get_index());
755     (int_saves, fpr_saves)
756 }
757 
is_reg_clobbered_by_call(_call_conv: isa::CallConv, r: RealReg) -> bool758 fn is_reg_clobbered_by_call(_call_conv: isa::CallConv, r: RealReg) -> bool {
759     match r.get_class() {
760         RegClass::I64 => {
761             // r0 - r5 inclusive are caller-saves.
762             r.get_hw_encoding() <= 5
763         }
764         RegClass::F64 => {
765             // f0 - f7 inclusive are caller-saves.
766             r.get_hw_encoding() <= 7
767         }
768         _ => panic!("Unexpected RegClass"),
769     }
770 }
771