/*
 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_S390_MACROASSEMBLER_S390_HPP
#define CPU_S390_MACROASSEMBLER_S390_HPP

#include "asm/assembler.hpp"
#include "oops/accessDecorators.hpp"

#define MODERN_IFUN(name)  ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_IFUN(name) ((void (MacroAssembler::*)(Register, int64_t, Register, Register))&MacroAssembler::name)
#define MODERN_FFUN(name)  ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)
#define CLASSIC_FFUN(name) ((void (MacroAssembler::*)(FloatRegister, int64_t, Register, Register))&MacroAssembler::name)

class MacroAssembler: public Assembler {
 public:
  MacroAssembler(CodeBuffer* code) : Assembler(code) {}

  //
  // Optimized instruction emitters
  //
  // Move register if destination register and source register are different.
  void lr_if_needed(Register rd, Register rs);
  void lgr_if_needed(Register rd, Register rs);
  void llgfr_if_needed(Register rd, Register rs);
  void ldr_if_needed(FloatRegister rd, FloatRegister rs);

  void move_reg_if_needed(Register dest, BasicType dest_type, Register src, BasicType src_type);
  void move_freg_if_needed(FloatRegister dest, BasicType dest_type, FloatRegister src, BasicType src_type);

  void freg2mem_opt(FloatRegister reg,
                    int64_t       disp,
                    Register      index,
                    Register      base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register      scratch = Z_R0);
  void freg2mem_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void mem2freg_opt(FloatRegister reg,
                    int64_t       disp,
                    Register      index,
                    Register      base,
                    void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
                    void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
                    Register      scratch = Z_R0);
  void mem2freg_opt(FloatRegister reg,
                    const Address &a, bool is_double = true);

  void reg2mem_opt(Register reg,
                   int64_t  disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
                   Register scratch = Z_R0);
  // Returns the offset of the store instruction.
  int reg2mem_opt(Register reg, const Address &a, bool is_double = true);
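  // Usage sketch: an 8-byte store of Z_R2, remembering where the store landed:
  //   int store_off = reg2mem_opt(Z_R2, Address(Z_SP, 16));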

  void mem2reg_opt(Register reg,
                   int64_t  disp,
                   Register index,
                   Register base,
                   void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
                   void (MacroAssembler::*classic)(Register, int64_t, Register, Register));
  void mem2reg_opt(Register reg, const Address &a, bool is_double = true);
  void mem2reg_signed_opt(Register reg, const Address &a);
  // AND immediate and set condition code. Works for 64 bit immediates/operations as well.
  void and_imm(Register r, long mask, Register tmp = Z_R0, bool wide = false);

  // 1's complement, 32bit or 64bit. Optimized to exploit distinct operands facility.
  // Note: The condition code is neither preserved nor correctly set by this code!!!
  // Note: (wide == false) does not protect the high order half of the target register
  // from alteration. It only serves as an optimization hint for 32-bit results.
  void not_(Register r1, Register r2 = noreg, bool wide = false);  // r1 = ~r2

  // Expanded support of all "rotate_then_<logicalOP>" instructions.
  //
  // Generalize and centralize rotate_then_<logicalOP> emitter.
  // Functional description. For details, see Principles of Operation, Chapter 7, "Rotate Then Insert..."
  //  - Bits  in a register are numbered left (most significant) to right (least significant), i.e. [0..63].
  //  - Bytes in a register are numbered left (most significant) to right (least significant), i.e. [0..7].
  //  - Register src is rotated to the left by (nRotate&0x3f) positions.
  //  - Negative values for nRotate result in a rotation to the right by abs(nRotate) positions.
  //  - The bits in positions [lBitPos..rBitPos] of the _ROTATED_ src operand take part in the
  //    logical operation performed on the contents (in those positions) of the dst operand.
  //  - The logical operation that is performed on the dst operand is one of
  //     o insert the selected bits (replacing the original contents of those bit positions)
  //     o and the selected bits with the corresponding bits of the dst operand
  //     o or  the selected bits with the corresponding bits of the dst operand
  //     o xor the selected bits with the corresponding bits of the dst operand
  //  - For clear_dst == true, the destination register is cleared before the bits are inserted.
  //    For clear_dst == false, only the bit positions that get data inserted from src
  //    are changed. All other bit positions remain unchanged.
  //  - For test_only == true,  the result of the logicalOP is only used to set the condition code, dst remains unchanged.
  //    For test_only == false, the result of the logicalOP replaces the selected bits of dst.
  //  - src32bit and dst32bit indicate that the respective register is used as a 32bit value only.
  //    This knowledge can simplify code generation.
  //
  // Here is an important performance note, valid for all <logicalOP>s except "insert":
  //   Due to the complex nature of the operation, it cannot be done in a single cycle.
  //   Timing constraints require the instructions to be cracked into two micro-ops, taking
  //   one or two cycles each to execute. In some cases, an additional pipeline bubble might get added.
  //   Macroscopically, that amounts to a three- or four-cycle instruction where you would
  //   expect just a single cycle.
  //   It is thus not beneficial from a performance point of view to exploit those instructions.
  //   Other reasons (code compactness, register pressure, ...) might outweigh this penalty.
  //
  unsigned long create_mask(int lBitPos, int rBitPos);
  void rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
                        int nRotate, bool src32bit, bool dst32bit, bool oneBits);
  void rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                          bool clear_dst);
  void rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
  void rotate_then_or(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                      bool test_only);
  void rotate_then_xor(Register dst, Register src, int lBitPos, int rBitPos, int nRotate,
                       bool test_only);
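  // A hypothetical usage sketch (registers chosen for illustration):
  //   Extract bits [16..23] of Z_R3 into bit positions [56..63] of Z_R2,
  //   clearing all other bits of Z_R2. Rotating left by 24 moves source
  //   bits [16..23] into positions [56..63] of the rotated operand:
  //     rotate_then_insert(Z_R2, Z_R3, 56, 63, 24, true);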

  void add64(Register r1, RegisterOrConstant inc);

  // Helper function to multiply the 64bit contents of a register by a 16bit constant.
  // The optimization tries to avoid the mghi instruction, since it uses the FPU for
  // the calculation and is thus rather slow.
  //
  // There is no handling for special cases, e.g. cval==0 or cval==1.
  //
  // Returns the length of the generated code block.
  unsigned int mul_reg64_const16(Register rval, Register work, int cval);
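  // Usage sketch: multiply Z_R2 by 1000, using Z_R0 as the work register:
  //   mul_reg64_const16(Z_R2, Z_R0, 1000);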

  // Generic operation r1 := r2 + imm.
  void add2reg(Register r1, int64_t imm, Register r2 = noreg);
  // Generic operation r := b + x + d.
  void add2reg_with_index(Register r, int64_t d, Register x, Register b = noreg);
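  // Usage sketch (assuming r2 == noreg makes r1 itself the source):
  //   add2reg(Z_R3, -8);                         // Z_R3 := Z_R3 - 8
  //   add2reg_with_index(Z_R1, 16, Z_R4, Z_R5);  // Z_R1 := Z_R5 + Z_R4 + 16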

  // Add2mem* methods for direct memory increment.
  void add2mem_32(const Address &a, int64_t imm, Register tmp);
  void add2mem_64(const Address &a, int64_t imm, Register tmp);

  // *((int8_t*)(dst)) |= imm8
  inline void or2mem_8(Address& dst, int64_t imm8);

  // Load values by size and signedness.
  void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);
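  // Usage sketch: load a signed 16-bit value, sign-extended, from memory:
  //   load_sized_value(Z_R2, Address(Z_ARG1, 0), 2, true);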

  // Load values with large offsets to base address.
 private:
  int  split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate);
 public:
  void load_long_largeoffset(Register t, int64_t si20, Register a, Register tmp);
  void load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);
  void load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp);

 private:
  long toc_distance();
 public:
  void load_toc(Register Rtoc);
  void load_long_pcrelative(Register Rdst, address dataLocation);
  static int load_long_pcrelative_size() { return 6; }
  void load_addr_pcrelative(Register Rdst, address dataLocation);
  static int load_addr_pcrel_size() { return 6; } // Just a LARL.

  // Load a value from memory and test (set CC).
  void load_and_test_byte    (Register dst, const Address &a);
  void load_and_test_short   (Register dst, const Address &a);
  void load_and_test_int     (Register dst, const Address &a);
  void load_and_test_int2long(Register dst, const Address &a);
  void load_and_test_long    (Register dst, const Address &a);

  // Test a bit in memory. Result is reflected in CC.
  void testbit(const Address &a, unsigned int bit);
  // Test a bit in a register. Result is reflected in CC.
  void testbit(Register r, unsigned int bitPos);

  void prefetch_read(Address a);
  void prefetch_update(Address a);

  // Clear a register, i.e. load const zero into reg. Return len (in bytes) of
  // generated instruction(s).
  //   whole_reg: Clear 64 bits if true, 32 bits otherwise.
  //   set_cc: Use instruction that sets the condition code, if true.
  int clear_reg(Register r, bool whole_reg = true, bool set_cc = true);
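  // Usage sketch, per the parameter description above:
  //   clear_reg(Z_R0);               // Clear all 64 bits; CC gets set.
  //   clear_reg(Z_R1, false, false); // Clear 32 bits; CC stays untouched.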

#ifdef ASSERT
  int preset_reg(Register r, unsigned long pattern, int pattern_len);
#endif

  // Clear (store zeros) a small piece of memory.
  // CAUTION: Do not use this for atomic memory clearing. Use store_const() instead.
  //   addr: Address descriptor of memory to clear.
  //         Index register will not be used!
  //   size: Number of bytes to clear.
  void clear_mem(const Address& addr, unsigned size);

  // Move immediate values to memory. Currently supports 32 and 64 bit stores,
  // but may be extended to 16 bit store operation, if needed.
  // For details, see implementation in *.cpp file.
         int store_const(const Address &dest, long imm,
                         unsigned int lm, unsigned int lc,
                         Register scratch = Z_R0);
  inline int store_const(const Address &dest, long imm,
                         Register scratch = Z_R0, bool is_long = true);
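  // Usage sketch for the inline variant (is_long selects a 64- vs 32-bit store):
  //   store_const(Address(Z_SP, 0), 0, Z_R0, true);   // Store 8 bytes of zeros.
  //   store_const(Address(Z_SP, 0), -1, Z_R0, false); // Store 4 bytes of ones.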

  // Move/initialize arbitrarily large memory area. No check for destructive overlap.
  // Being interruptible, these instructions need a retry-loop.
  void move_long_ext(Register dst, Register src, unsigned int pad);

  void compare_long_ext(Register left, Register right, unsigned int pad);
  void compare_long_uni(Register left, Register right, unsigned int pad);

  void search_string(Register end, Register start);
  void search_string_uni(Register end, Register start);

  // Translate instructions
  // Being interruptible, these instructions need a retry-loop.
  void translate_oo(Register dst, Register src, uint mask);
  void translate_ot(Register dst, Register src, uint mask);
  void translate_to(Register dst, Register src, uint mask);
  void translate_tt(Register dst, Register src, uint mask);

  // Crypto instructions.
  // Being interruptible, these instructions need a retry-loop.
  void cksm(Register crcBuff, Register srcBuff);
  void km( Register dstBuff, Register srcBuff);
  void kmc(Register dstBuff, Register srcBuff);
  void kimd(Register srcBuff);
  void klmd(Register srcBuff);
  void kmac(Register srcBuff);

  // nop padding
  void align(int modulus);
  void align_address(int modulus);

  //
  // Constants, loading constants, TOC support
  //

  // Load generic address: d <- base(a) + index(a) + disp(a).
  inline void load_address(Register d, const Address &a);
  // Load absolute address (and try to optimize).
  void load_absolute_address(Register d, address addr);

  // Address of Z_ARG1 and argument_offset.
  // If temp_reg == arg_slot, arg_slot will be overwritten.
  Address argument_address(RegisterOrConstant arg_slot,
                           Register temp_reg = noreg,
                           int64_t extra_slot_offset = 0);

  // Load a narrow ptr constant (oop or klass ptr).
  void load_narrow_oop( Register t, narrowOop a);
  void load_narrow_klass(Register t, Klass* k);

  static bool is_load_const_32to64(address pos);
  static bool is_load_narrow_oop(address pos)   { return is_load_const_32to64(pos); }
  static bool is_load_narrow_klass(address pos) { return is_load_const_32to64(pos); }

  static int  load_const_32to64_size()          { return 6; }
  static int  load_narrow_oop_size()            { return load_const_32to64_size(); }
  static int  load_narrow_klass_size()          { return load_const_32to64_size(); }

  static int  patch_load_const_32to64(address pos, int64_t a);
  static int  patch_load_narrow_oop(address pos, oop o);
  static int  patch_load_narrow_klass(address pos, Klass* k);

  // cOops. CLFI exploit.
  void compare_immediate_narrow_oop(Register oop1, narrowOop oop2);
  void compare_immediate_narrow_klass(Register op1, Klass* op2);
  static bool is_compare_immediate32(address pos);
  static bool is_compare_immediate_narrow_oop(address pos);
  static bool is_compare_immediate_narrow_klass(address pos);
  static int  compare_immediate_narrow_size()       { return 6; }
  static int  compare_immediate_narrow_oop_size()   { return compare_immediate_narrow_size(); }
  static int  compare_immediate_narrow_klass_size() { return compare_immediate_narrow_size(); }
  static int  patch_compare_immediate_32(address pos, int64_t a);
  static int  patch_compare_immediate_narrow_oop(address pos, oop o);
  static int  patch_compare_immediate_narrow_klass(address pos, Klass* k);

  // Load a 32bit constant into a 64bit register.
  void load_const_32to64(Register t, int64_t x, bool sign_extend=true);
  // Load a 64 bit constant.
         void load_const(Register t, long a);
  inline void load_const(Register t, void* a);
  inline void load_const(Register t, Label& L);
  inline void load_const(Register t, const AddressLiteral& a);
  // Get the 64 bit constant from a `load_const' sequence.
  static long get_const(address load_const);
  // Patch the 64 bit constant of a `load_const' sequence. This is a low level
  // procedure. It neither flushes the instruction cache nor is it atomic.
  static void patch_const(address load_const, long x);
  static int load_const_size() { return 12; }
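  // Usage sketch: emit a patchable 64-bit constant load and patch it later
  // (new_value is a hypothetical variable; the caller must flush the icache):
  //   address loc = pc();
  //   load_const(Z_R1, (long)0);                   // Fixed 12-byte sequence.
  //   ...
  //   MacroAssembler::patch_const(loc, new_value);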

  // Turn a char into boolean. NOTE: destroys r.
  void c2bool(Register r, Register t = Z_R0);

  // Optimized version of load_const for constants that do not need to be
  // loaded by a sequence of instructions of fixed length and that do not
  // need to be patched.
  int load_const_optimized_rtn_len(Register t, long x, bool emit);
  inline void load_const_optimized(Register t, long x);
  inline void load_const_optimized(Register t, void* a);
  inline void load_const_optimized(Register t, Label& L);
  inline void load_const_optimized(Register t, const AddressLiteral& a);

 public:

  //----------------------------------------------------------
  //            oops in code             -------------
  //  including compressed oops support  -------------
  //----------------------------------------------------------

  // Metadata in code that we have to keep track of.
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index

  // allocate_index
  AddressLiteral allocate_oop_address(jobject obj);
  // find_index
  AddressLiteral constant_oop_address(jobject obj);
  // Uses allocate_oop_address.
  inline void set_oop         (jobject obj, Register d);
  // Uses constant_oop_address.
  inline void set_oop_constant(jobject obj, Register d);
  // Uses constant_metadata_address.
  inline bool set_metadata_constant(Metadata* md, Register d);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr,
                                                Register tmp,
                                                int offset);
  //
  // branch, jump
  //

  // Use one generic function for all branch patches.
  static unsigned long patched_branch(address dest_pos, unsigned long inst, address inst_pos);

  void pd_patch_instruction(address branch, address target, const char* file, int line);

  // Extract relative address from "relative" instructions.
  static long get_pcrel_offset(unsigned long inst);
  static long get_pcrel_offset(address pc);
  static address get_target_addr_pcrel(address pc);

  static inline bool is_call_pcrelative_short(unsigned long inst);
  static inline bool is_call_pcrelative_long(unsigned long inst);
  static inline bool is_branch_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative_long(unsigned long inst);
  static inline bool is_compareandbranch_pcrelative_short(unsigned long inst);
  static inline bool is_branchoncount_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex32_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex64_pcrelative_short(unsigned long inst);
  static inline bool is_branchonindex_pcrelative_short(unsigned long inst);
  static inline bool is_branch_pcrelative16(unsigned long inst);
  static inline bool is_branch_pcrelative32(unsigned long inst);
  static inline bool is_branch_pcrelative(unsigned long inst);
  static inline bool is_load_pcrelative_long(unsigned long inst);
  static inline bool is_misc_pcrelative_long(unsigned long inst);
  static inline bool is_pcrelative_short(unsigned long inst);
  static inline bool is_pcrelative_long(unsigned long inst);
  // PCrelative TOC access. Variants with address argument.
  static inline bool is_load_pcrelative_long(address iLoc);
  static inline bool is_pcrelative_short(address iLoc);
  static inline bool is_pcrelative_long(address iLoc);

  static inline bool is_pcrelative_instruction(address iloc);
  static inline bool is_load_addr_pcrel(address a);

  static void patch_target_addr_pcrel(address pc, address con);
  static void patch_addr_pcrel(address pc, address con) {
    patch_target_addr_pcrel(pc, con); // Just delegate. This is only for nativeInst_s390.cpp.
  }

  //---------------------------------------------------------
  //  Some macros for more comfortable assembler programming.
  //---------------------------------------------------------

  // NOTE: pass NearLabel T to signal that the branch target T will be bound to a near address.

  void compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
  void compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& target);
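  // Usage sketch (Assembler::bcondEqual is the s390 branch condition assumed here):
  //   compare64_and_branch(Z_R2, RegisterOrConstant((intptr_t)0), Assembler::bcondEqual, L_is_zero);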

  void branch_optimized(Assembler::branch_condition cond, address branch_target);
  void branch_optimized(Assembler::branch_condition cond, Label&  branch_target);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    address  branch_addr,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    jlong    x2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);
  void compare_and_branch_optimized(Register r1,
                                    Register r2,
                                    Assembler::branch_condition cond,
                                    Label&   branch_target,
                                    bool     len64,
                                    bool     has_sign);

  //
  // Support for frame handling
  //
  // Specify the register that should be stored as the return pc in the
  // current frame (default is R14).
  inline void save_return_pc(Register pc = Z_R14);
  inline void restore_return_pc();

  // Get current PC.
  address get_PC(Register result);

  // Get current PC + offset. Offset given in bytes, must be even!
  address get_PC(Register result, int64_t offset);

  // Get size of instruction at pc (which must point to valid code).
  void instr_size(Register size, Register pc);

  // Accessing, and in particular modifying, a stack location is only safe if
  // the stack pointer (Z_SP) is set such that the accessed stack location is
  // in the reserved range.
  //
  // From a performance point of view, it is desirable not to change the SP
  // first and then immediately use it to access the freshly reserved space.
  // That opens a small gap, though. If, just after storing some value (the
  // frame pointer) into the to-be-reserved space, an interrupt is caught,
  // the handler might use the space beyond Z_SP for its own purpose.
  // If that happens, the stored value might get altered.

  // Resize the current frame, either relative to the current SP or to an absolute new SP.
  void resize_frame_sub(Register offset, Register fp, bool load_fp=true);
  void resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp);
  void resize_frame_absolute(Register addr, Register fp, bool load_fp);
  void resize_frame(RegisterOrConstant offset, Register fp, bool load_fp=true);

  // Push a frame of size `bytes'. If copy_sp is false, old_sp must already
  // contain a copy of Z_SP.
  void push_frame(Register bytes, Register old_sp, bool copy_sp = true, bool bytes_with_inverted_sign = false);

  // Push a frame of size `bytes'. No abi space is provided.
  // Don't rely on register locking, instead pass a scratch register
  // (Z_R0 by default).
  // CAUTION! Passing registers >= Z_R2 may produce bad results on
  // old CPUs!
  unsigned int push_frame(unsigned int bytes, Register scratch = Z_R0);

  // Push a frame of size `bytes' with abi160 on top.
  unsigned int push_frame_abi160(unsigned int bytes);

  // Pop current C frame.
  void pop_frame();
  // Pop current C frame and restore return PC register (Z_R14).
  void pop_frame_restore_retPC(int frame_size_in_bytes);
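  // A typical sequence (hypothetical sketch) bracketing a call into C code:
  //   save_return_pc();        // Save Z_R14 in the caller's frame.
  //   push_frame_abi160(0);    // New frame with the z/ABI-160 header on top.
  //   ... // e.g. call_c(...)
  //   pop_frame();
  //   restore_return_pc();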

  //
  // Calls
  //

 private:
  address _last_calls_return_pc;

 public:
  // Support for VM calls. This is the base routine called by the
  // different versions of call_VM_leaf. The interpreter may customize
  // this version by overriding it for its purposes (e.g., to
  // save/restore additional registers when doing a VM call).
  void call_VM_leaf_base(address entry_point);
  void call_VM_leaf_base(address entry_point, bool allow_relocation);

  // It is imperative that all calls into the VM are handled via the
  // call_VM macros. They make sure that the stack linkage is set up
  // correctly. Call_VM's correspond to ENTRY/ENTRY_X entry points
  // while call_VM_leaf's correspond to LEAF entry points.
  //
  // This is the base routine called by the different versions of
  // call_VM. The interpreter may customize this version by overriding
  // it for its purposes (e.g., to save/restore additional registers
  // when doing a VM call).

  // If no last_java_sp is specified (noreg) then SP will be used instead.

  virtual void call_VM_base(
    Register        oop_result,        // Where an oop-result ends up if any; use noreg otherwise.
    Register        last_java_sp,      // To set up last_Java_frame in stubs; use noreg otherwise.
    address         entry_point,       // The entry point.
    bool            check_exception);  // Flag which indicates if exception should be checked.
  virtual void call_VM_base(
    Register        oop_result,       // Where an oop-result ends up if any; use noreg otherwise.
    Register        last_java_sp,     // To set up last_Java_frame in stubs; use noreg otherwise.
    address         entry_point,      // The entry point.
    bool            allow_relocation, // Flag to request generation of relocatable code.
    bool            check_exception); // Flag which indicates if exception should be checked.

  // Call into the VM.
  // Passes the thread pointer (in Z_ARG1) as a prepended argument.
  // Makes sure oop return values are visible to the GC.
  void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
               Register arg_3, bool check_exceptions = true);

  void call_VM_static(Register oop_result, address entry_point, bool check_exceptions = true);
  void call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
                      Register arg_3, bool check_exceptions = true);

  // Overloaded with last_java_sp.
  void call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, bool check_exceptions = true);
  void call_VM(Register oop_result, Register last_java_sp, address entry_point,
               Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
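  // Usage sketch (some_runtime_entry is a hypothetical VM entry point; since the
  // thread pointer is prepended in Z_ARG1, user arguments start at Z_ARG2):
  //   call_VM(Z_RET, CAST_FROM_FN_PTR(address, some_runtime_entry), Z_ARG2);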

  void call_VM_leaf(address entry_point);
  void call_VM_leaf(address entry_point, Register arg_1);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Really static VM leaf call (never patched).
  void call_VM_leaf_static(address entry_point);
  void call_VM_leaf_static(address entry_point, Register arg_1);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2);
  void call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3);

  // Call a C function via its function entry. Updates and returns _last_calls_return_pc.
  inline address call(Register function_entry);
  inline address call_c(Register function_entry);
         address call_c(address function_entry);
  // Variant for really static (non-relocatable) calls which are never patched.
         address call_c_static(address function_entry);
  // TOC or pc-relative call + emits a runtime_call relocation.
         address call_c_opt(address function_entry);

  inline address call_stub(Register function_entry);
  inline address call_stub(address  function_entry);

  // Get the pc where the last call will return to. Returns _last_calls_return_pc.
  inline address last_calls_return_pc();

 private:
  static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
  static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.


 public:
  bool           call_far_patchable(address target, int64_t toc_offset);
  static bool    is_call_far_patchable_at(address inst_start);             // All supported forms of patchable calls.
  static bool    is_call_far_patchable_pcrelative_at(address inst_start);  // Pc-relative call with leading nops.
  static bool    is_call_far_pcrelative(address instruction_addr);         // Pure far pc-relative call, with one leading size adjustment nop.
  static void    set_dest_of_call_far_patchable_at(address inst_start, address target, int64_t toc_offset);
  static address get_dest_of_call_far_patchable_at(address inst_start, address toc_start);

  void align_call_far_patchable(address pc);

  // PCrelative TOC access.

  // This value is independent of code position - constant for the lifetime of the VM.
  static int call_far_patchable_size() {
    return load_const_from_toc_size() + call_byregister_size();
  }

  static int call_far_patchable_ret_addr_offset() { return call_far_patchable_size(); }

  static bool call_far_patchable_requires_alignment_nop(address pc) {
    int size = call_far_patchable_size();
    return ((intptr_t)(pc + size) & 0x03L) != 0;
  }
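  // Note: with load_const_from_toc_size() == 6 and call_byregister_size() == 2
  // (both defined in this class), call_far_patchable_size() evaluates to 8 bytes.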

  // END OF PCrelative TOC access.

  static int jump_byregister_size()          { return 2; }
  static int jump_pcrelative_size()          { return 4; }
  static int jump_far_pcrelative_size()      { return 6; }
  static int call_byregister_size()          { return 2; }
  static int call_pcrelative_size()          { return 4; }
  static int call_far_pcrelative_size()      { return 2 + 6; } // Prepend each BRASL with a nop.
  static int call_far_pcrelative_size_raw()  { return 6; }     // Size of the BRASL alone, without the leading nop.

  //
  // Java utilities
  //

  // These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code.
  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
  virtual void check_and_handle_popframe(Register java_thread);
  virtual void check_and_handle_earlyret(Register java_thread);

  // Polling page support.
  enum poll_mask {
    mask_stackbang = 0xde, // 222 (dec)
    mask_safepoint = 0x6f, // 111 (dec)
    mask_profiling = 0xba  // 186 (dec)
  };

  // Read from the polling page.
  void load_from_polling_page(Register polling_page_address, int64_t offset = 0);

  // Check if given instruction is a read from the polling page
  // as emitted by load_from_polling_page.
  static bool is_load_from_polling_page(address instr_loc);
  // Extract poll address from instruction and ucontext.
  static address get_poll_address(address instr_loc, void* ucontext);
  // Extract poll register from instruction.
  static uint get_poll_register(address instr_loc);

  // Check if safepoint requested and if so branch
  void safepoint_poll(Label& slow_path, Register temp_reg);

  // Stack overflow checking
  void bang_stack_with_offset(int offset);

  // Check for reserved stack access in method being exited. If the reserved
  // stack area was accessed, protect it again and throw StackOverflowError.
  // Uses Z_R1.
  void reserved_stack_check(Register return_pc);

  // Atomics
  // -- none?

  void tlab_allocate(Register obj,                // Result: pointer to object after successful allocation
                     Register var_size_in_bytes,  // Object size in bytes if unknown at compile time; invalid otherwise.
                     int      con_size_in_bytes,  // Object size in bytes if   known at compile time.
                     Register t1,                 // temp register
                     Label&   slow_case);         // Continuation point if fast allocation fails.

  // Emitter for interface method lookup.
  //   input: recv_klass, intf_klass, itable_index
  //   output: method_result
  //   kills: itable_index, temp1_reg, Z_R0, Z_R1
  void lookup_interface_method(Register           recv_klass,
                               Register           intf_klass,
                               RegisterOrConstant itable_index,
                               Register           method_result,
                               Register           temp1_reg,
                               Label&             no_such_interface,
                               bool               return_method = true);

  // virtual method calling
  void lookup_virtual_method(Register             recv_klass,
                             RegisterOrConstant   vtable_index,
                             Register             method_result);

  // Factor out code to call ic_miss_handler.
  unsigned int call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch);
  void nmethod_UEP(Label& ic_miss);

  // Emitters for "partial subtype" checks.

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp1_reg.
  // If super_check_offset is not -1, temp1_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp1_reg,
                                     Label*   L_success,
                                     Label*   L_failure,
                                     Label*   L_slow_path,
                                     RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register Rsubklass,
                                     Register Rsuperklass,
                                     Register Rarray_ptr, // tmp
                                     Register Rlength,    // tmp
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp1_reg,
                           Register temp2_reg,
                           Label&   L_success);
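  // Usage sketch (register/label choice is hypothetical): branch to L_ok when
  // Z_ARG1 is a subtype of Z_ARG2, fall through otherwise:
  //   check_klass_subtype(Z_ARG1, Z_ARG2, Z_R1, Z_R0, L_ok);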

  void clinit_barrier(Register klass,
                      Register thread,
                      Label* L_fast_path = NULL,
                      Label* L_slow_path = NULL);

  // Increment a counter at counter_address when the eq condition code is set.
  // Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
  void increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg);
  // Biased locking support
  // Upon entry, obj_reg must contain the target object, and mark_reg
  // must contain the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Register temp2_reg, Label& done, Label* slow_case = NULL);
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  void biased_locking_exit(Register mark_addr, Register temp_reg, Label& done);

  void compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);
  void compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias = UseBiasedLocking);

  void resolve_jobject(Register value, Register tmp1, Register tmp2);

  // Support for last Java frame (but use call_VM instead where possible).
 private:
  void set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation);
  void reset_last_Java_frame(bool allow_relocation);
  void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation);
 public:
  inline void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  inline void set_last_Java_frame_static(Register last_java_sp, Register last_Java_pc);
  inline void reset_last_Java_frame(void);
  inline void reset_last_Java_frame_static(void);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
  inline void set_top_ijava_frame_at_SP_as_last_Java_frame_static(Register sp, Register tmp1);

  void set_thread_state(JavaThreadState new_state);

  // Read vm result from thread.
  void get_vm_result  (Register oop_result);
  void get_vm_result_2(Register result);
  // Vm result is currently getting hijacked for oop preservation.
  void set_vm_result(Register oop_result);

  // Support for NULL-checks
  //
  // Generates code that causes a NULL OS exception if the content of reg is NULL.
  // If the accessed location is M[reg + offset] and the offset is known, provide the
  // offset. No explicit code generation is needed if the offset is within a certain
  // range (0 <= offset <= page_size).
  //
  // %%%%%% Currently not done for z/Architecture

  void null_check(Register reg, Register tmp = Z_R0, int64_t offset = -1);
  static bool needs_explicit_null_check(intptr_t offset);  // Implemented in shared file ?!
  static bool uses_implicit_null_check(void* address);

  // Klass oop manipulations if compressed.
  void encode_klass_not_null(Register dst, Register src = noreg);
  void decode_klass_not_null(Register dst, Register src);
  void decode_klass_not_null(Register dst);
  void load_klass(Register klass, Address mem);
  void load_klass(Register klass, Register src_oop);
  void load_prototype_header(Register Rheader, Register Rsrc_oop);
  void store_klass(Register klass, Register dst_oop, Register ck = noreg); // Klass will get compressed if ck not provided.
  void store_klass_gap(Register s, Register dst_oop);

  // This function calculates the size of the code generated by
  //   decode_klass_not_null(register dst)
  // when (Universe::heap() != NULL). Hence, if the instructions
  // it generates change, then this method needs to be updated.
  static int instr_size_for_decode_klass_not_null();

  void encode_heap_oop(Register oop);
  void encode_heap_oop_not_null(Register oop);

  static int get_oop_base_pow2_offset(uint64_t oop_base);
  int  get_oop_base(Register Rbase, uint64_t oop_base);
  int  get_oop_base_complement(Register Rbase, uint64_t oop_base);
  void compare_heap_oop(Register Rop1, Address mem, bool maybeNULL);
  void compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL);

  // Access heap oop, handle encoding and GC barriers.
 private:
  void access_store_at(BasicType type, DecoratorSet decorators,
                       const Address& addr, Register val,
                       Register tmp1, Register tmp2, Register tmp3);
  void access_load_at(BasicType type, DecoratorSet decorators,
                      const Address& addr, Register dst,
                      Register tmp1, Register tmp2, Label *is_null = NULL);

 public:
  // tmp1 and tmp2 are used with decorators ON_PHANTOM_OOP_REF or ON_WEAK_OOP_REF.
  void load_heap_oop(Register dest, const Address &a,
                     Register tmp1, Register tmp2,
                     DecoratorSet decorators = 0, Label *is_null = NULL);
  void store_heap_oop(Register Roop, const Address &a,
                      Register tmp1, Register tmp2, Register tmp3,
                      DecoratorSet decorators = 0);

  void oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1, bool only32bitValid = false);
  void oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL,
                   Register Rbase = Z_R1, int pow2_offset = -1);

  void resolve_oop_handle(Register result);
  void load_mirror_from_const_method(Register mirror, Register const_method);
  void load_method_holder(Register holder, Register method);

  //--------------------------
  //---  Operations on arrays.
  //--------------------------
  unsigned int Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int Clear_Array_Const(long cnt, Register base);
  unsigned int Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg);
  unsigned int CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
                                             Register cnt_reg,
                                             Register tmp1_reg, Register tmp2_reg);


  // Emit an oop const to the constant pool and set a relocation info
  // with address current_pc. Return the TOC offset of the constant.
  int store_const_in_toc(AddressLiteral& val);
  int store_oop_in_toc(AddressLiteral& oop);
  // Emit an oop const to the constant pool via store_oop_in_toc, or
  // emit a scalar const to the constant pool via store_const_in_toc,
  // and load the constant into register dst.
  bool load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  // Get CPU version dependent size of load_const sequence.
  // The returned value is valid only for code sequences
  // generated by load_const, not load_const_optimized.
  static int load_const_from_toc_size() {
    return load_long_pcrelative_size();
  }
  bool load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc = noreg);
  static intptr_t get_const_from_toc(address pc);
  static void     set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb);

  // Dynamic TOC.
  static bool is_load_const(address a);
  static bool is_load_const_from_toc_pcrelative(address a);
  static bool is_load_const_from_toc(address a) { return is_load_const_from_toc_pcrelative(a); }

  // PCrelative TOC access.
  static bool is_call_byregister(address a) { return is_z_basr(*(short*)a); }
  static bool is_load_const_from_toc_call(address a);
  static bool is_load_const_call(address a);
  static int load_const_call_size() { return load_const_size() + call_byregister_size(); }
  static int load_const_from_toc_call_size() { return load_const_from_toc_size() + call_byregister_size(); }
  // Offset is +/- 2**32 -> use long.
  static long get_load_const_from_toc_offset(address a);

  // Bit operations for single register operands.
  inline void lshift(Register r, int places, bool doubl = true);   // <<
  inline void rshift(Register r, int places, bool doubl = true);   // >>

  //
  // Debugging
  //

  // Assert on CC (condition code in CPU state).
  void asm_assert(bool check_equal, const char* msg, int id) PRODUCT_RETURN;
  void asm_assert_low(const char *msg, int id) PRODUCT_RETURN;
  void asm_assert_high(const char *msg, int id) PRODUCT_RETURN;
  void asm_assert_eq(const char* msg, int id) { asm_assert(true, msg, id); }
  void asm_assert_ne(const char* msg, int id) { asm_assert(false, msg, id); }

  void asm_assert_static(bool check_equal, const char* msg, int id) PRODUCT_RETURN;

 private:
  // Emit assertions.
  void asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
                            Register mem_base, const char* msg, int id) PRODUCT_RETURN;

 public:
  inline void asm_assert_mem4_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  true, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, true, 8, mem_offset, mem_base, msg, id);
  }

  inline void asm_assert_mem4_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_is_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(true,  false, 8, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem4_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 4, mem_offset, mem_base, msg, id);
  }
  inline void asm_assert_mem8_isnot_zero_static(int64_t mem_offset, Register mem_base, const char* msg, int id) {
    asm_assert_mems_zero(false, false, 8, mem_offset, mem_base, msg, id);
  }
  void asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) PRODUCT_RETURN;

  // Verify Z_thread contents.
  void verify_thread();

  // Save and restore functions: Exclude Z_R0.
  void save_volatile_regs(   Register dst, int offset, bool include_fp, bool include_flags);
  void restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags);

  // Only if +VerifyOops.
  // Kills Z_R0.
  void verify_oop(Register reg, const char* s = "broken oop");
  // Kills Z_R0, condition code.
  void verify_oop_addr(Address addr, const char* msg = "contains broken oop");

  // TODO: verify_method and klass metadata (compare against vptr?).
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line) {}

#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

 private:
  // Generate printout in stop().
  static const char* stop_types[];
  enum {
    stop_stop               = 0,
    stop_untested           = 1,
    stop_unimplemented      = 2,
    stop_shouldnotreachhere = 3,
    stop_end                = 4
  };
  // Prints msg and stops execution.
  void    stop(int type, const char* msg, int id = 0);
  address stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation); // Non-relocatable code only!!
  void    stop_static(int type, const char* msg, int id);                                        // Non-relocatable code only!!

 public:

  // Prints msg and stops.
  address stop_chain(       address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, true); }
  address stop_chain_static(address reentry, const char* msg = "", int id = 0) { return stop_chain(reentry, stop_stop, msg, id, false); }
  void stop_static  (const char* msg = "", int id = 0) { stop_static(stop_stop,   msg, id); }
  void stop         (const char* msg = "", int id = 0) { stop(stop_stop,          msg, id); }
  void untested     (const char* msg = "", int id = 0) { stop(stop_untested,      msg, id); }
  void unimplemented(const char* msg = "", int id = 0) { stop(stop_unimplemented, msg, id); }
  void should_not_reach_here(const char* msg = "", int id = -1) { stop(stop_shouldnotreachhere, msg, id); }

  // Factor out part of stop into subroutine to save space.
  void stop_subroutine();

  // Prints msg, but doesn't stop.
  void warn(const char* msg);

  //-----------------------------
  //---  basic block tracing code
  //-----------------------------
  void trace_basic_block(uint i);
  void init_basic_block_trace();
  // Number of bytes by which a basic block grows due to the tracing code macro (worst case).
  // Currently, the worst case is 48 bytes. 64 puts us securely on the safe side.
  static int basic_blck_trace_blk_size_incr() { return 64; }

  // Write pattern 0x0101010101010101 in region [low-before, high+after].
  // Low and high may be the same registers. Before and after are
  // the numbers of 8-byte words.
  void zap_from_to(Register low, Register high, Register tmp1 = Z_R0, Register tmp2 = Z_R1,
                   int before = 0, int after = 0) PRODUCT_RETURN;
  // Emitters for CRC32 calculation.
  // A note on invertCRC:
  //   Unfortunately, the internal representation of the crc value differs between CRC32 and CRC32C.
  //   CRC32 holds its current crc value in the externally visible representation.
  //   CRC32C holds its current crc value in internal format, ready for updating.
  //   Thus, the crc value must be bit-flipped before updating it in the CRC32 case.
  //   In the CRC32C case, it must be bit-flipped when it is given to the outside world (getValue()).
  //   The bool invertCRC parameter indicates whether bit-flipping is required before updates.
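  //   Consequently (usage sketch): emitters for CRC32 pass invertCRC == true,
  //   those for CRC32C pass invertCRC == false, e.g.
  //     kernel_crc32_1word(crc, buf, len, table, t0, t1, t2, t3, true); // CRC32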
 private:
  void fold_byte_crc32(Register crc, Register table, Register val, Register tmp);
  void fold_8bit_crc32(Register crc, Register table, Register tmp);
  void update_byte_crc32( Register crc, Register val, Register table);
  void update_byteLoop_crc32(Register crc, Register buf, Register len, Register table,
                             Register data);
  void update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
                          Register t0,  Register t1,  Register t2,  Register t3);
 public:
  void kernel_crc32_singleByteReg(Register crc, Register val, Register table,
                                  bool invertCRC);
  void kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
                               bool invertCRC);
  void kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          bool invertCRC);
  void kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
                          Register t0,  Register t1,  Register t2,  Register t3,
                          bool invertCRC);

  // Emitters for BigInteger.multiplyToLen intrinsic
  // note: length of result array (zlen) is passed on the stack
 private:
  void add2_with_carry(Register dest_hi, Register dest_lo,
                       Register src1, Register src2);
  void multiply_64_x_64_loop(Register x, Register xstart,
                             Register x_xstart,
                             Register y, Register y_idx, Register z,
                             Register carry, Register product,
                             Register idx, Register kdx);
  void multiply_add_128_x_128(Register x_xstart, Register y, Register z,
                              Register yz_idx, Register idx,
                              Register carry, Register product, int offset);
  void multiply_128_x_128_loop(Register x_xstart,
                               Register y, Register z,
                               Register yz_idx, Register idx,
                               Register jdx,
                               Register carry, Register product,
                               Register carry2);
 public:
  void multiply_to_len(Register x, Register xlen,
                       Register y, Register ylen,
                       Register z,
                       Register tmp1, Register tmp2,
                       Register tmp3, Register tmp4, Register tmp5);
};

/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual {
 private:
  MacroAssembler* _masm;
  Label _label;

 public:
  SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value, Register _rscratch);
  ~SkipIfEqual();
};
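
// Hypothetical use, guarding slow diagnostic code behind a bool flag
// (DTraceMethodProbes stands in for any such flag; Z_R1_scratch is one
// possible scratch register choice):
//   {
//     SkipIfEqual skip(_masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // Code emitted here is skipped at run-time while DTraceMethodProbes == false.
//   } // The destructor binds the skip label here.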

#ifdef ASSERT
// Return false (e.g. important for our impl. of virtual calls).
inline bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif

#endif // CPU_S390_MACROASSEMBLER_S390_HPP