1 /*
2  * Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
3  * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This code is free software; you can redistribute it and/or modify it
7  * under the terms of the GNU General Public License version 2 only, as
8  * published by the Free Software Foundation.
9  *
10  * This code is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
13  * version 2 for more details (a copy is included in the LICENSE file that
14  * accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License version
17  * 2 along with this work; if not, write to the Free Software Foundation,
18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19  *
20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21  * or visit www.oracle.com if you need additional information or have any
22  * questions.
23  *
24  */
25 
26 #include "precompiled.hpp"
27 #include "asm/codeBuffer.hpp"
28 #include "asm/macroAssembler.inline.hpp"
29 #include "compiler/disassembler.hpp"
30 #include "gc/shared/barrierSet.hpp"
31 #include "gc/shared/barrierSetAssembler.hpp"
32 #include "gc/shared/collectedHeap.inline.hpp"
33 #include "interpreter/interpreter.hpp"
34 #include "gc/shared/cardTableBarrierSet.hpp"
35 #include "memory/resourceArea.hpp"
36 #include "memory/universe.hpp"
37 #include "oops/accessDecorators.hpp"
38 #include "oops/compressedOops.inline.hpp"
39 #include "oops/klass.inline.hpp"
40 #include "prims/methodHandles.hpp"
41 #include "registerSaver_s390.hpp"
42 #include "runtime/biasedLocking.hpp"
43 #include "runtime/icache.hpp"
44 #include "runtime/interfaceSupport.inline.hpp"
45 #include "runtime/objectMonitor.hpp"
46 #include "runtime/os.hpp"
47 #include "runtime/safepoint.hpp"
48 #include "runtime/safepointMechanism.hpp"
49 #include "runtime/sharedRuntime.hpp"
50 #include "runtime/stubRoutines.hpp"
51 #include "utilities/events.hpp"
52 #include "utilities/macros.hpp"
53 #include "utilities/powerOfTwo.hpp"
54 
55 #include <ucontext.h>
56 
57 #define BLOCK_COMMENT(str) block_comment(str)
58 #define BIND(label)        bind(label); BLOCK_COMMENT(#label ":")
59 
60 // Move 32-bit register if destination and source are different.
61 void MacroAssembler::lr_if_needed(Register rd, Register rs) {
62   if (rs != rd) { z_lr(rd, rs); }
63 }
64 
65 // Move register if destination and source are different.
66 void MacroAssembler::lgr_if_needed(Register rd, Register rs) {
67   if (rs != rd) { z_lgr(rd, rs); }
68 }
69 
70 // Zero-extend 32-bit register into 64-bit register if destination and source are different.
71 void MacroAssembler::llgfr_if_needed(Register rd, Register rs) {
72   if (rs != rd) { z_llgfr(rd, rs); }
73 }
74 
75 // Move float register if destination and source are different.
76 void MacroAssembler::ldr_if_needed(FloatRegister rd, FloatRegister rs) {
77   if (rs != rd) { z_ldr(rd, rs); }
78 }
79 
80 // Move integer register if destination and source are different.
81 // It is assumed that shorter-than-int types are already
82 // appropriately sign-extended.
83 void MacroAssembler::move_reg_if_needed(Register dst, BasicType dst_type, Register src,
84                                         BasicType src_type) {
85   assert((dst_type != T_FLOAT) && (dst_type != T_DOUBLE), "use move_freg for float types");
86   assert((src_type != T_FLOAT) && (src_type != T_DOUBLE), "use move_freg for float types");
87 
88   if (dst_type == src_type) {
89     lgr_if_needed(dst, src); // Just move all 64 bits.
90     return;
91   }
92 
93   switch (dst_type) {
94     // Do not support these types for now.
95     //  case T_BOOLEAN:
96     case T_BYTE:  // signed byte
97       switch (src_type) {
98         case T_INT:
99           z_lgbr(dst, src);
100           break;
101         default:
102           ShouldNotReachHere();
103       }
104       return;
105 
106     case T_CHAR:
107     case T_SHORT:
108       switch (src_type) {
109         case T_INT:
110           if (dst_type == T_CHAR) {
111             z_llghr(dst, src);
112           } else {
113             z_lghr(dst, src);
114           }
115           break;
116         default:
117           ShouldNotReachHere();
118       }
119       return;
120 
121     case T_INT:
122       switch (src_type) {
123         case T_BOOLEAN:
124         case T_BYTE:
125         case T_CHAR:
126         case T_SHORT:
127         case T_INT:
128         case T_LONG:
129         case T_OBJECT:
130         case T_ARRAY:
131         case T_VOID:
132         case T_ADDRESS:
133           lr_if_needed(dst, src);
134           // llgfr_if_needed(dst, src);  // zero-extend (in case we need to find a bug).
135           return;
136 
137         default:
138           assert(false, "non-integer src type");
139           return;
140       }
141     case T_LONG:
142       switch (src_type) {
143         case T_BOOLEAN:
144         case T_BYTE:
145         case T_CHAR:
146         case T_SHORT:
147         case T_INT:
148           z_lgfr(dst, src); // sign extension
149           return;
150 
151         case T_LONG:
152         case T_OBJECT:
153         case T_ARRAY:
154         case T_VOID:
155         case T_ADDRESS:
156           lgr_if_needed(dst, src);
157           return;
158 
159         default:
160           assert(false, "non-integer src type");
161           return;
162       }
163       return;
164     case T_OBJECT:
165     case T_ARRAY:
166     case T_VOID:
167     case T_ADDRESS:
168       switch (src_type) {
169         // These types don't make sense to be converted to pointers:
170         //      case T_BOOLEAN:
171         //      case T_BYTE:
172         //      case T_CHAR:
173         //      case T_SHORT:
174 
175         case T_INT:
176           z_llgfr(dst, src); // zero extension
177           return;
178 
179         case T_LONG:
180         case T_OBJECT:
181         case T_ARRAY:
182         case T_VOID:
183         case T_ADDRESS:
184           lgr_if_needed(dst, src);
185           return;
186 
187         default:
188           assert(false, "non-integer src type");
189           return;
190       }
191       return;
192     default:
193       assert(false, "non-integer dst type");
194       return;
195   }
196 }
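// Illustrative examples (added, not exhaustive): move_reg_if_needed(dst, T_LONG, src, T_INT)
// sign-extends via LGFR, while move_reg_if_needed(dst, T_ADDRESS, src, T_INT) zero-extends
// via LLGFR; equal types collapse to a plain LGR (or to nothing if dst == src).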
197 
198 // Move float register if destination and source are different.
199 void MacroAssembler::move_freg_if_needed(FloatRegister dst, BasicType dst_type,
200                                          FloatRegister src, BasicType src_type) {
201   assert((dst_type == T_FLOAT) || (dst_type == T_DOUBLE), "use move_reg for int types");
202   assert((src_type == T_FLOAT) || (src_type == T_DOUBLE), "use move_reg for int types");
203   if (dst_type == src_type) {
204     ldr_if_needed(dst, src); // Just move all 64 bits.
205   } else {
206     switch (dst_type) {
207       case T_FLOAT:
208         assert(src_type == T_DOUBLE, "invalid float type combination");
209         z_ledbr(dst, src);
210         return;
211       case T_DOUBLE:
212         assert(src_type == T_FLOAT, "invalid float type combination");
213         z_ldebr(dst, src);
214         return;
215       default:
216         assert(false, "non-float dst type");
217         return;
218     }
219   }
220 }
221 
222 // Optimized emitter for reg to mem operations.
223 // Uses modern instructions if running on modern hardware, classic instructions
224 // otherwise. Prefers (usually shorter) classic instructions if applicable.
225 // Data register (reg) cannot be used as work register.
226 //
227 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
228 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
229 void MacroAssembler::freg2mem_opt(FloatRegister reg,
230                                   int64_t       disp,
231                                   Register      index,
232                                   Register      base,
233                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
234                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
235                                   Register      scratch) {
236   index = (index == noreg) ? Z_R0 : index;
237   if (Displacement::is_shortDisp(disp)) {
238     (this->*classic)(reg, disp, index, base);
239   } else {
240     if (Displacement::is_validDisp(disp)) {
241       (this->*modern)(reg, disp, index, base);
242     } else {
243       if (scratch != Z_R0 && scratch != Z_R1) {
244         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
245       } else {
246         if (scratch != Z_R0) {   // scratch == Z_R1
247           if ((scratch == index) || (index == base)) {
248             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
249           } else {
250             add2reg(scratch, disp, base);
251             (this->*classic)(reg, 0, index, scratch);
252             if (base == scratch) {
253               add2reg(base, -disp);  // Restore base.
254             }
255           }
256         } else {   // scratch == Z_R0
257           z_lgr(scratch, base);
258           add2reg(base, disp);
259           (this->*classic)(reg, 0, index, base);
260           z_lgr(base, scratch);      // Restore base.
261         }
262       }
263     }
264   }
265 }
266 
267 void MacroAssembler::freg2mem_opt(FloatRegister reg, const Address &a, bool is_double) {
268   if (is_double) {
269     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stdy), CLASSIC_FFUN(z_std));
270   } else {
271     freg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_stey), CLASSIC_FFUN(z_ste));
272   }
273 }
274 
275 // Optimized emitter for mem to reg operations.
276 // Uses modern instructions if running on modern hardware, classic instructions
277 // otherwise. Prefers (usually shorter) classic instructions if applicable.
278 // Data register (reg) cannot be used as work register.
279 //
280 // Don't rely on register locking, instead pass a scratch register (Z_R0 by default).
281 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
282 void MacroAssembler::mem2freg_opt(FloatRegister reg,
283                                   int64_t       disp,
284                                   Register      index,
285                                   Register      base,
286                                   void (MacroAssembler::*modern) (FloatRegister, int64_t, Register, Register),
287                                   void (MacroAssembler::*classic)(FloatRegister, int64_t, Register, Register),
288                                   Register      scratch) {
289   index = (index == noreg) ? Z_R0 : index;
290   if (Displacement::is_shortDisp(disp)) {
291     (this->*classic)(reg, disp, index, base);
292   } else {
293     if (Displacement::is_validDisp(disp)) {
294       (this->*modern)(reg, disp, index, base);
295     } else {
296       if (scratch != Z_R0 && scratch != Z_R1) {
297         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
298       } else {
299         if (scratch != Z_R0) {   // scratch == Z_R1
300           if ((scratch == index) || (index == base)) {
301             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
302           } else {
303             add2reg(scratch, disp, base);
304             (this->*classic)(reg, 0, index, scratch);
305             if (base == scratch) {
306               add2reg(base, -disp);  // Restore base.
307             }
308           }
309         } else {   // scratch == Z_R0
310           z_lgr(scratch, base);
311           add2reg(base, disp);
312           (this->*classic)(reg, 0, index, base);
313           z_lgr(base, scratch);      // Restore base.
314         }
315       }
316     }
317   }
318 }
319 
320 void MacroAssembler::mem2freg_opt(FloatRegister reg, const Address &a, bool is_double) {
321   if (is_double) {
322     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ldy), CLASSIC_FFUN(z_ld));
323   } else {
324     mem2freg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_FFUN(z_ley), CLASSIC_FFUN(z_le));
325   }
326 }
327 
328 // Optimized emitter for reg to mem operations.
329 // Uses modern instructions if running on modern hardware, classic instructions
330 // otherwise. Prefers (usually shorter) classic instructions if applicable.
331 // Data register (reg) cannot be used as work register.
332 //
333 // Don't rely on register locking, instead pass a scratch register
334 // (Z_R0 by default).
335 // CAUTION! Passing registers >= Z_R2 may produce bad results on old CPUs!
336 void MacroAssembler::reg2mem_opt(Register reg,
337                                  int64_t  disp,
338                                  Register index,
339                                  Register base,
340                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
341                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register),
342                                  Register scratch) {
343   index = (index == noreg) ? Z_R0 : index;
344   if (Displacement::is_shortDisp(disp)) {
345     (this->*classic)(reg, disp, index, base);
346   } else {
347     if (Displacement::is_validDisp(disp)) {
348       (this->*modern)(reg, disp, index, base);
349     } else {
350       if (scratch != Z_R0 && scratch != Z_R1) {
351         (this->*modern)(reg, disp, index, base);      // Will fail with disp out of range.
352       } else {
353         if (scratch != Z_R0) {   // scratch == Z_R1
354           if ((scratch == index) || (index == base)) {
355             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
356           } else {
357             add2reg(scratch, disp, base);
358             (this->*classic)(reg, 0, index, scratch);
359             if (base == scratch) {
360               add2reg(base, -disp);  // Restore base.
361             }
362           }
363         } else {   // scratch == Z_R0
364           if ((scratch == reg) || (scratch == base) || (reg == base)) {
365             (this->*modern)(reg, disp, index, base);  // Will fail with disp out of range.
366           } else {
367             z_lgr(scratch, base);
368             add2reg(base, disp);
369             (this->*classic)(reg, 0, index, base);
370             z_lgr(base, scratch);    // Restore base.
371           }
372         }
373       }
374     }
375   }
376 }
377 
378 int MacroAssembler::reg2mem_opt(Register reg, const Address &a, bool is_double) {
379   int store_offset = offset();
380   if (is_double) {
381     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_stg), CLASSIC_IFUN(z_stg));
382   } else {
383     reg2mem_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_sty), CLASSIC_IFUN(z_st));
384   }
385   return store_offset;
386 }
387 
388 // Optimized emitter for mem to reg operations.
389 // Uses modern instructions if running on modern hardware, classic instructions
390 // otherwise. Prefers (usually shorter) classic instructions if applicable.
391 // Data register (reg) will be used as work register where possible.
392 void MacroAssembler::mem2reg_opt(Register reg,
393                                  int64_t  disp,
394                                  Register index,
395                                  Register base,
396                                  void (MacroAssembler::*modern) (Register, int64_t, Register, Register),
397                                  void (MacroAssembler::*classic)(Register, int64_t, Register, Register)) {
398   index = (index == noreg) ? Z_R0 : index;
399   if (Displacement::is_shortDisp(disp)) {
400     (this->*classic)(reg, disp, index, base);
401   } else {
402     if (Displacement::is_validDisp(disp)) {
403       (this->*modern)(reg, disp, index, base);
404     } else {
405       if ((reg == index) && (reg == base)) {
406         z_sllg(reg, reg, 1);
407         add2reg(reg, disp);
408         (this->*classic)(reg, 0, noreg, reg);
409       } else if ((reg == index) && (reg != Z_R0)) {
410         add2reg(reg, disp);
411         (this->*classic)(reg, 0, reg, base);
412       } else if (reg == base) {
413         add2reg(reg, disp);
414         (this->*classic)(reg, 0, index, reg);
415       } else if (reg != Z_R0) {
416         add2reg(reg, disp, base);
417         (this->*classic)(reg, 0, index, reg);
418       } else { // reg == Z_R0 && reg != base here
419         add2reg(base, disp);
420         (this->*classic)(reg, 0, index, base);
421         add2reg(base, -disp);
422       }
423     }
424   }
425 }
426 
427 void MacroAssembler::mem2reg_opt(Register reg, const Address &a, bool is_double) {
428   if (is_double) {
429     z_lg(reg, a);
430   } else {
431     mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_ly), CLASSIC_IFUN(z_l));
432   }
433 }
434 
435 void MacroAssembler::mem2reg_signed_opt(Register reg, const Address &a) {
436   mem2reg_opt(reg, a.disp20(), a.indexOrR0(), a.baseOrR0(), MODERN_IFUN(z_lgf), CLASSIC_IFUN(z_lgf));
437 }
438 
439 void MacroAssembler::and_imm(Register r, long mask,
440                              Register tmp /* = Z_R0 */,
441                              bool wide    /* = false */) {
442   assert(wide || Immediate::is_simm32(mask), "mask value too large");
443 
444   if (!wide) {
445     z_nilf(r, mask);
446     return;
447   }
448 
449   assert(r != tmp, "need a different temporary register!");
450   load_const_optimized(tmp, mask);
451   z_ngr(r, tmp);
452 }
453 
454 // Calculate the 1's complement.
455 // Note: The condition code is neither preserved nor correctly set by this code!!!
456 // Note: (wide == false) does not protect the high order half of the target register
457 //       from alteration. It only serves as optimization hint for 32-bit results.
458 void MacroAssembler::not_(Register r1, Register r2, bool wide) {
459 
460   if ((r2 == noreg) || (r2 == r1)) { // Calc 1's complement in place.
461     z_xilf(r1, -1);
462     if (wide) {
463       z_xihf(r1, -1);
464     }
465   } else { // Distinct src and dst registers.
466     load_const_optimized(r1, -1);
467     z_xgr(r1, r2);
468   }
469 }
470 
471 unsigned long MacroAssembler::create_mask(int lBitPos, int rBitPos) {
472   assert(lBitPos >=  0,      "zero is  leftmost bit position");
473   assert(rBitPos <= 63,      "63   is rightmost bit position");
474   assert(lBitPos <= rBitPos, "inverted selection interval");
475   return (lBitPos == 0 ? (unsigned long)(-1L) : ((1UL<<(63-lBitPos+1))-1)) & (~((1UL<<(63-rBitPos))-1));
476 }
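// Worked examples (added for illustration; bit 0 is the most significant bit):
//   create_mask(0, 7)   == 0xff00000000000000  (selects the high-order byte)
//   create_mask(56, 63) == 0x00000000000000ff  (selects the low-order byte)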
477 
478 // Helper function for the "Rotate_then_<logicalOP>" emitters.
479 // Rotate src, then mask register contents such that only bits in range survive.
480 // For oneBits == false, all bits not in range are set to 0. Useful for deleting all bits outside range.
481 // For oneBits == true,  all bits not in range are set to 1. Useful for preserving all bits outside range.
482 // The caller must ensure that the selected range only contains bits with defined value.
483 void MacroAssembler::rotate_then_mask(Register dst, Register src, int lBitPos, int rBitPos,
484                                       int nRotate, bool src32bit, bool dst32bit, bool oneBits) {
485   assert(!(dst32bit && lBitPos < 32), "selection interval out of range for int destination");
486   bool sll4rll = (nRotate >= 0) && (nRotate <= (63-rBitPos)); // Substitute SLL(G) for RLL(G).
487   bool srl4rll = (nRotate <  0) && (-nRotate <= lBitPos);     // Substitute SRL(G) for RLL(G).
488   //  Pre-determine which parts of dst will be zero after shift/rotate.
489   bool llZero  =  sll4rll && (nRotate >= 16);
490   bool lhZero  = (sll4rll && (nRotate >= 32)) || (srl4rll && (nRotate <= -48));
491   bool lfZero  = llZero && lhZero;
492   bool hlZero  = (sll4rll && (nRotate >= 48)) || (srl4rll && (nRotate <= -32));
493   bool hhZero  =                                 (srl4rll && (nRotate <= -16));
494   bool hfZero  = hlZero && hhZero;
495 
496   // rotate then mask src operand.
497   // if oneBits == true,  all bits outside selected range are 1s.
498   // if oneBits == false, all bits outside selected range are 0s.
499   if (src32bit) {   // There might be garbage in the upper 32 bits which will get masked away.
500     if (dst32bit) {
501       z_rll(dst, src, nRotate);   // Copy and rotate, upper half of reg remains undisturbed.
502     } else {
503       if      (sll4rll) { z_sllg(dst, src,  nRotate); }
504       else if (srl4rll) { z_srlg(dst, src, -nRotate); }
505       else              { z_rllg(dst, src,  nRotate); }
506     }
507   } else {
508     if      (sll4rll) { z_sllg(dst, src,  nRotate); }
509     else if (srl4rll) { z_srlg(dst, src, -nRotate); }
510     else              { z_rllg(dst, src,  nRotate); }
511   }
512 
513   unsigned long  range_mask    = create_mask(lBitPos, rBitPos);
514   unsigned int   range_mask_h  = (unsigned int)(range_mask >> 32);
515   unsigned int   range_mask_l  = (unsigned int)range_mask;
516   unsigned short range_mask_hh = (unsigned short)(range_mask >> 48);
517   unsigned short range_mask_hl = (unsigned short)(range_mask >> 32);
518   unsigned short range_mask_lh = (unsigned short)(range_mask >> 16);
519   unsigned short range_mask_ll = (unsigned short)range_mask;
520   // Works for z9 and newer H/W.
521   if (oneBits) {
522     if ((~range_mask_l) != 0)                { z_oilf(dst, ~range_mask_l); } // All bits outside range become 1s.
523     if (((~range_mask_h) != 0) && !dst32bit) { z_oihf(dst, ~range_mask_h); }
524   } else {
525     // All bits outside range become 0s
526     if (((~range_mask_l) != 0) &&              !lfZero) {
527       z_nilf(dst, range_mask_l);
528     }
529     if (((~range_mask_h) != 0) && !dst32bit && !hfZero) {
530       z_nihf(dst, range_mask_h);
531     }
532   }
533 }
534 
535 // Rotate src, then insert selected range from rotated src into dst.
536 // Clear dst before, if requested.
537 void MacroAssembler::rotate_then_insert(Register dst, Register src, int lBitPos, int rBitPos,
538                                         int nRotate, bool clear_dst) {
539   // This version does not depend on src being zero-extended int2long.
540   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
541   z_risbg(dst, src, lBitPos, rBitPos, nRotate, clear_dst); // Rotate, then insert selected, clear the rest.
542 }
543 
544 // Rotate src, then and selected range from rotated src into dst.
545 // Set condition code only if so requested. Otherwise it is unpredictable.
546 // See performance note in macroAssembler_s390.hpp for important information.
547 void MacroAssembler::rotate_then_and(Register dst, Register src, int lBitPos, int rBitPos,
548                                      int nRotate, bool test_only) {
549   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
550   // This version does not depend on src being zero-extended int2long.
551   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
552   z_rnsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then and selected.
553 }
554 
555 // Rotate src, then or selected range from rotated src into dst.
556 // Set condition code only if so requested. Otherwise it is unpredictable.
557 // See performance note in macroAssembler_s390.hpp for important information.
558 void MacroAssembler::rotate_then_or(Register dst, Register src,  int  lBitPos,  int  rBitPos,
559                                     int nRotate, bool test_only) {
560   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
561   // This version does not depend on src being zero-extended int2long.
562   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
563   z_rosbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then or selected.
564 }
565 
566 // Rotate src, then xor selected range from rotated src into dst.
567 // Set condition code only if so requested. Otherwise it is unpredictable.
568 // See performance note in macroAssembler_s390.hpp for important information.
569 void MacroAssembler::rotate_then_xor(Register dst, Register src,  int  lBitPos,  int  rBitPos,
570                                      int nRotate, bool test_only) {
571   guarantee(!test_only, "Emitter not fit for test_only instruction variant.");
572   // This version does not depend on src being zero-extended int2long.
573   nRotate &= 0x003f;                                       // For risbg, pretend it's an unsigned value.
574   z_rxsbg(dst, src, lBitPos, rBitPos, nRotate, test_only); // Rotate, then xor selected.
575 }
576 
577 void MacroAssembler::add64(Register r1, RegisterOrConstant inc) {
578   if (inc.is_register()) {
579     z_agr(r1, inc.as_register());
580   } else { // constant
581     intptr_t imm = inc.as_constant();
582     add2reg(r1, imm);
583   }
584 }
585 // Helper function to multiply the 64bit contents of a register by a 16bit constant.
586 // The optimization tries to avoid the mghi instruction, since it uses the FPU for
587 // calculation and is thus rather slow.
588 //
589 // There is no handling for special cases, e.g. cval==0 or cval==1.
590 //
591 // Returns len of generated code block.
592 unsigned int MacroAssembler::mul_reg64_const16(Register rval, Register work, int cval) {
593   int block_start = offset();
594 
595   bool sign_flip = cval < 0;
596   cval = sign_flip ? -cval : cval;
597 
598   BLOCK_COMMENT("Reg64*Con16 {");
599 
600   int bit1 = cval & -cval;
601   if (bit1 == cval) {
602     z_sllg(rval, rval, exact_log2(bit1));
603     if (sign_flip) { z_lcgr(rval, rval); }
604   } else {
605     int bit2 = (cval-bit1) & -(cval-bit1);
606     if ((bit1+bit2) == cval) {
607       z_sllg(work, rval, exact_log2(bit1));
608       z_sllg(rval, rval, exact_log2(bit2));
609       z_agr(rval, work);
610       if (sign_flip) { z_lcgr(rval, rval); }
611     } else {
612       if (sign_flip) { z_mghi(rval, -cval); }
613       else           { z_mghi(rval,  cval); }
614     }
615   }
616   BLOCK_COMMENT("} Reg64*Con16");
617 
618   int block_end = offset();
619   return block_end - block_start;
620 }
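// Illustrative example (added): for cval == 12 the emitter finds bit1 == 4 and bit2 == 8,
// so it emits   work = rval << 2;  rval = rval << 3;  rval += work;   (i.e. rval * 12)
// instead of MGHI. Constants that are neither a power of two nor the sum of two powers
// of two fall back to MGHI.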
621 
622 // Generic operation r1 := r2 + imm.
623 //
624 // Should produce the best code for each supported CPU version.
625 // r2 == noreg yields r1 := r1 + imm
626 // imm == 0 emits either no instruction or r1 := r2 !
627 // NOTES: 1) Don't use this function where fixed sized
628 //           instruction sequences are required!!!
629 //        2) Don't use this function if condition code
630 //           setting is required!
631 //        3) Despite being declared as int64_t, the parameter imm
632 //           must be a simm_32 value (= signed 32-bit integer).
633 void MacroAssembler::add2reg(Register r1, int64_t imm, Register r2) {
634   assert(Immediate::is_simm32(imm), "probably an implicit conversion went wrong");
635 
636   if (r2 == noreg) { r2 = r1; }
637 
638   // Handle special case imm == 0.
639   if (imm == 0) {
640     lgr_if_needed(r1, r2);
641     // Nothing else to do.
642     return;
643   }
644 
645   if (!PreferLAoverADD || (r2 == Z_R0)) {
646     bool distinctOpnds = VM_Version::has_DistinctOpnds();
647 
648     // Can we encode imm in 16 bits signed?
649     if (Immediate::is_simm16(imm)) {
650       if (r1 == r2) {
651         z_aghi(r1, imm);
652         return;
653       }
654       if (distinctOpnds) {
655         z_aghik(r1, r2, imm);
656         return;
657       }
658       z_lgr(r1, r2);
659       z_aghi(r1, imm);
660       return;
661     }
662   } else {
663     // Can we encode imm in 12 bits unsigned?
664     if (Displacement::is_shortDisp(imm)) {
665       z_la(r1, imm, r2);
666       return;
667     }
668     // Can we encode imm in 20 bits signed?
669     if (Displacement::is_validDisp(imm)) {
670       // Always use LAY instruction, so we don't need the tmp register.
671       z_lay(r1, imm, r2);
672       return;
673     }
674 
675   }
676 
677   // Can handle it (all possible values) with long immediates.
678   lgr_if_needed(r1, r2);
679   z_agfi(r1, imm);
680 }
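// Examples of the resulting instruction selection (a sketch, assuming PreferLAoverADD is off
// and the distinct-operands facility is available):
//   add2reg(r1, 8)          -> AGHI  r1,8
//   add2reg(r1, 8, r2)      -> AGHIK r1,r2,8
//   add2reg(r1, 70000, r2)  -> LGR r1,r2; AGFI r1,70000   (does not fit in simm16)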
681 
682 // Generic operation r := b + x + d
683 //
684 // Addition of several operands with address generation semantics - sort of:
685 //  - no restriction on the registers. Any register will do for any operand.
686 //  - x == noreg: operand will be disregarded.
687 //  - b == noreg: will use (contents of) result reg as operand (r := r + d).
688 //  - x == Z_R0:  just disregard
689 //  - b == Z_R0:  use as operand. This is not address generation semantics!!!
690 //
691 // The same restrictions as on add2reg() are valid!!!
692 void MacroAssembler::add2reg_with_index(Register r, int64_t d, Register x, Register b) {
693   assert(Immediate::is_simm32(d), "probably an implicit conversion went wrong");
694 
695   if (x == noreg) { x = Z_R0; }
696   if (b == noreg) { b = r; }
697 
698   // Handle special case x == R0.
699   if (x == Z_R0) {
700     // Can simply add the immediate value to the base register.
701     add2reg(r, d, b);
702     return;
703   }
704 
705   if (!PreferLAoverADD || (b == Z_R0)) {
706     bool distinctOpnds = VM_Version::has_DistinctOpnds();
707     // Handle special case d == 0.
708     if (d == 0) {
709       if (b == x)        { z_sllg(r, b, 1); return; }
710       if (r == x)        { z_agr(r, b);     return; }
711       if (r == b)        { z_agr(r, x);     return; }
712       if (distinctOpnds) { z_agrk(r, x, b); return; }
713       z_lgr(r, b);
714       z_agr(r, x);
715     } else {
716       if (x == b)             { z_sllg(r, x, 1); }
717       else if (r == x)        { z_agr(r, b); }
718       else if (r == b)        { z_agr(r, x); }
719       else if (distinctOpnds) { z_agrk(r, x, b); }
720       else {
721         z_lgr(r, b);
722         z_agr(r, x);
723       }
724       add2reg(r, d);
725     }
726   } else {
727     // Can we encode imm in 12 bits unsigned?
728     if (Displacement::is_shortDisp(d)) {
729       z_la(r, d, x, b);
730       return;
731     }
732     // Can we encode imm in 20 bits signed?
733     if (Displacement::is_validDisp(d)) {
734       z_lay(r, d, x, b);
735       return;
736     }
737     z_la(r, 0, x, b);
738     add2reg(r, d);
739   }
740 }
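// Example (added): with PreferLAoverADD enabled and a 12-bit displacement,
// add2reg_with_index(r, 8, x, b) collapses to a single LA r,8(x,b); otherwise index and
// base are combined with AGR/AGRK and the displacement is added via add2reg().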
741 
742 // Generic emitter (32bit) for direct memory increment.
743 // For optimal code, do not specify Z_R0 as temp register.
744 void MacroAssembler::add2mem_32(const Address &a, int64_t imm, Register tmp) {
745   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
746     z_asi(a, imm);
747   } else {
748     z_lgf(tmp, a);
749     add2reg(tmp, imm);
750     z_st(tmp, a);
751   }
752 }
753 
754 void MacroAssembler::add2mem_64(const Address &a, int64_t imm, Register tmp) {
755   if (VM_Version::has_MemWithImmALUOps() && Immediate::is_simm8(imm)) {
756     z_agsi(a, imm);
757   } else {
758     z_lg(tmp, a);
759     add2reg(tmp, imm);
760     z_stg(tmp, a);
761   }
762 }
763 
764 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed) {
765   switch (size_in_bytes) {
766     case  8: z_lg(dst, src); break;
767     case  4: is_signed ? z_lgf(dst, src) : z_llgf(dst, src); break;
768     case  2: is_signed ? z_lgh(dst, src) : z_llgh(dst, src); break;
769     case  1: is_signed ? z_lgb(dst, src) : z_llgc(dst, src); break;
770     default: ShouldNotReachHere();
771   }
772 }
773 
774 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
775   switch (size_in_bytes) {
776     case  8: z_stg(src, dst); break;
777     case  4: z_st(src, dst); break;
778     case  2: z_sth(src, dst); break;
779     case  1: z_stc(src, dst); break;
780     default: ShouldNotReachHere();
781   }
782 }
783 
784 // Split a si20 offset (20bit, signed) into an ui12 offset (12bit, unsigned) and
785 // a high-order summand in register tmp.
786 //
787 // return value: <  0: No split required, si20 actually has property uimm12.
788 //               >= 0: Split performed. Use return value as uimm12 displacement and
789 //                     tmp as index register.
790 int MacroAssembler::split_largeoffset(int64_t si20_offset, Register tmp, bool fixed_codelen, bool accumulate) {
791   assert(Immediate::is_simm20(si20_offset), "sanity");
792   int lg_off = (int)si20_offset &  0x0fff; // Punch out low-order 12 bits, always positive.
793   int ll_off = (int)si20_offset & ~0x0fff; // Force low-order 12 bits to zero.
794   assert((Displacement::is_shortDisp(si20_offset) && (ll_off == 0)) ||
795          !Displacement::is_shortDisp(si20_offset), "unexpected offset values");
796   assert((lg_off+ll_off) == si20_offset, "offset splitup error");
797 
798   Register work = accumulate? Z_R0 : tmp;
799 
800   if (fixed_codelen) {          // Len of code = 10 = 4 + 6.
801     z_lghi(work, ll_off>>12);   // Implicit sign extension.
802     z_slag(work, work, 12);
803   } else {                      // Len of code = 0..10.
804     if (ll_off == 0) { return -1; }
805     // ll_off has 8 significant bits (at most) plus sign.
806     if ((ll_off & 0x0000f000) == 0) {    // Non-zero bits only in upper halfbyte.
807       z_llilh(work, ll_off >> 16);
808       if (ll_off < 0) {                  // Sign-extension required.
809         z_lgfr(work, work);
810       }
811     } else {
812       if ((ll_off & 0x000f0000) == 0) {  // Non-zero bits only in lower halfbyte.
813         z_llill(work, ll_off);
814       } else {                           // Non-zero bits in both halfbytes.
815         z_lghi(work, ll_off>>12);        // Implicit sign extension.
816         z_slag(work, work, 12);
817       }
818     }
819   }
820   if (accumulate) { z_algr(tmp, work); } // len of code += 4
821   return lg_off;
822 }
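// Worked example (added): si20_offset == 0x12345 splits into ll_off == 0x12000, which is
// materialized in (or added to) tmp, and the returned uimm12 0x345, so the caller can
// address the target as 0x345(tmp) or 0x345(tmp,base).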
823 
824 void MacroAssembler::load_float_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
825   if (Displacement::is_validDisp(si20)) {
826     z_ley(t, si20, a);
827   } else {
828     // Fixed_codelen = true is a simple way to ensure that the size of load_float_largeoffset
829     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
830     // pool loads).
831     bool accumulate    = true;
832     bool fixed_codelen = true;
833     Register work;
834 
835     if (fixed_codelen) {
836       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
837     } else {
838       accumulate = (a == tmp);
839     }
840     work = tmp;
841 
842     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
843     if (disp12 < 0) {
844       z_le(t, si20, work);
845     } else {
846       if (accumulate) {
847         z_le(t, disp12, work);
848       } else {
849         z_le(t, disp12, work, a);
850       }
851     }
852   }
853 }
854 
855 void MacroAssembler::load_double_largeoffset(FloatRegister t, int64_t si20, Register a, Register tmp) {
856   if (Displacement::is_validDisp(si20)) {
857     z_ldy(t, si20, a);
858   } else {
859     // Fixed_codelen = true is a simple way to ensure that the size of load_double_largeoffset
860     // does not depend on si20 (scratch buffer emit size == code buffer emit size for constant
861     // pool loads).
862     bool accumulate    = true;
863     bool fixed_codelen = true;
864     Register work;
865 
866     if (fixed_codelen) {
867       z_lgr(tmp, a);  // Lgr_if_needed not applicable due to fixed_codelen.
868     } else {
869       accumulate = (a == tmp);
870     }
871     work = tmp;
872 
873     int disp12 = split_largeoffset(si20, work, fixed_codelen, accumulate);
874     if (disp12 < 0) {
875       z_ld(t, si20, work);
876     } else {
877       if (accumulate) {
878         z_ld(t, disp12, work);
879       } else {
880         z_ld(t, disp12, work, a);
881       }
882     }
883   }
884 }
885 
886 // PCrelative TOC access.
887 // Returns distance (in bytes) from current position to start of consts section.
888 // Returns 0 (zero) if no consts section exists or if it has size zero.
889 long MacroAssembler::toc_distance() {
890   CodeSection* cs = code()->consts();
891   return (long)((cs != NULL) ? cs->start()-pc() : 0);
892 }
893 
894 // Implementation on x86/sparc assumes that constant and instruction section are
895 // adjacent, but this doesn't hold. Two special situations may occur that we must
896 // be able to handle:
897 //   1. The const section may be located apart from the inst section.
898 //   2. The const section may be empty.
899 // In both cases, we use the const section's start address to compute the "TOC".
900 // This seems to occur only temporarily; in the final step we always seem to end up
901 // with the pc-relative variant.
902 //
903 // PC-relative offset could be +/-2**32 -> use long for disp
904 // Furthermore, it makes no sense to have special code for
905 // adjacent const and inst sections.
906 void MacroAssembler::load_toc(Register Rtoc) {
907   // Simply use distance from start of const section (should be patched in the end).
908   long disp = toc_distance();
909 
910   RelocationHolder rspec = internal_word_Relocation::spec(pc() + disp);
911   relocate(rspec);
912   z_larl(Rtoc, RelAddr::pcrel_off32(disp));  // Offset is in halfwords.
913 }
914 
915 // PCrelative TOC access.
916 // Load from anywhere pcrelative (with relocation of load instr)
917 void MacroAssembler::load_long_pcrelative(Register Rdst, address dataLocation) {
918   address          pc             = this->pc();
919   ptrdiff_t        total_distance = dataLocation - pc;
920   RelocationHolder rspec          = internal_word_Relocation::spec(dataLocation);
921 
922   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
923   assert(total_distance != 0, "sanity");
924 
925   // Some extra safety net.
926   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
927     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_long_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
928   }
929 
930   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
931   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
932 }
933 
934 
935 // PCrelative TOC access.
936 // Load from anywhere pcrelative (with relocation of load instr)
937 // loaded addr has to be relocated when added to constant pool.
938 void MacroAssembler::load_addr_pcrelative(Register Rdst, address addrLocation) {
939   address          pc             = this->pc();
940   ptrdiff_t        total_distance = addrLocation - pc;
941   RelocationHolder rspec          = internal_word_Relocation::spec(addrLocation);
942 
943   assert((total_distance & 0x01L) == 0, "halfword alignment is mandatory");
944 
945   // Some extra safety net.
946   if (!RelAddr::is_in_range_of_RelAddr32(total_distance)) {
947     guarantee(RelAddr::is_in_range_of_RelAddr32(total_distance), "load_addr_pcrelative can't handle distance " INTPTR_FORMAT, total_distance);
948   }
949 
950   (this)->relocate(rspec, relocInfo::pcrel_addr_format);
951   z_lgrl(Rdst, RelAddr::pcrel_off32(total_distance));
952 }
953 
954 // Generic operation: load a value from memory and test.
955 // CondCode indicates the sign (<0, ==0, >0) of the loaded value.
956 void MacroAssembler::load_and_test_byte(Register dst, const Address &a) {
957   z_lb(dst, a);
958   z_ltr(dst, dst);
959 }
960 
961 void MacroAssembler::load_and_test_short(Register dst, const Address &a) {
962   int64_t disp = a.disp20();
963   if (Displacement::is_shortDisp(disp)) {
964     z_lh(dst, a);
965   } else if (Displacement::is_longDisp(disp)) {
966     z_lhy(dst, a);
967   } else {
968     guarantee(false, "displacement out of range");
969   }
970   z_ltr(dst, dst);
971 }
972 
973 void MacroAssembler::load_and_test_int(Register dst, const Address &a) {
974   z_lt(dst, a);
975 }
976 
977 void MacroAssembler::load_and_test_int2long(Register dst, const Address &a) {
978   z_ltgf(dst, a);
979 }
980 
981 void MacroAssembler::load_and_test_long(Register dst, const Address &a) {
982   z_ltg(dst, a);
983 }
984 
985 // Test a bit in memory.
986 void MacroAssembler::testbit(const Address &a, unsigned int bit) {
987   assert(a.index() == noreg, "no index reg allowed in testbit");
988   if (bit <= 7) {
989     z_tm(a.disp() + 3, a.base(), 1 << bit);
990   } else if (bit <= 15) {
991     z_tm(a.disp() + 2, a.base(), 1 << (bit - 8));
992   } else if (bit <= 23) {
993     z_tm(a.disp() + 1, a.base(), 1 << (bit - 16));
994   } else if (bit <= 31) {
995     z_tm(a.disp() + 0, a.base(), 1 << (bit - 24));
996   } else {
997     ShouldNotReachHere();
998   }
999 }
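// Note (added): the disp+3 ... disp+0 byte selection above reflects big-endian byte order;
// the least significant bit of the 32-bit word at 'a' lives in the byte with the highest address.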
1000 
1001 // Test a bit in a register. Result is reflected in CC.
1002 void MacroAssembler::testbit(Register r, unsigned int bitPos) {
1003   if (bitPos < 16) {
1004     z_tmll(r, 1U<<bitPos);
1005   } else if (bitPos < 32) {
1006     z_tmlh(r, 1U<<(bitPos-16));
1007   } else if (bitPos < 48) {
1008     z_tmhl(r, 1U<<(bitPos-32));
1009   } else if (bitPos < 64) {
1010     z_tmhh(r, 1U<<(bitPos-48));
1011   } else {
1012     ShouldNotReachHere();
1013   }
1014 }
1015 
1016 void MacroAssembler::prefetch_read(Address a) {
1017   z_pfd(1, a.disp20(), a.indexOrR0(), a.base());
1018 }
1019 void MacroAssembler::prefetch_update(Address a) {
1020   z_pfd(2, a.disp20(), a.indexOrR0(), a.base());
1021 }
1022 
1023 // Clear a register, i.e. load const zero into reg.
1024 // Return len (in bytes) of generated instruction(s).
1025 // whole_reg: Clear 64 bits if true, 32 bits otherwise.
1026 // set_cc:    Use instruction that sets the condition code, if true.
1027 int MacroAssembler::clear_reg(Register r, bool whole_reg, bool set_cc) {
1028   unsigned int start_off = offset();
1029   if (whole_reg) {
1030     set_cc ? z_xgr(r, r) : z_laz(r, 0, Z_R0);
1031   } else {  // Only 32bit register.
1032     set_cc ? z_xr(r, r) : z_lhi(r, 0);
1033   }
1034   return offset() - start_off;
1035 }
1036 
1037 #ifdef ASSERT
1038 int MacroAssembler::preset_reg(Register r, unsigned long pattern, int pattern_len) {
1039   switch (pattern_len) {
1040     case 1:
1041       pattern = (pattern & 0x000000ff)  | ((pattern & 0x000000ff)<<8);
1042     case 2:
1043       pattern = (pattern & 0x0000ffff)  | ((pattern & 0x0000ffff)<<16);
1044     case 4:
1045       pattern = (pattern & 0xffffffffL) | ((pattern & 0xffffffffL)<<32);
1046     case 8:
1047       return load_const_optimized_rtn_len(r, pattern, true);
1048       break;
1049     default:
1050       guarantee(false, "preset_reg: bad len");
1051   }
1052   return 0;
1053 }
1054 #endif
1055 
1056 // addr: Address descriptor of memory to clear. The index register will not be used!
1057 // size: Number of bytes to clear.
1058 //    !!! DO NOT USE THEM FOR ATOMIC MEMORY CLEARING !!!
1059 //    !!! Use store_const() instead                  !!!
1060 void MacroAssembler::clear_mem(const Address& addr, unsigned size) {
1061   guarantee(size <= 256, "MacroAssembler::clear_mem: size too large");
1062 
1063   if (size == 1) {
1064     z_mvi(addr, 0);
1065     return;
1066   }
1067 
1068   switch (size) {
1069     case 2: z_mvhhi(addr, 0);
1070       return;
1071     case 4: z_mvhi(addr, 0);
1072       return;
1073     case 8: z_mvghi(addr, 0);
1074       return;
1075     default: ; // Fallthru to xc.
1076   }
1077 
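  // All other sizes (3, 5-7, 9-256) are cleared here: XC with identical source and
  // destination zeroes the field. Not atomic - see the warning above.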
1078   z_xc(addr, size, addr);
1079 }
1080 
1081 void MacroAssembler::align(int modulus) {
1082   while (offset() % modulus != 0) z_nop();
1083 }
1084 
1085 // Special version for non-relocatable code if required alignment
1086 // is larger than CodeEntryAlignment.
1087 void MacroAssembler::align_address(int modulus) {
1088   while ((uintptr_t)pc() % modulus != 0) z_nop();
1089 }
1090 
1091 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
1092                                          Register temp_reg,
1093                                          int64_t extra_slot_offset) {
1094   // On Z, we can have index and disp in an Address. So don't call argument_offset,
1095   // which issues an unnecessary add instruction.
1096   int stackElementSize = Interpreter::stackElementSize;
1097   int64_t offset = extra_slot_offset * stackElementSize;
1098   const Register argbase = Z_esp;
1099   if (arg_slot.is_constant()) {
1100     offset += arg_slot.as_constant() * stackElementSize;
1101     return Address(argbase, offset);
1102   }
1103   // else
1104   assert(temp_reg != noreg, "must specify");
1105   assert(temp_reg != Z_ARG1, "base and index are conflicting");
1106   z_sllg(temp_reg, arg_slot.as_register(), exact_log2(stackElementSize)); // tempreg = arg_slot << 3
1107   return Address(argbase, temp_reg, offset);
1108 }
1109 
1110 
1111 //===================================================================
1112 //===   START   C O N S T A N T S   I N   C O D E   S T R E A M   ===
1113 //===================================================================
1114 //===            P A T C H A B L E   C O N S T A N T S            ===
1115 //===================================================================
1116 
1117 
1118 //---------------------------------------------------
1119 //  Load (patchable) constant into register
1120 //---------------------------------------------------
1121 
1122 
1123 // Load absolute address (and try to optimize).
1124 //   Note: This method is usable only for position-fixed code,
1125 //         referring to a position-fixed target location.
1126 //         If not so, relocations and patching must be used.
1127 void MacroAssembler::load_absolute_address(Register d, address addr) {
1128   assert(addr != NULL, "should not happen");
1129   BLOCK_COMMENT("load_absolute_address:");
1130   if (addr == NULL) {
1131     z_larl(d, pc()); // Dummy emit for size calc.
1132     return;
1133   }
1134 
1135   if (RelAddr::is_in_range_of_RelAddr32(addr, pc())) {
1136     z_larl(d, addr);
1137     return;
1138   }
1139 
1140   load_const_optimized(d, (long)addr);
1141 }
1142 
1143 // Load a 64bit constant.
1144 // Patchable code sequence, but not atomically patchable.
1145 // Make sure to keep code size constant -> no value-dependent optimizations.
1146 // Do not kill condition code.
1147 void MacroAssembler::load_const(Register t, long x) {
1148   // Note: Right shift is only cleanly defined for unsigned types
1149   //       or for signed types with nonnegative values.
1150   Assembler::z_iihf(t, (long)((unsigned long)x >> 32));
1151   Assembler::z_iilf(t, (long)((unsigned long)x & 0xffffffffUL));
1152 }
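// Example (added): load_const(t, 0x1122334455667788) emits IIHF t,0x11223344 followed by
// IILF t,0x55667788 - always the same 12-byte sequence, which is what is_load_const() and
// patch_const() below rely on.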
1153 
1154 // Load a 32bit constant into a 64bit register, sign-extend or zero-extend.
1155 // Patchable code sequence, but not atomically patchable.
1156 // Make sure to keep code size constant -> no value-dependent optimizations.
1157 // Do not kill condition code.
1158 void MacroAssembler::load_const_32to64(Register t, int64_t x, bool sign_extend) {
1159   if (sign_extend) { Assembler::z_lgfi(t, x); }
1160   else             { Assembler::z_llilf(t, x); }
1161 }
1162 
1163 // Load narrow oop constant, no decompression.
1164 void MacroAssembler::load_narrow_oop(Register t, narrowOop a) {
1165   assert(UseCompressedOops, "must be on to call this method");
1166   load_const_32to64(t, a, false /*sign_extend*/);
1167 }
1168 
1169 // Load narrow klass constant, compression required.
1170 void MacroAssembler::load_narrow_klass(Register t, Klass* k) {
1171   assert(UseCompressedClassPointers, "must be on to call this method");
1172   narrowKlass encoded_k = CompressedKlassPointers::encode(k);
1173   load_const_32to64(t, encoded_k, false /*sign_extend*/);
1174 }
1175 
1176 //------------------------------------------------------
1177 //  Compare (patchable) constant with register.
1178 //------------------------------------------------------
1179 
1180 // Compare narrow oop in reg with narrow oop constant, no decompression.
1181 void MacroAssembler::compare_immediate_narrow_oop(Register oop1, narrowOop oop2) {
1182   assert(UseCompressedOops, "must be on to call this method");
1183 
1184   Assembler::z_clfi(oop1, oop2);
1185 }
1186 
1187 // Compare narrow klass in reg with narrow klass constant, no decompression.
1188 void MacroAssembler::compare_immediate_narrow_klass(Register klass1, Klass* klass2) {
1189   assert(UseCompressedClassPointers, "must be on to call this method");
1190   narrowKlass encoded_k = CompressedKlassPointers::encode(klass2);
1191 
1192   Assembler::z_clfi(klass1, encoded_k);
1193 }
1194 
1195 //----------------------------------------------------------
1196 //  Check which kind of load_constant we have here.
1197 //----------------------------------------------------------
1198 
1199 // Detection of CPU version dependent load_const sequence.
1200 // The detection is valid only for code sequences generated by load_const,
1201 // not load_const_optimized.
1202 bool MacroAssembler::is_load_const(address a) {
1203   unsigned long inst1, inst2;
1204   unsigned int  len1,  len2;
1205 
1206   len1 = get_instruction(a, &inst1);
1207   len2 = get_instruction(a + len1, &inst2);
1208 
1209   return is_z_iihf(inst1) && is_z_iilf(inst2);
1210 }
1211 
1212 // Detection of CPU version dependent load_const_32to64 sequence.
1213 // Mostly used for narrow oops and narrow Klass pointers.
1214 // The detection is valid only for code sequences generated by load_const_32to64.
1215 bool MacroAssembler::is_load_const_32to64(address pos) {
1216   unsigned long inst1, inst2;
1217   unsigned int len1;
1218 
1219   len1 = get_instruction(pos, &inst1);
1220   return is_z_llilf(inst1);
1221 }
1222 
1223 // Detection of compare_immediate_narrow sequence.
1224 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1225 bool MacroAssembler::is_compare_immediate32(address pos) {
1226   return is_equal(pos, CLFI_ZOPC, RIL_MASK);
1227 }
1228 
1229 // Detection of compare_immediate_narrow sequence.
1230 // The detection is valid only for code sequences generated by compare_immediate_narrow_oop.
1231 bool MacroAssembler::is_compare_immediate_narrow_oop(address pos) {
1232   return is_compare_immediate32(pos);
1233 }
1234 
1235 // Detection of compare_immediate_narrow sequence.
1236 // The detection is valid only for code sequences generated by compare_immediate_narrow_klass.
1237 bool MacroAssembler::is_compare_immediate_narrow_klass(address pos) {
1238   return is_compare_immediate32(pos);
1239 }
1240 
1241 //-----------------------------------
1242 //  patch the load_constant
1243 //-----------------------------------
1244 
1245 // CPU-version dependent patching of load_const.
1246 void MacroAssembler::patch_const(address a, long x) {
1247   assert(is_load_const(a), "not a load of a constant");
1248   // Note: Right shift is only cleanly defined for unsigned types
1249   //       or for signed types with nonnegative values.
1250   set_imm32((address)a, (long)((unsigned long)x >> 32));
1251   set_imm32((address)(a + 6), (long)((unsigned long)x & 0xffffffffUL));
1252 }
1253 
1254 // Patching the value of CPU version dependent load_const_32to64 sequence.
1255 // The passed ptr MUST be in compressed format!
1256 int MacroAssembler::patch_load_const_32to64(address pos, int64_t np) {
1257   assert(is_load_const_32to64(pos), "not a load of a narrow ptr (oop or klass)");
1258 
1259   set_imm32(pos, np);
1260   return 6;
1261 }
1262 
1263 // Patching the value of CPU version dependent compare_immediate_narrow sequence.
1264 // The passed ptr MUST be in compressed format!
1265 int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
1266   assert(is_compare_immediate32(pos), "not a compressed ptr compare");
1267 
1268   set_imm32(pos, np);
1269   return 6;
1270 }
1271 
1272 // Patching the immediate value of CPU version dependent load_narrow_oop sequence.
1273 // The passed ptr must NOT be in compressed format!
1274 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
1275   assert(UseCompressedOops, "Can only patch compressed oops");
1276 
1277   narrowOop no = CompressedOops::encode(o);
1278   return patch_load_const_32to64(pos, no);
1279 }
1280 
1281 // Patching the immediate value of CPU version dependent load_narrow_klass sequence.
1282 // The passed ptr must NOT be in compressed format!
patch_load_narrow_klass(address pos,Klass * k)1283 int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
1284   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1285 
1286   narrowKlass nk = CompressedKlassPointers::encode(k);
1287   return patch_load_const_32to64(pos, nk);
1288 }
1289 
1290 // Patching the immediate value of CPU version dependent compare_immediate_narrow_oop sequence.
1291 // The passed ptr must NOT be in compressed format!
patch_compare_immediate_narrow_oop(address pos,oop o)1292 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
1293   assert(UseCompressedOops, "Can only patch compressed oops");
1294 
1295   narrowOop no = CompressedOops::encode(o);
1296   return patch_compare_immediate_32(pos, no);
1297 }
1298 
1299 // Patching the immediate value of CPU version dependent compare_immediate_narrow_klass sequence.
1300 // The passed ptr must NOT be in compressed format!
patch_compare_immediate_narrow_klass(address pos,Klass * k)1301 int MacroAssembler::patch_compare_immediate_narrow_klass(address pos, Klass* k) {
1302   assert(UseCompressedClassPointers, "Can only patch compressed klass pointers");
1303 
1304   narrowKlass nk = CompressedKlassPointers::encode(k);
1305   return patch_compare_immediate_32(pos, nk);
1306 }
1307 
1308 //------------------------------------------------------------------------
1309 //  Extract the constant from a load_constant instruction stream.
1310 //------------------------------------------------------------------------
1311 
1312 // Get constant from a load_const sequence.
get_const(address a)1313 long MacroAssembler::get_const(address a) {
1314   assert(is_load_const(a), "not a load of a constant");
1315   unsigned long x;
1316   x =  (((unsigned long) (get_imm32(a,0) & 0xffffffff)) << 32);
1317   x |= (((unsigned long) (get_imm32(a,1) & 0xffffffff)));
1318   return (long) x;
1319 }
1320 
1321 //--------------------------------------
1322 //  Store a constant in memory.
1323 //--------------------------------------
1324 
1325 // General emitter to move a constant to memory.
1326 // The store is atomic.
1327 //  o Address must be given in RS format (no index register)
1328 //  o Displacement should be 12-bit unsigned for efficiency; 20-bit signed is also supported.
1329 //  o Constant can be 1, 2, 4, or 8 bytes, signed or unsigned.
1330 //  o Memory slot can be 1, 2, 4, or 8 bytes, signed or unsigned.
1331 //  o Memory slot must be at least as wide as constant, will assert otherwise.
1332 //  o Signed constants will sign-extend, unsigned constants will zero-extend to slot width.
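// Illustrative call (operands invented for this comment):
//   store_const(Address(Z_SP, 160), 1, 8, 2, Z_R0_scratch);
// stores the 2-byte constant 1 into an 8-byte slot. With the short displacement and
// simm16 value, this folds into a single, atomic MVGHI.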
store_const(const Address & dest,long imm,unsigned int lm,unsigned int lc,Register scratch)1333 int MacroAssembler::store_const(const Address &dest, long imm,
1334                                 unsigned int lm, unsigned int lc,
1335                                 Register scratch) {
1336   int64_t  disp = dest.disp();
1337   Register base = dest.base();
1338   assert(!dest.has_index(), "not supported");
1339   assert((lm==1)||(lm==2)||(lm==4)||(lm==8), "memory   length not supported");
1340   assert((lc==1)||(lc==2)||(lc==4)||(lc==8), "constant length not supported");
1341   assert(lm>=lc, "memory slot too small");
1342   assert(lc==8 || Immediate::is_simm(imm, lc*8), "const out of range");
1343   assert(Displacement::is_validDisp(disp), "displacement out of range");
1344 
1345   bool is_shortDisp = Displacement::is_shortDisp(disp);
1346   int store_offset = -1;
1347 
1348   // For target len == 1 it's easy.
1349   if (lm == 1) {
1350     store_offset = offset();
1351     if (is_shortDisp) {
1352       z_mvi(disp, base, imm);
1353       return store_offset;
1354     } else {
1355       z_mviy(disp, base, imm);
1356       return store_offset;
1357     }
1358   }
1359 
1360   // All the "good stuff" takes an unsigned displacement.
1361   if (is_shortDisp) {
1362     // NOTE: Cannot use clear_mem for imm==0, because it is not atomic.
1363 
1364     store_offset = offset();
1365     switch (lm) {
1366       case 2:  // Lc == 1 handled correctly here, even for unsigned. Instruction does no widening.
1367         z_mvhhi(disp, base, imm);
1368         return store_offset;
1369       case 4:
1370         if (Immediate::is_simm16(imm)) {
1371           z_mvhi(disp, base, imm);
1372           return store_offset;
1373         }
1374         break;
1375       case 8:
1376         if (Immediate::is_simm16(imm)) {
1377           z_mvghi(disp, base, imm);
1378           return store_offset;
1379         }
1380         break;
1381       default:
1382         ShouldNotReachHere();
1383         break;
1384     }
1385   }
1386 
1387   //  Can't optimize, so load value and store it.
1388   guarantee(scratch != noreg, "need a scratch register here!");
1389   if (imm != 0) {
1390     load_const_optimized(scratch, imm);  // Preserves CC anyway.
1391   } else {
1392     // Leave CC alone!!
1393     (void) clear_reg(scratch, true, false); // Indicate unused result.
1394   }
1395 
1396   store_offset = offset();
1397   if (is_shortDisp) {
1398     switch (lm) {
1399       case 2:
1400         z_sth(scratch, disp, Z_R0, base);
1401         return store_offset;
1402       case 4:
1403         z_st(scratch, disp, Z_R0, base);
1404         return store_offset;
1405       case 8:
1406         z_stg(scratch, disp, Z_R0, base);
1407         return store_offset;
1408       default:
1409         ShouldNotReachHere();
1410         break;
1411     }
1412   } else {
1413     switch (lm) {
1414       case 2:
1415         z_sthy(scratch, disp, Z_R0, base);
1416         return store_offset;
1417       case 4:
1418         z_sty(scratch, disp, Z_R0, base);
1419         return store_offset;
1420       case 8:
1421         z_stg(scratch, disp, Z_R0, base);
1422         return store_offset;
1423       default:
1424         ShouldNotReachHere();
1425         break;
1426     }
1427   }
1428   return -1; // should not reach here
1429 }
1430 
1431 //===================================================================
1432 //===       N O T   P A T C H A B L E   C O N S T A N T S         ===
1433 //===================================================================
1434 
1435 // Load constant x into register t with a fast instruction sequence
1436 // depending on the bits in x. Preserves CC under all circumstances.
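// A few sketched outcomes of the selection logic below (constants invented for this
// comment): x = 0x000000000000ffff fits a single LLILL (4 bytes),
// x = 0x0000000100000001 is built from LLIHL + IILL (8 bytes), and
// x = 0x123456789abcdef0 needs LLIHF + IILF (12 bytes).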
load_const_optimized_rtn_len(Register t,long x,bool emit)1437 int MacroAssembler::load_const_optimized_rtn_len(Register t, long x, bool emit) {
1438   if (x == 0) {
1439     int len;
1440     if (emit) {
1441       len = clear_reg(t, true, false);
1442     } else {
1443       len = 4;
1444     }
1445     return len;
1446   }
1447 
1448   if (Immediate::is_simm16(x)) {
1449     if (emit) { z_lghi(t, x); }
1450     return 4;
1451   }
1452 
1453   // 64 bit value: | part1 | part2 | part3 | part4 |
1454   // At least one part is not zero!
1455   // Note: Right shift is only cleanly defined for unsigned types
1456   //       or for signed types with nonnegative values.
1457   int part1 = (int)((unsigned long)x >> 48) & 0x0000ffff;
1458   int part2 = (int)((unsigned long)x >> 32) & 0x0000ffff;
1459   int part3 = (int)((unsigned long)x >> 16) & 0x0000ffff;
1460   int part4 = (int)x & 0x0000ffff;
1461   int part12 = (int)((unsigned long)x >> 32);
1462   int part34 = (int)x;
1463 
1464   // Lower word only (unsigned).
1465   if (part12 == 0) {
1466     if (part3 == 0) {
1467       if (emit) z_llill(t, part4);
1468       return 4;
1469     }
1470     if (part4 == 0) {
1471       if (emit) z_llilh(t, part3);
1472       return 4;
1473     }
1474     if (emit) z_llilf(t, part34);
1475     return 6;
1476   }
1477 
1478   // Upper word only.
1479   if (part34 == 0) {
1480     if (part1 == 0) {
1481       if (emit) z_llihl(t, part2);
1482       return 4;
1483     }
1484     if (part2 == 0) {
1485       if (emit) z_llihh(t, part1);
1486       return 4;
1487     }
1488     if (emit) z_llihf(t, part12);
1489     return 6;
1490   }
1491 
1492   // Lower word only (signed).
1493   if ((part1 == 0x0000ffff) && (part2 == 0x0000ffff) && ((part3 & 0x00008000) != 0)) {
1494     if (emit) z_lgfi(t, part34);
1495     return 6;
1496   }
1497 
1498   int len = 0;
1499 
1500   if ((part1 == 0) || (part2 == 0)) {
1501     if (part1 == 0) {
1502       if (emit) z_llihl(t, part2);
1503       len += 4;
1504     } else {
1505       if (emit) z_llihh(t, part1);
1506       len += 4;
1507     }
1508   } else {
1509     if (emit) z_llihf(t, part12);
1510     len += 6;
1511   }
1512 
1513   if ((part3 == 0) || (part4 == 0)) {
1514     if (part3 == 0) {
1515       if (emit) z_iill(t, part4);
1516       len += 4;
1517     } else {
1518       if (emit) z_iilh(t, part3);
1519       len += 4;
1520     }
1521   } else {
1522     if (emit) z_iilf(t, part34);
1523     len += 6;
1524   }
1525   return len;
1526 }
1527 
1528 //=====================================================================
1529 //===     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1530 //=====================================================================
1531 
1532 // Note: In the worst case, one of the scratch registers is destroyed!!!
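// (Specifically, the fallback paths in compare_and_branch_optimized and
//  branch_optimized may clobber Z_R0 or Z_R1.)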
compare32_and_branch(Register r1,RegisterOrConstant x2,branch_condition cond,Label & lbl)1533 void MacroAssembler::compare32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1534   // Right operand is constant.
1535   if (x2.is_constant()) {
1536     jlong value = x2.as_constant();
1537     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/true);
1538     return;
1539   }
1540 
1541   // Right operand is in register.
1542   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/true);
1543 }
1544 
1545 // Note: In the worst case, one of the scratch registers is destroyed!!!
compareU32_and_branch(Register r1,RegisterOrConstant x2,branch_condition cond,Label & lbl)1546 void MacroAssembler::compareU32_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1547   // Right operand is constant.
1548   if (x2.is_constant()) {
1549     jlong value = x2.as_constant();
1550     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/false, /*has_sign=*/false);
1551     return;
1552   }
1553 
1554   // Right operand is in register.
1555   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/false, /*has_sign=*/false);
1556 }
1557 
1558 // Note: In the worst case, one of the scratch registers is destroyed!!!
compare64_and_branch(Register r1,RegisterOrConstant x2,branch_condition cond,Label & lbl)1559 void MacroAssembler::compare64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1560   // Right operand is constant.
1561   if (x2.is_constant()) {
1562     jlong value = x2.as_constant();
1563     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/true);
1564     return;
1565   }
1566 
1567   // Right operand is in register.
1568   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/true);
1569 }
1570 
compareU64_and_branch(Register r1,RegisterOrConstant x2,branch_condition cond,Label & lbl)1571 void MacroAssembler::compareU64_and_branch(Register r1, RegisterOrConstant x2, branch_condition cond, Label& lbl) {
1572   // Right operand is constant.
1573   if (x2.is_constant()) {
1574     jlong value = x2.as_constant();
1575     compare_and_branch_optimized(r1, value, cond, lbl, /*len64=*/true, /*has_sign=*/false);
1576     return;
1577   }
1578 
1579   // Right operand is in register.
1580   compare_and_branch_optimized(r1, x2.as_register(), cond, lbl, /*len64=*/true, /*has_sign=*/false);
1581 }
1582 
1583 // Generate an optimal branch to the branch target.
1584 // Optimal means that a relative branch (brc or brcl) is used if the
1585 // branch distance is short enough. Loading the target address into a
1586 // register and branching via reg is used as fallback only.
1587 //
1588 // Used registers:
1589 //   Z_R1 - work reg. Holds branch target address.
1590 //          Used in fallback case only.
1591 //
1592 // This version of branch_optimized is good for cases where the target address is known
1593 // and constant, i.e. is never changed (no relocation, no patching).
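// Roughly (z/Architecture relative branches use halfword offsets): BRC reaches about
// +/-64 KiB from the branch, BRCL about +/-4 GiB; anything beyond that takes the
// load-and-BCR fallback via Z_R1.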
branch_optimized(Assembler::branch_condition cond,address branch_addr)1594 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, address branch_addr) {
1595   address branch_origin = pc();
1596 
1597   if (RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1598     z_brc(cond, branch_addr);
1599   } else if (RelAddr::is_in_range_of_RelAddr32(branch_addr, branch_origin)) {
1600     z_brcl(cond, branch_addr);
1601   } else {
1602     load_const_optimized(Z_R1, branch_addr);  // CC must not get killed by load_const_optimized.
1603     z_bcr(cond, Z_R1);
1604   }
1605 }
1606 
1607 // This version of branch_optimized is good for cases where the target address
1608 // is potentially not yet known at the time the code is emitted.
1609 //
1610 // One very common case is a branch to an unbound label which is handled here.
1611 // The caller might know (or hope) that the branch distance is short enough
1612 // to be encoded in a 16-bit relative address. In that case, the caller passes a
1613 // NearLabel branch_target.
1614 // Care must be taken with unbound labels. Each call to target(label) creates
1615 // an entry in the patch queue for that label to patch all references of the label
1616 // once it gets bound. Those recorded patch locations must be patchable. Otherwise,
1617 // an assertion fires at patch time.
branch_optimized(Assembler::branch_condition cond,Label & branch_target)1618 void MacroAssembler::branch_optimized(Assembler::branch_condition cond, Label& branch_target) {
1619   if (branch_target.is_bound()) {
1620     address branch_addr = target(branch_target);
1621     branch_optimized(cond, branch_addr);
1622   } else if (branch_target.is_near()) {
1623     z_brc(cond, branch_target);  // Caller assures that the target will be in range for z_brc.
1624   } else {
1625     z_brcl(cond, branch_target); // Let's hope target is in range. Otherwise, we will abort at patch time.
1626   }
1627 }
1628 
1629 // Generate an optimal compare and branch to the branch target.
1630 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1631 // branch distance is short enough. Loading the target address into a
1632 // register and branching via reg is used as fallback only.
1633 //
1634 // Input:
1635 //   r1 - left compare operand
1636 //   r2 - right compare operand
compare_and_branch_optimized(Register r1,Register r2,Assembler::branch_condition cond,address branch_addr,bool len64,bool has_sign)1637 void MacroAssembler::compare_and_branch_optimized(Register r1,
1638                                                   Register r2,
1639                                                   Assembler::branch_condition cond,
1640                                                   address  branch_addr,
1641                                                   bool     len64,
1642                                                   bool     has_sign) {
1643   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1644 
1645   address branch_origin = pc();
1646   if (VM_Version::has_CompareBranch() && RelAddr::is_in_range_of_RelAddr16(branch_addr, branch_origin)) {
1647     switch (casenum) {
1648       case 0: z_crj( r1, r2, cond, branch_addr); break;
1649       case 1: z_clrj(r1, r2, cond, branch_addr); break;
1650       case 2: z_cgrj(r1, r2, cond, branch_addr); break;
1651       case 3: z_clgrj(r1, r2, cond, branch_addr); break;
1652       default: ShouldNotReachHere(); break;
1653     }
1654   } else {
1655     switch (casenum) {
1656       case 0: z_cr( r1, r2); break;
1657       case 1: z_clr(r1, r2); break;
1658       case 2: z_cgr(r1, r2); break;
1659       case 3: z_clgr(r1, r2); break;
1660       default: ShouldNotReachHere(); break;
1661     }
1662     branch_optimized(cond, branch_addr);
1663   }
1664 }
1665 
1666 // Generate an optimal compare and branch to the branch target.
1667 // Optimal means that a relative branch (clgij, brc or brcl) is used if the
1668 // branch distance is short enough. Loading the target address into a
1669 // register and branching via reg is used as fallback only.
1670 //
1671 // Input:
1672 //   r1 - left compare operand (in register)
1673 //   x2 - right compare operand (immediate)
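// A few sketched cases (immediates invented for this comment): x2 = 5 with a near or
// in-range bound label (and the compare-and-branch facility) folds into a single
// CIJ/CLIJ/CGIJ/CLGIJ; x2 = 0 degenerates to a load-and-test (LTR/LTGR) plus branch;
// x2 = 100000 no longer fits simm16 and is compared via CFI/CGFI (or CLFI/CLGFI for
// unsigned) before branching.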
compare_and_branch_optimized(Register r1,jlong x2,Assembler::branch_condition cond,Label & branch_target,bool len64,bool has_sign)1674 void MacroAssembler::compare_and_branch_optimized(Register r1,
1675                                                   jlong    x2,
1676                                                   Assembler::branch_condition cond,
1677                                                   Label&   branch_target,
1678                                                   bool     len64,
1679                                                   bool     has_sign) {
1680   address      branch_origin = pc();
1681   bool         x2_imm8       = (has_sign && Immediate::is_simm8(x2)) || (!has_sign && Immediate::is_uimm8(x2));
1682   bool         is_RelAddr16  = branch_target.is_near() ||
1683                                (branch_target.is_bound() &&
1684                                 RelAddr::is_in_range_of_RelAddr16(target(branch_target), branch_origin));
1685   unsigned int casenum       = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1686 
1687   if (VM_Version::has_CompareBranch() && is_RelAddr16 && x2_imm8) {
1688     switch (casenum) {
1689       case 0: z_cij( r1, x2, cond, branch_target); break;
1690       case 1: z_clij(r1, x2, cond, branch_target); break;
1691       case 2: z_cgij(r1, x2, cond, branch_target); break;
1692       case 3: z_clgij(r1, x2, cond, branch_target); break;
1693       default: ShouldNotReachHere(); break;
1694     }
1695     return;
1696   }
1697 
1698   if (x2 == 0) {
1699     switch (casenum) {
1700       case 0: z_ltr(r1, r1); break;
1701       case 1: z_ltr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1702       case 2: z_ltgr(r1, r1); break;
1703       case 3: z_ltgr(r1, r1); break; // Caution: unsigned test only provides zero/notZero indication!
1704       default: ShouldNotReachHere(); break;
1705     }
1706   } else {
1707     if ((has_sign && Immediate::is_simm16(x2)) || (!has_sign && Immediate::is_uimm(x2, 15))) {
1708       switch (casenum) {
1709         case 0: z_chi(r1, x2); break;
1710         case 1: z_chi(r1, x2); break; // positive immediate < 2**15
1711         case 2: z_cghi(r1, x2); break;
1712         case 3: z_cghi(r1, x2); break; // positive immediate < 2**15
1713         default: ShouldNotReachHere(); break;
1714       }
1715     } else if ( (has_sign && Immediate::is_simm32(x2)) || (!has_sign && Immediate::is_uimm32(x2)) ) {
1716       switch (casenum) {
1717         case 0: z_cfi( r1, x2); break;
1718         case 1: z_clfi(r1, x2); break;
1719         case 2: z_cgfi(r1, x2); break;
1720         case 3: z_clgfi(r1, x2); break;
1721         default: ShouldNotReachHere(); break;
1722       }
1723     } else {
1724       // No instruction with immediate operand possible, so load into register.
1725       Register scratch = (r1 != Z_R0) ? Z_R0 : Z_R1;
1726       load_const_optimized(scratch, x2);
1727       switch (casenum) {
1728         case 0: z_cr( r1, scratch); break;
1729         case 1: z_clr(r1, scratch); break;
1730         case 2: z_cgr(r1, scratch); break;
1731         case 3: z_clgr(r1, scratch); break;
1732         default: ShouldNotReachHere(); break;
1733       }
1734     }
1735   }
1736   branch_optimized(cond, branch_target);
1737 }
1738 
1739 // Generate an optimal compare and branch to the branch target.
1740 // Optimal means that a relative branch (clgrj, brc or brcl) is used if the
1741 // branch distance is short enough. Loading the target address into a
1742 // register and branching via reg is used as fallback only.
1743 //
1744 // Input:
1745 //   r1 - left compare operand
1746 //   r2 - right compare operand
compare_and_branch_optimized(Register r1,Register r2,Assembler::branch_condition cond,Label & branch_target,bool len64,bool has_sign)1747 void MacroAssembler::compare_and_branch_optimized(Register r1,
1748                                                   Register r2,
1749                                                   Assembler::branch_condition cond,
1750                                                   Label&   branch_target,
1751                                                   bool     len64,
1752                                                   bool     has_sign) {
1753   unsigned int casenum = (len64 ? 2 : 0) + (has_sign ? 0 : 1);
1754 
1755   if (branch_target.is_bound()) {
1756     address branch_addr = target(branch_target);
1757     compare_and_branch_optimized(r1, r2, cond, branch_addr, len64, has_sign);
1758   } else {
1759     if (VM_Version::has_CompareBranch() && branch_target.is_near()) {
1760       switch (casenum) {
1761         case 0: z_crj(  r1, r2, cond, branch_target); break;
1762         case 1: z_clrj( r1, r2, cond, branch_target); break;
1763         case 2: z_cgrj( r1, r2, cond, branch_target); break;
1764         case 3: z_clgrj(r1, r2, cond, branch_target); break;
1765         default: ShouldNotReachHere(); break;
1766       }
1767     } else {
1768       switch (casenum) {
1769         case 0: z_cr( r1, r2); break;
1770         case 1: z_clr(r1, r2); break;
1771         case 2: z_cgr(r1, r2); break;
1772         case 3: z_clgr(r1, r2); break;
1773         default: ShouldNotReachHere(); break;
1774       }
1775       branch_optimized(cond, branch_target);
1776     }
1777   }
1778 }
1779 
1780 //===========================================================================
1781 //===   END     H I G H E R   L E V E L   B R A N C H   E M I T T E R S   ===
1782 //===========================================================================
1783 
allocate_metadata_address(Metadata * obj)1784 AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
1785   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1786   int index = oop_recorder()->allocate_metadata_index(obj);
1787   RelocationHolder rspec = metadata_Relocation::spec(index);
1788   return AddressLiteral((address)obj, rspec);
1789 }
1790 
constant_metadata_address(Metadata * obj)1791 AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
1792   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1793   int index = oop_recorder()->find_index(obj);
1794   RelocationHolder rspec = metadata_Relocation::spec(index);
1795   return AddressLiteral((address)obj, rspec);
1796 }
1797 
allocate_oop_address(jobject obj)1798 AddressLiteral MacroAssembler::allocate_oop_address(jobject obj) {
1799   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1800   int oop_index = oop_recorder()->allocate_oop_index(obj);
1801   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1802 }
1803 
constant_oop_address(jobject obj)1804 AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
1805   assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
1806   int oop_index = oop_recorder()->find_index(obj);
1807   return AddressLiteral(address(obj), oop_Relocation::spec(oop_index));
1808 }
1809 
1810 // NOTE: destroys r
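// The trick: (r | -r) has its sign bit set iff r != 0, so the logical right shift
// by 31 leaves exactly 0 or 1 in r.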
c2bool(Register r,Register t)1811 void MacroAssembler::c2bool(Register r, Register t) {
1812   z_lcr(t, r);   // t = -r
1813   z_or(r, t);    // r = -r OR r
1814   z_srl(r, 31);  // Yields 0 if r was 0, 1 otherwise.
1815 }
1816 
delayed_value_impl(intptr_t * delayed_value_addr,Register tmp,int offset)1817 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
1818                                                       Register tmp,
1819                                                       int offset) {
1820   intptr_t value = *delayed_value_addr;
1821   if (value != 0) {
1822     return RegisterOrConstant(value + offset);
1823   }
1824 
1825   BLOCK_COMMENT("delayed_value {");
1826   // Load indirectly to solve generation ordering problem.
1827   load_absolute_address(tmp, (address) delayed_value_addr); // tmp = a;
1828   z_lg(tmp, 0, tmp);                   // tmp = *tmp;
1829 
1830 #ifdef ASSERT
1831   NearLabel L;
1832   compare64_and_branch(tmp, (intptr_t)0L, Assembler::bcondNotEqual, L);
1833   z_illtrap();
1834   bind(L);
1835 #endif
1836 
1837   if (offset != 0) {
1838     z_agfi(tmp, offset);               // tmp = tmp + offset;
1839   }
1840 
1841   BLOCK_COMMENT("} delayed_value");
1842   return RegisterOrConstant(tmp);
1843 }
1844 
1845 // Patch instruction `inst' at offset `inst_pos' to refer to `dest_pos'
1846 // and return the resulting instruction.
1847 // Dest_pos and inst_pos are 32 bit only. These parms can only designate
1848 // relative positions.
1849 // Use correct argument types. Do not pre-calculate distance.
patched_branch(address dest_pos,unsigned long inst,address inst_pos)1850 unsigned long MacroAssembler::patched_branch(address dest_pos, unsigned long inst, address inst_pos) {
1851   int c = 0;
1852   unsigned long patched_inst = 0;
1853   if (is_call_pcrelative_short(inst) ||
1854       is_branch_pcrelative_short(inst) ||
1855       is_branchoncount_pcrelative_short(inst) ||
1856       is_branchonindex32_pcrelative_short(inst)) {
1857     c = 1;
1858     int m = fmask(15, 0);    // simm16(-1, 16, 32);
1859     int v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 32);
1860     patched_inst = (inst & ~m) | v;
1861   } else if (is_compareandbranch_pcrelative_short(inst)) {
1862     c = 2;
1863     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1864     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1865     patched_inst = (inst & ~m) | v;
1866   } else if (is_branchonindex64_pcrelative_short(inst)) {
1867     c = 3;
1868     long m = fmask(31, 16);  // simm16(-1, 16, 48);
1869     long v = simm16(RelAddr::pcrel_off16(dest_pos, inst_pos), 16, 48);
1870     patched_inst = (inst & ~m) | v;
1871   } else if (is_call_pcrelative_long(inst) || is_branch_pcrelative_long(inst)) {
1872     c = 4;
1873     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1874     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1875     patched_inst = (inst & ~m) | v;
1876   } else if (is_pcrelative_long(inst)) { // These are the non-branch pc-relative instructions.
1877     c = 5;
1878     long m = fmask(31, 0);  // simm32(-1, 16, 48);
1879     long v = simm32(RelAddr::pcrel_off32(dest_pos, inst_pos), 16, 48);
1880     patched_inst = (inst & ~m) | v;
1881   } else {
1882     print_dbg_msg(tty, inst, "not a relative branch", 0);
1883     dump_code_range(tty, inst_pos, 32, "not a pcrelative branch");
1884     ShouldNotReachHere();
1885   }
1886 
1887   long new_off = get_pcrel_offset(patched_inst);
1888   if (new_off != (dest_pos-inst_pos)) {
1889     tty->print_cr("case %d: dest_pos = %p, inst_pos = %p, disp = %ld(%12.12lx)", c, dest_pos, inst_pos, new_off, new_off);
1890     print_dbg_msg(tty, inst,         "<- original instruction: branch patching error", 0);
1891     print_dbg_msg(tty, patched_inst, "<- patched  instruction: branch patching error", 0);
1892 #ifdef LUCY_DBG
1893     VM_Version::z_SIGSEGV();
1894 #endif
1895     ShouldNotReachHere();
1896   }
1897   return patched_inst;
1898 }
1899 
1900 // Only called when binding labels (see share/asm/assembler.cpp).
1901 // Pass arguments as intended. Do not pre-calculate distance.
pd_patch_instruction(address branch,address target,const char * file,int line)1902 void MacroAssembler::pd_patch_instruction(address branch, address target, const char* file, int line) {
1903   unsigned long stub_inst;
1904   int           inst_len = get_instruction(branch, &stub_inst);
1905 
1906   set_instruction(branch, patched_branch(target, stub_inst, branch), inst_len);
1907 }
1908 
1909 
1910 // Extract relative address (aka offset).
1911 // inv_simm16 works for 4-byte instructions only.
1912 // Compare-and-branch instructions are 6 bytes long and have their 16-bit offset "in the middle".
get_pcrel_offset(unsigned long inst)1913 long MacroAssembler::get_pcrel_offset(unsigned long inst) {
1914 
1915   if (MacroAssembler::is_pcrelative_short(inst)) {
1916     if (((inst&0xFFFFffff00000000UL) == 0) && ((inst&0x00000000FFFF0000UL) != 0)) {
1917       return RelAddr::inv_pcrel_off16(inv_simm16(inst));
1918     } else {
1919       return RelAddr::inv_pcrel_off16(inv_simm16_48(inst));
1920     }
1921   }
1922 
1923   if (MacroAssembler::is_pcrelative_long(inst)) {
1924     return RelAddr::inv_pcrel_off32(inv_simm32(inst));
1925   }
1926 
1927   print_dbg_msg(tty, inst, "not a pcrelative instruction", 6);
1928 #ifdef LUCY_DBG
1929   VM_Version::z_SIGSEGV();
1930 #else
1931   ShouldNotReachHere();
1932 #endif
1933   return -1;
1934 }
1935 
get_pcrel_offset(address pc)1936 long MacroAssembler::get_pcrel_offset(address pc) {
1937   unsigned long inst;
1938   unsigned int  len = get_instruction(pc, &inst);
1939 
1940 #ifdef ASSERT
1941   long offset;
1942   if (MacroAssembler::is_pcrelative_short(inst) || MacroAssembler::is_pcrelative_long(inst)) {
1943     offset = get_pcrel_offset(inst);
1944   } else {
1945     offset = -1;
1946   }
1947 
1948   if (offset == -1) {
1949     dump_code_range(tty, pc, 32, "not a pcrelative instruction");
1950 #ifdef LUCY_DBG
1951     VM_Version::z_SIGSEGV();
1952 #else
1953     ShouldNotReachHere();
1954 #endif
1955   }
1956   return offset;
1957 #else
1958   return get_pcrel_offset(inst);
1959 #endif // ASSERT
1960 }
1961 
1962 // Get target address from pc-relative instructions.
get_target_addr_pcrel(address pc)1963 address MacroAssembler::get_target_addr_pcrel(address pc) {
1964   assert(is_pcrelative_long(pc), "not a pcrelative instruction");
1965   return pc + get_pcrel_offset(pc);
1966 }
1967 
1968 // Patch pc relative load address.
patch_target_addr_pcrel(address pc,address con)1969 void MacroAssembler::patch_target_addr_pcrel(address pc, address con) {
1970   unsigned long inst;
1971   // Offset is +/- 2**32 -> use long.
1972   ptrdiff_t distance = con - pc;
1973 
1974   get_instruction(pc, &inst);
1975 
1976   if (is_pcrelative_short(inst)) {
1977     *(short *)(pc+2) = RelAddr::pcrel_off16(con, pc);  // Instructions are at least 2-byte aligned, no test required.
1978 
1979     // Some extra safety net.
1980     if (!RelAddr::is_in_range_of_RelAddr16(distance)) {
1981       print_dbg_msg(tty, inst, "distance out of range (16bit)", 4);
1982       dump_code_range(tty, pc, 32, "distance out of range (16bit)");
1983       guarantee(RelAddr::is_in_range_of_RelAddr16(distance), "too far away (more than +/- 2**16)");
1984     }
1985     return;
1986   }
1987 
1988   if (is_pcrelative_long(inst)) {
1989     *(int *)(pc+2)   = RelAddr::pcrel_off32(con, pc);
1990 
1991     // Some extra safety net.
1992     if (!RelAddr::is_in_range_of_RelAddr32(distance)) {
1993       print_dbg_msg(tty, inst, "distance out of range (32bit)", 6);
1994       dump_code_range(tty, pc, 32, "distance out of range (32bit)");
1995       guarantee(RelAddr::is_in_range_of_RelAddr32(distance), "too far away (more than +/- 2**32)");
1996     }
1997     return;
1998   }
1999 
2000   guarantee(false, "not a pcrelative instruction to patch!");
2001 }
2002 
2003 // "Current PC" here means the address just behind the basr instruction.
get_PC(Register result)2004 address MacroAssembler::get_PC(Register result) {
2005   z_basr(result, Z_R0); // Don't branch, just save next instruction address in result.
2006   return pc();
2007 }
2008 
2009 // Get current PC + offset.
2010 // Offset given in bytes, must be even!
2011 // "Current PC" here means the address of the larl instruction plus the given offset.
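// For example (illustrative only), get_PC(Z_R1, 8) makes Z_R1 point 8 bytes past the
// start of the LARL instruction. LARL takes a halfword-scaled operand, hence offset/2.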
get_PC(Register result,int64_t offset)2012 address MacroAssembler::get_PC(Register result, int64_t offset) {
2013   address here = pc();
2014   z_larl(result, offset/2); // Save target instruction address in result.
2015   return here + offset;
2016 }
2017 
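// Determine the length (in bytes) of the instruction at [pc].
// On z/Architecture, the two leftmost opcode bits encode the instruction length:
// 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes. The arithmetic below maps these
// bit patterns (0..3) to 2/4/4/6 without branching.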
instr_size(Register size,Register pc)2018 void MacroAssembler::instr_size(Register size, Register pc) {
2019   // Extract 2 most significant bits of current instruction.
2020   z_llgc(size, Address(pc));
2021   z_srl(size, 6);
2022   // Compute (x+3)&6 which translates 0->2, 1->4, 2->4, 3->6.
2023   z_ahi(size, 3);
2024   z_nill(size, 6);
2025 }
2026 
2027 // Resize_frame with SP(new) = SP(old) - [offset].
resize_frame_sub(Register offset,Register fp,bool load_fp)2028 void MacroAssembler::resize_frame_sub(Register offset, Register fp, bool load_fp)
2029 {
2030   assert_different_registers(offset, fp, Z_SP);
2031   if (load_fp) { z_lg(fp, _z_abi(callers_sp), Z_SP); }
2032 
2033   z_sgr(Z_SP, offset);
2034   z_stg(fp, _z_abi(callers_sp), Z_SP);
2035 }
2036 
2037 // Resize_frame with SP(new) = [newSP] + offset.
2038 //   This emitter is useful if we already have calculated a pointer
2039 //   into the to-be-allocated stack space, e.g. with special alignment properties,
2040 //   but need some additional space, e.g. for spilling.
2041 //   newSP    is the pre-calculated pointer. It must not be modified.
2042 //   fp       holds, or is filled with, the frame pointer.
2043 //   offset   is the additional increment which is added to addr to form the new SP.
2044 //            Note: specify a negative value to reserve more space!
2045 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2046 //                    It does not guarantee that fp contains the frame pointer at the end.
resize_frame_abs_with_offset(Register newSP,Register fp,int offset,bool load_fp)2047 void MacroAssembler::resize_frame_abs_with_offset(Register newSP, Register fp, int offset, bool load_fp) {
2048   assert_different_registers(newSP, fp, Z_SP);
2049 
2050   if (load_fp) {
2051     z_lg(fp, _z_abi(callers_sp), Z_SP);
2052   }
2053 
2054   add2reg(Z_SP, offset, newSP);
2055   z_stg(fp, _z_abi(callers_sp), Z_SP);
2056 }
2057 
2058 // Resize_frame with SP(new) = [newSP].
2059 //   load_fp == true  only indicates that fp is not pre-filled with the frame pointer.
2060 //                    It does not guarantee that fp contains the frame pointer at the end.
resize_frame_absolute(Register newSP,Register fp,bool load_fp)2061 void MacroAssembler::resize_frame_absolute(Register newSP, Register fp, bool load_fp) {
2062   assert_different_registers(newSP, fp, Z_SP);
2063 
2064   if (load_fp) {
2065     z_lg(fp, _z_abi(callers_sp), Z_SP); // need to use load/store.
2066   }
2067 
2068   z_lgr(Z_SP, newSP);
2069   if (newSP != Z_R0) { // make sure we generate correct code, no matter what register newSP uses.
2070     z_stg(fp, _z_abi(callers_sp), newSP);
2071   } else {
2072     z_stg(fp, _z_abi(callers_sp), Z_SP);
2073   }
2074 }
2075 
2076 // Resize_frame with SP(new) = SP(old) + offset.
resize_frame(RegisterOrConstant offset,Register fp,bool load_fp)2077 void MacroAssembler::resize_frame(RegisterOrConstant offset, Register fp, bool load_fp) {
2078   assert_different_registers(fp, Z_SP);
2079 
2080   if (load_fp) {
2081     z_lg(fp, _z_abi(callers_sp), Z_SP);
2082   }
2083   add64(Z_SP, offset);
2084   z_stg(fp, _z_abi(callers_sp), Z_SP);
2085 }
2086 
push_frame(Register bytes,Register old_sp,bool copy_sp,bool bytes_with_inverted_sign)2087 void MacroAssembler::push_frame(Register bytes, Register old_sp, bool copy_sp, bool bytes_with_inverted_sign) {
2088 #ifdef ASSERT
2089   assert_different_registers(bytes, old_sp, Z_SP);
2090   if (!copy_sp) {
2091     z_cgr(old_sp, Z_SP);
2092     asm_assert_eq("[old_sp]!=[Z_SP]", 0x211);
2093   }
2094 #endif
2095   if (copy_sp) { z_lgr(old_sp, Z_SP); }
2096   if (bytes_with_inverted_sign) {
2097     z_agr(Z_SP, bytes);
2098   } else {
2099     z_sgr(Z_SP, bytes); // Z_sgfr sufficient, but probably not faster.
2100   }
2101   z_stg(old_sp, _z_abi(callers_sp), Z_SP);
2102 }
2103 
push_frame(unsigned int bytes,Register scratch)2104 unsigned int MacroAssembler::push_frame(unsigned int bytes, Register scratch) {
2105   long offset = Assembler::align(bytes, frame::alignment_in_bytes);
2106   assert(offset > 0, "should push a frame with positive size, size = %ld.", offset);
2107   assert(Displacement::is_validDisp(-offset), "frame size out of range, size = %ld", offset);
2108 
2109   // We must not write outside the current stack bounds (given by Z_SP).
2110   // Thus, we have to first update Z_SP and then store the previous SP as stack linkage.
2111   // We rely on Z_R0 by default to be available as scratch.
2112   z_lgr(scratch, Z_SP);
2113   add2reg(Z_SP, -offset);
2114   z_stg(scratch, _z_abi(callers_sp), Z_SP);
2115 #ifdef ASSERT
2116   // Just make sure nobody uses the value in the default scratch register.
2117   // When another register is used, the caller might rely on it containing the frame pointer.
2118   if (scratch == Z_R0) {
2119     z_iihf(scratch, 0xbaadbabe);
2120     z_iilf(scratch, 0xdeadbeef);
2121   }
2122 #endif
2123   return offset;
2124 }
2125 
2126 // Push a frame of size `bytes' plus abi160 on top.
push_frame_abi160(unsigned int bytes)2127 unsigned int MacroAssembler::push_frame_abi160(unsigned int bytes) {
2128   BLOCK_COMMENT("push_frame_abi160 {");
2129   unsigned int res = push_frame(bytes + frame::z_abi_160_size);
2130   BLOCK_COMMENT("} push_frame_abi160");
2131   return res;
2132 }
2133 
2134 // Pop current C frame.
pop_frame()2135 void MacroAssembler::pop_frame() {
2136   BLOCK_COMMENT("pop_frame:");
2137   Assembler::z_lg(Z_SP, _z_abi(callers_sp), Z_SP);
2138 }
2139 
2140 // Pop current C frame and restore return PC register (Z_R14).
pop_frame_restore_retPC(int frame_size_in_bytes)2141 void MacroAssembler::pop_frame_restore_retPC(int frame_size_in_bytes) {
2142   BLOCK_COMMENT("pop_frame_restore_retPC:");
2143   int retPC_offset = _z_abi16(return_pc) + frame_size_in_bytes;
2144   // If possible, pop frame by add instead of load (a penny saved is a penny got :-).
2145   if (Displacement::is_validDisp(retPC_offset)) {
2146     z_lg(Z_R14, retPC_offset, Z_SP);
2147     add2reg(Z_SP, frame_size_in_bytes);
2148   } else {
2149     add2reg(Z_SP, frame_size_in_bytes);
2150     restore_return_pc();
2151   }
2152 }
2153 
call_VM_leaf_base(address entry_point,bool allow_relocation)2154 void MacroAssembler::call_VM_leaf_base(address entry_point, bool allow_relocation) {
2155   if (allow_relocation) {
2156     call_c(entry_point);
2157   } else {
2158     call_c_static(entry_point);
2159   }
2160 }
2161 
call_VM_leaf_base(address entry_point)2162 void MacroAssembler::call_VM_leaf_base(address entry_point) {
2163   bool allow_relocation = true;
2164   call_VM_leaf_base(entry_point, allow_relocation);
2165 }
2166 
call_VM_base(Register oop_result,Register last_java_sp,address entry_point,bool allow_relocation,bool check_exceptions)2167 void MacroAssembler::call_VM_base(Register oop_result,
2168                                   Register last_java_sp,
2169                                   address  entry_point,
2170                                   bool     allow_relocation,
2171                                   bool     check_exceptions) { // Defaults to true.
2172   // Allow_relocation indicates, if true, that the generated code shall
2173   // be fit for code relocation or referenced data relocation. In other
2174   // words: all addresses must be considered variable. PC-relative addressing
2175   // is not possible then.
2176   // On the other hand, if (allow_relocation == false), addresses and offsets
2177   // may be considered stable, enabling us to take advantage of some PC-relative
2178   // addressing tweaks. These might improve performance and reduce code size.
2179 
2180   // Determine last_java_sp register.
2181   if (!last_java_sp->is_valid()) {
2182     last_java_sp = Z_SP;  // Load Z_SP as SP.
2183   }
2184 
2185   set_top_ijava_frame_at_SP_as_last_Java_frame(last_java_sp, Z_R1, allow_relocation);
2186 
2187   // ARG1 must hold thread address.
2188   z_lgr(Z_ARG1, Z_thread);
2189 
2190   address return_pc = NULL;
2191   if (allow_relocation) {
2192     return_pc = call_c(entry_point);
2193   } else {
2194     return_pc = call_c_static(entry_point);
2195   }
2196 
2197   reset_last_Java_frame(allow_relocation);
2198 
2199   // C++ interp handles this in the interpreter.
2200   check_and_handle_popframe(Z_thread);
2201   check_and_handle_earlyret(Z_thread);
2202 
2203   // Check for pending exceptions.
2204   if (check_exceptions) {
2205     // Check for pending exceptions (java_thread is set upon return).
2206     load_and_test_long(Z_R0_scratch, Address(Z_thread, Thread::pending_exception_offset()));
2207 
2208     // This used to jump conditionally to forward_exception. However, after
2209     // relocation that branch might no longer reach its target, so we branch
2210     // around an unconditional call which is always reachable.
2211 
2212     Label ok;
2213     z_bre(ok); // bcondEqual is the same as bcondZero.
2214     call_stub(StubRoutines::forward_exception_entry());
2215     bind(ok);
2216   }
2217 
2218   // Get oop result if there is one and reset the value in the thread.
2219   if (oop_result->is_valid()) {
2220     get_vm_result(oop_result);
2221   }
2222 
2223   _last_calls_return_pc = return_pc;  // Wipe out other (error handling) calls.
2224 }
2225 
call_VM_base(Register oop_result,Register last_java_sp,address entry_point,bool check_exceptions)2226 void MacroAssembler::call_VM_base(Register oop_result,
2227                                   Register last_java_sp,
2228                                   address  entry_point,
2229                                   bool     check_exceptions) { // Defaults to true.
2230   bool allow_relocation = true;
2231   call_VM_base(oop_result, last_java_sp, entry_point, allow_relocation, check_exceptions);
2232 }
2233 
2234 // VM calls without explicit last_java_sp.
2235 
call_VM(Register oop_result,address entry_point,bool check_exceptions)2236 void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2237   // Call takes possible detour via InterpreterMacroAssembler.
2238   call_VM_base(oop_result, noreg, entry_point, true, check_exceptions);
2239 }
2240 
call_VM(Register oop_result,address entry_point,Register arg_1,bool check_exceptions)2241 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2242   // Z_ARG1 is reserved for the thread.
2243   lgr_if_needed(Z_ARG2, arg_1);
2244   call_VM(oop_result, entry_point, check_exceptions);
2245 }
2246 
call_VM(Register oop_result,address entry_point,Register arg_1,Register arg_2,bool check_exceptions)2247 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2248   // Z_ARG1 is reserved for the thread.
2249   lgr_if_needed(Z_ARG2, arg_1);
2250   assert(arg_2 != Z_ARG2, "smashed argument");
2251   lgr_if_needed(Z_ARG3, arg_2);
2252   call_VM(oop_result, entry_point, check_exceptions);
2253 }
2254 
call_VM(Register oop_result,address entry_point,Register arg_1,Register arg_2,Register arg_3,bool check_exceptions)2255 void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2256                              Register arg_3, bool check_exceptions) {
2257   // Z_ARG1 is reserved for the thread.
2258   lgr_if_needed(Z_ARG2, arg_1);
2259   assert(arg_2 != Z_ARG2, "smashed argument");
2260   lgr_if_needed(Z_ARG3, arg_2);
2261   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2262   lgr_if_needed(Z_ARG4, arg_3);
2263   call_VM(oop_result, entry_point, check_exceptions);
2264 }
2265 
2266 // VM static calls without explicit last_java_sp.
2267 
call_VM_static(Register oop_result,address entry_point,bool check_exceptions)2268 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, bool check_exceptions) {
2269   // Call takes possible detour via InterpreterMacroAssembler.
2270   call_VM_base(oop_result, noreg, entry_point, false, check_exceptions);
2271 }
2272 
call_VM_static(Register oop_result,address entry_point,Register arg_1,Register arg_2,Register arg_3,bool check_exceptions)2273 void MacroAssembler::call_VM_static(Register oop_result, address entry_point, Register arg_1, Register arg_2,
2274                                     Register arg_3, bool check_exceptions) {
2275   // Z_ARG1 is reserved for the thread.
2276   lgr_if_needed(Z_ARG2, arg_1);
2277   assert(arg_2 != Z_ARG2, "smashed argument");
2278   lgr_if_needed(Z_ARG3, arg_2);
2279   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2280   lgr_if_needed(Z_ARG4, arg_3);
2281   call_VM_static(oop_result, entry_point, check_exceptions);
2282 }
2283 
2284 // VM calls with explicit last_java_sp.
2285 
call_VM(Register oop_result,Register last_java_sp,address entry_point,bool check_exceptions)2286 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, bool check_exceptions) {
2287   // Call takes possible detour via InterpreterMacroAssembler.
2288   call_VM_base(oop_result, last_java_sp, entry_point, true, check_exceptions);
2289 }
2290 
call_VM(Register oop_result,Register last_java_sp,address entry_point,Register arg_1,bool check_exceptions)2291 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
2292    // Z_ARG1 is reserved for the thread.
2293    lgr_if_needed(Z_ARG2, arg_1);
2294    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2295 }
2296 
call_VM(Register oop_result,Register last_java_sp,address entry_point,Register arg_1,Register arg_2,bool check_exceptions)2297 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2298                              Register arg_2, bool check_exceptions) {
2299    // Z_ARG1 is reserved for the thread.
2300    lgr_if_needed(Z_ARG2, arg_1);
2301    assert(arg_2 != Z_ARG2, "smashed argument");
2302    lgr_if_needed(Z_ARG3, arg_2);
2303    call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2304 }
2305 
call_VM(Register oop_result,Register last_java_sp,address entry_point,Register arg_1,Register arg_2,Register arg_3,bool check_exceptions)2306 void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1,
2307                              Register arg_2, Register arg_3, bool check_exceptions) {
2308   // Z_ARG1 is reserved for the thread.
2309   lgr_if_needed(Z_ARG2, arg_1);
2310   assert(arg_2 != Z_ARG2, "smashed argument");
2311   lgr_if_needed(Z_ARG3, arg_2);
2312   assert(arg_3 != Z_ARG2 && arg_3 != Z_ARG3, "smashed argument");
2313   lgr_if_needed(Z_ARG4, arg_3);
2314   call_VM(oop_result, last_java_sp, entry_point, check_exceptions);
2315 }
2316 
2317 // VM leaf calls.
2318 
call_VM_leaf(address entry_point)2319 void MacroAssembler::call_VM_leaf(address entry_point) {
2320   // Call takes possible detour via InterpreterMacroAssembler.
2321   call_VM_leaf_base(entry_point, true);
2322 }
2323 
call_VM_leaf(address entry_point,Register arg_1)2324 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
2325   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2326   call_VM_leaf(entry_point);
2327 }
2328 
call_VM_leaf(address entry_point,Register arg_1,Register arg_2)2329 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
2330   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2331   assert(arg_2 != Z_ARG1, "smashed argument");
2332   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2333   call_VM_leaf(entry_point);
2334 }
2335 
call_VM_leaf(address entry_point,Register arg_1,Register arg_2,Register arg_3)2336 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2337   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2338   assert(arg_2 != Z_ARG1, "smashed argument");
2339   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2340   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2341   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2342   call_VM_leaf(entry_point);
2343 }
2344 
2345 // Static VM leaf calls.
2346 // Really static VM leaf calls are never patched.
2347 
call_VM_leaf_static(address entry_point)2348 void MacroAssembler::call_VM_leaf_static(address entry_point) {
2349   // Call takes possible detour via InterpreterMacroAssembler.
2350   call_VM_leaf_base(entry_point, false);
2351 }
2352 
call_VM_leaf_static(address entry_point,Register arg_1)2353 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1) {
2354   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2355   call_VM_leaf_static(entry_point);
2356 }
2357 
call_VM_leaf_static(address entry_point,Register arg_1,Register arg_2)2358 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2) {
2359   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2360   assert(arg_2 != Z_ARG1, "smashed argument");
2361   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2362   call_VM_leaf_static(entry_point);
2363 }
2364 
call_VM_leaf_static(address entry_point,Register arg_1,Register arg_2,Register arg_3)2365 void MacroAssembler::call_VM_leaf_static(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
2366   if (arg_1 != noreg) lgr_if_needed(Z_ARG1, arg_1);
2367   assert(arg_2 != Z_ARG1, "smashed argument");
2368   if (arg_2 != noreg) lgr_if_needed(Z_ARG2, arg_2);
2369   assert(arg_3 != Z_ARG1 && arg_3 != Z_ARG2, "smashed argument");
2370   if (arg_3 != noreg) lgr_if_needed(Z_ARG3, arg_3);
2371   call_VM_leaf_static(entry_point);
2372 }
2373 
2374 // Don't use detour via call_c(reg).
call_c(address function_entry)2375 address MacroAssembler::call_c(address function_entry) {
2376   load_const(Z_R1, function_entry);
2377   return call(Z_R1);
2378 }
2379 
2380 // Variant for really static (non-relocatable) calls which are never patched.
call_c_static(address function_entry)2381 address MacroAssembler::call_c_static(address function_entry) {
2382   load_absolute_address(Z_R1, function_entry);
2383 #if 0 // def ASSERT
2384   // Verify that call site did not move.
2385   load_const_optimized(Z_R0, function_entry);
2386   z_cgr(Z_R1, Z_R0);
2387   z_brc(bcondEqual, 3);
2388   z_illtrap(0xba);
2389 #endif
2390   return call(Z_R1);
2391 }
2392 
call_c_opt(address function_entry)2393 address MacroAssembler::call_c_opt(address function_entry) {
2394   bool success = call_far_patchable(function_entry, -2 /* emit relocation + constant */);
2395   _last_calls_return_pc = success ? pc() : NULL;
2396   return _last_calls_return_pc;
2397 }
2398 
2399 // Identify a call_far_patchable instruction: LARL + LG + BASR
2400 //
2401 //    nop                   ; optionally, if required for alignment
2402 //    lgrl rx,A(TOC entry)  ; PC-relative access into constant pool
2403 //    basr Z_R14,rx         ; end of this instruction must be aligned to a word boundary
2404 //
2405 // Code pattern will eventually get patched into variant2 (see below for detection code).
2406 //
is_call_far_patchable_variant0_at(address instruction_addr)2407 bool MacroAssembler::is_call_far_patchable_variant0_at(address instruction_addr) {
2408   address iaddr = instruction_addr;
2409 
2410   // Check for the actual load instruction.
2411   if (!is_load_const_from_toc(iaddr)) { return false; }
2412   iaddr += load_const_from_toc_size();
2413 
2414   // Check for the call (BASR) instruction, finally.
2415   assert(iaddr-instruction_addr+call_byregister_size() == call_far_patchable_size(), "size mismatch");
2416   return is_call_byregister(iaddr);
2417 }
2418 
2419 // Identify a call_far_patchable instruction: BRASL
2420 //
2421 // Code pattern that suits atomic patching:
2422 //    nop                       ; Optionally, if required for alignment.
2423 //    nop    ...                ; Multiple filler nops to compensate for size difference (variant0 is longer).
2424 //    nop                       ; For code pattern detection: Prepend each BRASL with a nop.
2425 //    brasl  Z_R14,<reladdr>    ; End of code must be 4-byte aligned !
is_call_far_patchable_variant2_at(address instruction_addr)2426 bool MacroAssembler::is_call_far_patchable_variant2_at(address instruction_addr) {
2427   const address call_addr = (address)((intptr_t)instruction_addr + call_far_patchable_size() - call_far_pcrelative_size());
2428 
2429   // Check for correct number of leading nops.
2430   address iaddr;
2431   for (iaddr = instruction_addr; iaddr < call_addr; iaddr += nop_size()) {
2432     if (!is_z_nop(iaddr)) { return false; }
2433   }
2434   assert(iaddr == call_addr, "sanity");
2435 
2436   // --> Check for call instruction.
2437   if (is_call_far_pcrelative(call_addr)) {
2438     assert(call_addr-instruction_addr+call_far_pcrelative_size() == call_far_patchable_size(), "size mismatch");
2439     return true;
2440   }
2441 
2442   return false;
2443 }
2444 
2445 // Emit a 64-bit absolute call which is NOT MT-safely patchable.
2446 // If toc_offset == -2, then the destination of the call (= target) is emitted
2447 //                      to the constant pool and a runtime_call relocation is added
2448 //                      to the code buffer.
2449 // If toc_offset != -2, target must already be in the constant pool at
2450 //                      _ctableStart+toc_offset (a caller can retrieve toc_offset
2451 //                      from the runtime_call relocation).
2452 // Special handling of emitting to scratch buffer when there is no constant pool.
2453 // Slightly changed code pattern. We emit an additional nop if we would
2454 // not end emitting at a word aligned address. This is to ensure
2455 // an atomically patchable displacement in brasl instructions.
2456 //
2457 // A call_far_patchable comes in different flavors:
2458 //  - LARL(CP) / LG(CP) / BR (address in constant pool, access via CP register)
2459 //  - LGRL(CP) / BR          (address in constant pool, pc-relative access)
2460 //  - BRASL                  (relative address of call target coded in instruction)
2461 // All flavors occupy the same amount of space. Length differences are compensated
2462 // by leading nops, such that the instruction sequence always ends at the same
2463 // byte offset. This is required to keep the return offset constant.
2464 // Furthermore, the return address (the end of the instruction sequence) is forced
2465 // to be on a 4-byte boundary. This is required for atomic patching, should we ever
2466 // need to patch the call target of the BRASL flavor.
2467 // RETURN value: false, if no constant pool entry could be allocated, true otherwise.
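// Rough sketch of the two emitted shapes (lengths as noted in the code below):
//   relative: <filler nops> + NOP + BRASL Z_R14,target            (2 + 6 bytes for the call itself)
//   absolute: [load TOC base] + load target from TOC + BASR Z_R14 ({14|8} bytes)
// Both variants end at the same word-aligned offset, so the return address is identical
// and the BRASL displacement remains atomically patchable.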
call_far_patchable(address target,int64_t tocOffset)2468 bool MacroAssembler::call_far_patchable(address target, int64_t tocOffset) {
2469   // Get current pc and ensure word alignment for end of instr sequence.
2470   const address start_pc = pc();
2471   const intptr_t       start_off = offset();
2472   assert(!call_far_patchable_requires_alignment_nop(start_pc), "call_far_patchable requires aligned address");
2473   const ptrdiff_t      dist      = (ptrdiff_t)(target - (start_pc + 2)); // Prepend each BRASL with a nop.
2474   const bool emit_target_to_pool = (tocOffset == -2) && !code_section()->scratch_emit();
2475   const bool emit_relative_call  = !emit_target_to_pool &&
2476                                    RelAddr::is_in_range_of_RelAddr32(dist) &&
2477                                    ReoptimizeCallSequences &&
2478                                    !code_section()->scratch_emit();
2479 
2480   if (emit_relative_call) {
2481     // Add padding to get the same size as below.
2482     const unsigned int padding = call_far_patchable_size() - call_far_pcrelative_size();
2483     unsigned int current_padding;
2484     for (current_padding = 0; current_padding < padding; current_padding += nop_size()) { z_nop(); }
2485     assert(current_padding == padding, "sanity");
2486 
2487     // relative call: len = 2(nop) + 6 (brasl)
2488     // CodeBlob resize cannot occur in this case because
2489     // this call is emitted into pre-existing space.
2490     z_nop(); // Prepend each BRASL with a nop.
2491     z_brasl(Z_R14, target);
2492   } else {
2493     // absolute call: Get address from TOC.
2494     // len = (load TOC){6|0} + (load from TOC){6} + (basr){2} = {14|8}
2495     if (emit_target_to_pool) {
2496       // When emitting the call for the first time, we do not need to use
2497       // the pc-relative version. It will be patched anyway, when the code
2498       // buffer is copied.
2499       // Relocation is not needed when !ReoptimizeCallSequences.
2500       relocInfo::relocType rt = ReoptimizeCallSequences ? relocInfo::runtime_call_w_cp_type : relocInfo::none;
2501       AddressLiteral dest(target, rt);
2502       // Store_oop_in_toc() adds dest to the constant table. As a side effect, this kills
2503       // inst_mark(). Reset if possible.
2504       bool reset_mark = (inst_mark() == pc());
2505       tocOffset = store_oop_in_toc(dest);
2506       if (reset_mark) { set_inst_mark(); }
2507       if (tocOffset == -1) {
2508         return false; // Couldn't create constant pool entry.
2509       }
2510     }
2511     assert(offset() == start_off, "emit no code before this point!");
2512 
2513     address tocPos = pc() + tocOffset;
2514     if (emit_target_to_pool) {
2515       tocPos = code()->consts()->start() + tocOffset;
2516     }
2517     load_long_pcrelative(Z_R14, tocPos);
2518     z_basr(Z_R14, Z_R14);
2519   }
2520 
2521 #ifdef ASSERT
2522   // Assert that we can identify the emitted call.
2523   assert(is_call_far_patchable_at(addr_at(start_off)), "can't identify emitted call");
2524   assert(offset() == start_off+call_far_patchable_size(), "wrong size");
2525 
2526   if (emit_target_to_pool) {
2527     assert(get_dest_of_call_far_patchable_at(addr_at(start_off), code()->consts()->start()) == target,
2528            "wrong encoding of dest address");
2529   }
2530 #endif
2531   return true; // success
2532 }
2533 
2534 // Identify a call_far_patchable instruction.
2535 // For more detailed information see header comment of call_far_patchable.
2536 bool MacroAssembler::is_call_far_patchable_at(address instruction_addr) {
2537   return is_call_far_patchable_variant2_at(instruction_addr)  || // short version: BRASL
2538          is_call_far_patchable_variant0_at(instruction_addr);    // long version LARL + LG + BASR
2539 }
2540 
2541 // Does the call_far_patchable instruction use a pc-relative encoding
2542 // of the call destination?
2543 bool MacroAssembler::is_call_far_patchable_pcrelative_at(address instruction_addr) {
2544   // Variant 2 is pc-relative.
2545   return is_call_far_patchable_variant2_at(instruction_addr);
2546 }
2547 
2548 bool MacroAssembler::is_call_far_pcrelative(address instruction_addr) {
2549   // Prepend each BRASL with a nop.
2550   return is_z_nop(instruction_addr) && is_z_brasl(instruction_addr + nop_size());  // Match at position after one nop required.
2551 }
2552 
2553 // Set destination address of a call_far_patchable instruction.
2554 void MacroAssembler::set_dest_of_call_far_patchable_at(address instruction_addr, address dest, int64_t tocOffset) {
2555   ResourceMark rm;
2556 
2557   // Now that CP entry is verified, patch call to a pc-relative call (if circumstances permit).
2558   int code_size = MacroAssembler::call_far_patchable_size();
2559   CodeBuffer buf(instruction_addr, code_size);
2560   MacroAssembler masm(&buf);
2561   masm.call_far_patchable(dest, tocOffset);
2562   ICache::invalidate_range(instruction_addr, code_size); // Empty on z.
2563 }
2564 
2565 // Get dest address of a call_far_patchable instruction.
2566 address MacroAssembler::get_dest_of_call_far_patchable_at(address instruction_addr, address ctable) {
2567   // Dynamic TOC: absolute address in constant pool.
2568   // Check variant2 first, it is more frequent.
2569 
2570   // Relative address encoded in call instruction.
2571   if (is_call_far_patchable_variant2_at(instruction_addr)) {
2572     return MacroAssembler::get_target_addr_pcrel(instruction_addr + nop_size()); // Prepend each BRASL with a nop.
2573 
2574   // Absolute address in constant pool.
2575   } else if (is_call_far_patchable_variant0_at(instruction_addr)) {
2576     address iaddr = instruction_addr;
2577 
2578     long    tocOffset = get_load_const_from_toc_offset(iaddr);
2579     address tocLoc    = iaddr + tocOffset;
2580     return *(address *)(tocLoc);
2581   } else {
2582     fprintf(stderr, "MacroAssembler::get_dest_of_call_far_patchable_at has a problem at %p:\n", instruction_addr);
2583     fprintf(stderr, "not a call_far_patchable: %16.16lx %16.16lx, len = %d\n",
2584             *(unsigned long*)instruction_addr,
2585             *(unsigned long*)(instruction_addr+8),
2586             call_far_patchable_size());
2587     Disassembler::decode(instruction_addr, instruction_addr+call_far_patchable_size());
2588     ShouldNotReachHere();
2589     return NULL;
2590   }
2591 }
2592 
2593 void MacroAssembler::align_call_far_patchable(address pc) {
2594   if (call_far_patchable_requires_alignment_nop(pc)) { z_nop(); }
2595 }
2596 
2597 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2598 }
2599 
2600 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2601 }
2602 
2603 // Read from the polling page.
2604 // Use TM or TMY instruction, depending on read offset.
2605 //   offset = 0: Use TM, safepoint polling.
2606 //   offset < 0: Use TMY, profiling safepoint polling.
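// Illustrative sketch of the instruction selection performed below:
//   offset fits in uimm12 (e.g. 0):  TM   offset(polling_page_address),mask_safepoint
//   otherwise (offset < 0):          TMY  offset(polling_page_address),mask_profiling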
2607 void MacroAssembler::load_from_polling_page(Register polling_page_address, int64_t offset) {
2608   if (Immediate::is_uimm12(offset)) {
2609     z_tm(offset, polling_page_address, mask_safepoint);
2610   } else {
2611     z_tmy(offset, polling_page_address, mask_profiling);
2612   }
2613 }
2614 
2615 // Check whether z_instruction is a read access to the polling page
2616 // which was emitted by load_from_polling_page(..).
2617 bool MacroAssembler::is_load_from_polling_page(address instr_loc) {
2618   unsigned long z_instruction;
2619   unsigned int  ilen = get_instruction(instr_loc, &z_instruction);
2620 
2621   if (ilen == 2) { return false; } // It's none of the allowed instructions.
2622 
2623   if (ilen == 4) {
2624     if (!is_z_tm(z_instruction)) { return false; } // It's len=4, but not a z_tm. fail.
2625 
2626     int ms = inv_mask(z_instruction,8,32);  // mask
2627     int ra = inv_reg(z_instruction,16,32);  // base register
2628     int ds = inv_uimm12(z_instruction);     // displacement
2629 
2630     if (!(ds == 0 && ra != 0 && ms == mask_safepoint)) {
2631       return false; // It's not a z_tm(0, ra, mask_safepoint). Fail.
2632     }
2633 
2634   } else { /* if (ilen == 6) */
2635 
2636     assert(!is_z_lg(z_instruction), "old form (LG) polling page access. Please fix and use TM(Y).");
2637 
2638     if (!is_z_tmy(z_instruction)) { return false; } // It's len=6, but not a z_tmy. fail.
2639 
2640     int ms = inv_mask(z_instruction,8,48);  // mask
2641     int ra = inv_reg(z_instruction,16,48);  // base register
2642     int ds = inv_simm20(z_instruction);     // displacement
2643   }
2644 
2645   return true;
2646 }
2647 
2648 // Extract poll address from instruction and ucontext.
2649 address MacroAssembler::get_poll_address(address instr_loc, void* ucontext) {
2650   assert(ucontext != NULL, "must have ucontext");
2651   ucontext_t* uc = (ucontext_t*) ucontext;
2652   unsigned long z_instruction;
2653   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2654 
2655   if (ilen == 4 && is_z_tm(z_instruction)) {
2656     int ra = inv_reg(z_instruction, 16, 32);  // base register
2657     int ds = inv_uimm12(z_instruction);       // displacement
2658     address addr = (address)uc->uc_mcontext.gregs[ra];
2659     return addr + ds;
2660   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2661     int ra = inv_reg(z_instruction, 16, 48);  // base register
2662     int ds = inv_simm20(z_instruction);       // displacement
2663     address addr = (address)uc->uc_mcontext.gregs[ra];
2664     return addr + ds;
2665   }
2666 
2667   ShouldNotReachHere();
2668   return NULL;
2669 }
2670 
2671 // Extract poll register from instruction.
2672 uint MacroAssembler::get_poll_register(address instr_loc) {
2673   unsigned long z_instruction;
2674   unsigned int ilen = get_instruction(instr_loc, &z_instruction);
2675 
2676   if (ilen == 4 && is_z_tm(z_instruction)) {
2677     return (uint)inv_reg(z_instruction, 16, 32);  // base register
2678   } else if (ilen == 6 && is_z_tmy(z_instruction)) {
2679     return (uint)inv_reg(z_instruction, 16, 48);  // base register
2680   }
2681 
2682   ShouldNotReachHere();
2683   return 0;
2684 }
2685 
2686 void MacroAssembler::safepoint_poll(Label& slow_path, Register temp_reg) {
2687   const Address poll_byte_addr(Z_thread, in_bytes(Thread::polling_page_offset()) + 7 /* Big Endian */);
2688   // Armed page has poll_bit set.
2689   z_tm(poll_byte_addr, SafepointMechanism::poll_bit());
2690   z_brnaz(slow_path);
2691 }
2692 
2693 // Don't rely on register locking, always use Z_R1 as scratch register instead.
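// Illustrative sketch only (offset values chosen for illustration, not taken from callers):
//   offset fits a signed 20-bit displacement (e.g. 8192):  TMY  -8192(Z_SP),mask_stackbang
//   offset too large for a displacement:                   Z_R1 = Z_SP - offset; TM 0(Z_R1),mask_stackbang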
2694 void MacroAssembler::bang_stack_with_offset(int offset) {
2695   // Stack grows down, caller passes positive offset.
2696   assert(offset > 0, "must bang with positive offset");
2697   if (Displacement::is_validDisp(-offset)) {
2698     z_tmy(-offset, Z_SP, mask_stackbang);
2699   } else {
2700     add2reg(Z_R1, -offset, Z_SP);    // Do not destroy Z_SP!!!
2701     z_tm(0, Z_R1, mask_stackbang);  // Just banging.
2702   }
2703 }
2704 
2705 void MacroAssembler::reserved_stack_check(Register return_pc) {
2706   // Test if reserved zone needs to be enabled.
2707   Label no_reserved_zone_enabling;
2708   assert(return_pc == Z_R14, "Return pc must be in R14 before z_br() to StackOverflow stub.");
2709   BLOCK_COMMENT("reserved_stack_check {");
2710 
2711   z_clg(Z_SP, Address(Z_thread, JavaThread::reserved_stack_activation_offset()));
2712   z_brl(no_reserved_zone_enabling);
2713 
2714   // Enable reserved zone again, throw stack overflow exception.
2715   save_return_pc();
2716   push_frame_abi160(0);
2717   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), Z_thread);
2718   pop_frame();
2719   restore_return_pc();
2720 
2721   load_const_optimized(Z_R1, StubRoutines::throw_delayed_StackOverflowError_entry());
2722   // Don't use call() or z_basr(), they will invalidate Z_R14 which contains the return pc.
2723   z_br(Z_R1);
2724 
2725   should_not_reach_here();
2726 
2727   bind(no_reserved_zone_enabling);
2728   BLOCK_COMMENT("} reserved_stack_check");
2729 }
2730 
2731 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
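// Conceptual sketch of the TLAB fast-path allocation implemented below:
//   obj = thread->tlab_top();
//   end = obj + size;                        // size = var_size_in_bytes or con_size_in_bytes
//   if (end > thread->tlab_end()) goto slow_case;
//   thread->set_tlab_top(end);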
2732 void MacroAssembler::tlab_allocate(Register obj,
2733                                    Register var_size_in_bytes,
2734                                    int con_size_in_bytes,
2735                                    Register t1,
2736                                    Label& slow_case) {
2737   assert_different_registers(obj, var_size_in_bytes, t1);
2738   Register end = t1;
2739   Register thread = Z_thread;
2740 
2741   z_lg(obj, Address(thread, JavaThread::tlab_top_offset()));
2742   if (var_size_in_bytes == noreg) {
2743     z_lay(end, Address(obj, con_size_in_bytes));
2744   } else {
2745     z_lay(end, Address(obj, var_size_in_bytes));
2746   }
2747   z_cg(end, Address(thread, JavaThread::tlab_end_offset()));
2748   branch_optimized(bcondHigh, slow_case);
2749 
2750   // Update the tlab top pointer.
2751   z_stg(end, Address(thread, JavaThread::tlab_top_offset()));
2752 
2753   // Recover var_size_in_bytes if necessary.
2754   if (var_size_in_bytes == end) {
2755     z_sgr(var_size_in_bytes, obj);
2756   }
2757 }
2758 
2759 // Emitter for interface method lookup.
2760 //   input: recv_klass, intf_klass, itable_index
2761 //   output: method_result
2762 //   kills: itable_index, temp1_reg, Z_R0, Z_R1
2763 // TODO: Temp2_reg is unused. We may use this emitter also in the itable stubs.
2764 // If the register is still not needed by then, remove it.
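// Conceptual sketch of the lookup implemented below (field names simplified, not actual accessors):
//   for (entry = itable_start(recv_klass); ; entry += itable_offset_search_inc) {
//     if (entry->interface == NULL)       goto no_such_interface;  // end of itable
//     if (entry->interface == intf_klass) break;                   // match found
//   }
//   if (return_method)
//     method_result = *(recv_klass + entry->offset + itable_index * wordSize + method_offset);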
2765 void MacroAssembler::lookup_interface_method(Register           recv_klass,
2766                                              Register           intf_klass,
2767                                              RegisterOrConstant itable_index,
2768                                              Register           method_result,
2769                                              Register           temp1_reg,
2770                                              Label&             no_such_interface,
2771                                              bool               return_method) {
2772 
2773   const Register vtable_len = temp1_reg;    // Used to compute itable_entry_addr.
2774   const Register itable_entry_addr = Z_R1_scratch;
2775   const Register itable_interface = Z_R0_scratch;
2776 
2777   BLOCK_COMMENT("lookup_interface_method {");
2778 
2779   // Load start of itable entries into itable_entry_addr.
2780   z_llgf(vtable_len, Address(recv_klass, Klass::vtable_length_offset()));
2781   z_sllg(vtable_len, vtable_len, exact_log2(vtableEntry::size_in_bytes()));
2782 
2783   // Loop over all itable entries until desired interfaceOop(Rinterface) found.
2784   const int vtable_base_offset = in_bytes(Klass::vtable_start_offset());
2785 
2786   add2reg_with_index(itable_entry_addr,
2787                      vtable_base_offset + itableOffsetEntry::interface_offset_in_bytes(),
2788                      recv_klass, vtable_len);
2789 
2790   const int itable_offset_search_inc = itableOffsetEntry::size() * wordSize;
2791   Label     search;
2792 
2793   bind(search);
2794 
2795   // Handle IncompatibleClassChangeError.
2796   // If the entry is NULL then we've reached the end of the table
2797   // without finding the expected interface, so throw an exception.
2798   load_and_test_long(itable_interface, Address(itable_entry_addr));
2799   z_bre(no_such_interface);
2800 
2801   add2reg(itable_entry_addr, itable_offset_search_inc);
2802   z_cgr(itable_interface, intf_klass);
2803   z_brne(search);
2804 
2805   // Entry found and itable_entry_addr points to it, get offset of vtable for interface.
2806   if (return_method) {
2807     const int vtable_offset_offset = (itableOffsetEntry::offset_offset_in_bytes() -
2808                                       itableOffsetEntry::interface_offset_in_bytes()) -
2809                                      itable_offset_search_inc;
2810 
2811     // Compute itableMethodEntry and get method and entry point.
2812     // We use addressing with index and displacement, since the formula
2813     // for computing the entry's offset has a fixed and a dynamic part.
2814     // The latter depends on the matched interface entry and on whether
2815     // the itable index has been passed as a register or as a constant value.
2816     int method_offset = itableMethodEntry::method_offset_in_bytes();
2817                              // Fixed part (displacement), common operand.
2818     Register itable_offset = method_result;  // Dynamic part (index register).
2819 
2820     if (itable_index.is_register()) {
2821        // Compute the method's offset in that register, for the formula, see the
2822        // else-clause below.
2823        z_sllg(itable_offset, itable_index.as_register(), exact_log2(itableMethodEntry::size() * wordSize));
2824        z_agf(itable_offset, vtable_offset_offset, itable_entry_addr);
2825     } else {
2826       // Displacement increases.
2827       method_offset += itableMethodEntry::size() * wordSize * itable_index.as_constant();
2828 
2829       // Load index from itable.
2830       z_llgf(itable_offset, vtable_offset_offset, itable_entry_addr);
2831     }
2832 
2833     // Finally load the method's oop.
2834     z_lg(method_result, method_offset, itable_offset, recv_klass);
2835   }
2836   BLOCK_COMMENT("} lookup_interface_method");
2837 }
2838 
2839 // Lookup for virtual method invocation.
2840 void MacroAssembler::lookup_virtual_method(Register           recv_klass,
2841                                            RegisterOrConstant vtable_index,
2842                                            Register           method_result) {
2843   assert_different_registers(recv_klass, vtable_index.register_or_noreg());
2844   assert(vtableEntry::size() * wordSize == wordSize,
2845          "else adjust the scaling in the code below");
2846 
2847   BLOCK_COMMENT("lookup_virtual_method {");
2848 
2849   const int base = in_bytes(Klass::vtable_start_offset());
2850 
2851   if (vtable_index.is_constant()) {
2852     // Load with base + disp.
2853     Address vtable_entry_addr(recv_klass,
2854                               vtable_index.as_constant() * wordSize +
2855                               base +
2856                               vtableEntry::method_offset_in_bytes());
2857 
2858     z_lg(method_result, vtable_entry_addr);
2859   } else {
2860     // Shift index properly and load with base + index + disp.
2861     Register vindex = vtable_index.as_register();
2862     Address  vtable_entry_addr(recv_klass, vindex,
2863                                base + vtableEntry::method_offset_in_bytes());
2864 
2865     z_sllg(vindex, vindex, exact_log2(wordSize));
2866     z_lg(method_result, vtable_entry_addr);
2867   }
2868   BLOCK_COMMENT("} lookup_virtual_method");
2869 }
2870 
2871 // Factor out code to call ic_miss_handler.
2872 // Generate code to call the inline cache miss handler.
2873 //
2874 // In most cases, this code will be generated out-of-line.
2875 // The method parameters are intended to provide some variability.
2876 //   ICM          - Label which has to be bound to the start of useful code (past any traps).
2877 //   trapMarker   - Marking byte for the generated illtrap instructions (if any).
2878 //                  Any value except 0x00 is supported.
2879 //                  = 0x00 - do not generate illtrap instructions.
2880 //                         use nops to fill unused space.
2881 //   requiredSize - required size of the generated code. If the actually
2882 //                  generated code is smaller, use padding instructions to fill up.
2883 //                  = 0 - no size requirement, no padding.
2884 //   scratch      - scratch register to hold branch target address.
2885 //
2886 //  The method returns the code offset of the bound label.
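// Typical use (sketch only; the trap marker value is chosen for illustration):
//   Label ic_miss;
//   ...                                          // emit entry code that branches to ic_miss
//   call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);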
2887 unsigned int MacroAssembler::call_ic_miss_handler(Label& ICM, int trapMarker, int requiredSize, Register scratch) {
2888   intptr_t startOffset = offset();
2889 
2890   // Prevent entry at content_begin().
2891   if (trapMarker != 0) {
2892     z_illtrap(trapMarker);
2893   }
2894 
2895   // Load address of inline cache miss code into scratch register
2896   // and branch to cache miss handler.
2897   BLOCK_COMMENT("IC miss handler {");
2898   BIND(ICM);
2899   unsigned int   labelOffset = offset();
2900   AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
2901 
2902   load_const_optimized(scratch, icmiss);
2903   z_br(scratch);
2904 
2905   // Fill unused space.
2906   if (requiredSize > 0) {
2907     while ((offset() - startOffset) < requiredSize) {
2908       if (trapMarker == 0) {
2909         z_nop();
2910       } else {
2911         z_illtrap(trapMarker);
2912       }
2913     }
2914   }
2915   BLOCK_COMMENT("} IC miss handler");
2916   return labelOffset;
2917 }
2918 
2919 void MacroAssembler::nmethod_UEP(Label& ic_miss) {
2920   Register ic_reg       = Z_inline_cache;
2921   int      klass_offset = oopDesc::klass_offset_in_bytes();
2922   if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
2923     if (VM_Version::has_CompareBranch()) {
2924       z_cgij(Z_ARG1, 0, Assembler::bcondEqual, ic_miss);
2925     } else {
2926       z_ltgr(Z_ARG1, Z_ARG1);
2927       z_bre(ic_miss);
2928     }
2929   }
2930   // Compare cached class against klass from receiver.
2931   compare_klass_ptr(ic_reg, klass_offset, Z_ARG1, false);
2932   z_brne(ic_miss);
2933 }
2934 
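// Conceptual sketch of the fast path implemented below:
//   if (sub_klass == super_klass)                               -> L_success
//   if (*(sub_klass + super_check_offset) == super_klass)       -> L_success
//   else if (super_check_offset == secondary_super_cache slot)  -> L_slow_path
//   else                                                        -> L_failure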
2935 void MacroAssembler::check_klass_subtype_fast_path(Register   sub_klass,
2936                                                    Register   super_klass,
2937                                                    Register   temp1_reg,
2938                                                    Label*     L_success,
2939                                                    Label*     L_failure,
2940                                                    Label*     L_slow_path,
2941                                                    RegisterOrConstant super_check_offset) {
2942 
2943   const int sc_offset  = in_bytes(Klass::secondary_super_cache_offset());
2944   const int sco_offset = in_bytes(Klass::super_check_offset_offset());
2945 
2946   bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2947   bool need_slow_path = (must_load_sco ||
2948                          super_check_offset.constant_or_zero() == sc_offset);
2949 
2950   // Input registers must not overlap.
2951   assert_different_registers(sub_klass, super_klass, temp1_reg);
2952   if (super_check_offset.is_register()) {
2953     assert_different_registers(sub_klass, super_klass,
2954                                super_check_offset.as_register());
2955   } else if (must_load_sco) {
2956     assert(temp1_reg != noreg, "supply either a temp or a register offset");
2957   }
2958 
2959   const Register Rsuper_check_offset = temp1_reg;
2960 
2961   NearLabel L_fallthrough;
2962   int label_nulls = 0;
2963   if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
2964   if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
2965   if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2966   assert(label_nulls <= 1 ||
2967          (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2968          "at most one NULL in the batch, usually");
2969 
2970   BLOCK_COMMENT("check_klass_subtype_fast_path {");
2971   // If the pointers are equal, we are done (e.g., String[] elements).
2972   // This self-check enables sharing of secondary supertype arrays among
2973   // non-primary types such as array-of-interface. Otherwise, each such
2974   // type would need its own customized SSA.
2975   // We move this check to the front of the fast path because many
2976   // type checks are in fact trivially successful in this manner,
2977   // so we get a nicely predicted branch right at the start of the check.
2978   compare64_and_branch(sub_klass, super_klass, bcondEqual, *L_success);
2979 
2980   // Check the supertype display, which is uint.
2981   if (must_load_sco) {
2982     z_llgf(Rsuper_check_offset, sco_offset, super_klass);
2983     super_check_offset = RegisterOrConstant(Rsuper_check_offset);
2984   }
2985   Address super_check_addr(sub_klass, super_check_offset, 0);
2986   z_cg(super_klass, super_check_addr); // compare w/ displayed supertype
2987 
2988   // This check has worked decisively for primary supers.
2989   // Secondary supers are sought in the super_cache ('super_cache_addr').
2990   // (Secondary supers are interfaces and very deeply nested subtypes.)
2991   // This works in the same check above because of a tricky aliasing
2992   // between the super_cache and the primary super display elements.
2993   // (The 'super_check_addr' can address either, as the case requires.)
2994   // Note that the cache is updated below if it does not help us find
2995   // what we need immediately.
2996   // So if it was a primary super, we can just fail immediately.
2997   // Otherwise, it's the slow path for us (no success at this point).
2998 
2999   // Hacked jmp, which may only be used just before L_fallthrough.
3000 #define final_jmp(label)                                                \
3001   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3002   else                            { branch_optimized(Assembler::bcondAlways, label); } /*omit semicolon*/
3003 
3004   if (super_check_offset.is_register()) {
3005     branch_optimized(Assembler::bcondEqual, *L_success);
3006     z_cfi(super_check_offset.as_register(), sc_offset);
3007     if (L_failure == &L_fallthrough) {
3008       branch_optimized(Assembler::bcondEqual, *L_slow_path);
3009     } else {
3010       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3011       final_jmp(*L_slow_path);
3012     }
3013   } else if (super_check_offset.as_constant() == sc_offset) {
3014     // Need a slow path; fast failure is impossible.
3015     if (L_slow_path == &L_fallthrough) {
3016       branch_optimized(Assembler::bcondEqual, *L_success);
3017     } else {
3018       branch_optimized(Assembler::bcondNotEqual, *L_slow_path);
3019       final_jmp(*L_success);
3020     }
3021   } else {
3022     // No slow path; it's a fast decision.
3023     if (L_failure == &L_fallthrough) {
3024       branch_optimized(Assembler::bcondEqual, *L_success);
3025     } else {
3026       branch_optimized(Assembler::bcondNotEqual, *L_failure);
3027       final_jmp(*L_success);
3028     }
3029   }
3030 
3031   bind(L_fallthrough);
3032 #undef local_brc
3033 #undef final_jmp
3034   BLOCK_COMMENT("} check_klass_subtype_fast_path");
3035   // fallthru (to slow path)
3036 }
3037 
3038 void MacroAssembler::check_klass_subtype_slow_path(Register Rsubklass,
3039                                                    Register Rsuperklass,
3040                                                    Register Rarray_ptr,  // tmp
3041                                                    Register Rlength,     // tmp
3042                                                    Label* L_success,
3043                                                    Label* L_failure) {
3044   // Input registers must not overlap.
3045   // Also check for R1 which is explicitly used here.
3046   assert_different_registers(Z_R1, Rsubklass, Rsuperklass, Rarray_ptr, Rlength);
3047   NearLabel L_fallthrough;
3048   int label_nulls = 0;
3049   if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
3050   if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
3051   assert(label_nulls <= 1, "at most one NULL in the batch");
3052 
3053   const int ss_offset = in_bytes(Klass::secondary_supers_offset());
3054   const int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
3055 
3056   const int length_offset = Array<Klass*>::length_offset_in_bytes();
3057   const int base_offset   = Array<Klass*>::base_offset_in_bytes();
3058 
3059   // Hacked jmp, which may only be used just before L_fallthrough.
3060 #define final_jmp(label)                                                \
3061   if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
3062   else                            branch_optimized(Assembler::bcondAlways, label) /*omit semicolon*/
3063 
3064   NearLabel loop_iterate, loop_count, match;
3065 
3066   BLOCK_COMMENT("check_klass_subtype_slow_path {");
3067   z_lg(Rarray_ptr, ss_offset, Rsubklass);
3068 
3069   load_and_test_int(Rlength, Address(Rarray_ptr, length_offset));
3070   branch_optimized(Assembler::bcondZero, *L_failure);
3071 
3072   // Oops in the table are no longer compressed.
3073   z_cg(Rsuperklass, base_offset, Rarray_ptr); // Check array element for match.
3074   z_bre(match);                               // Shortcut for array length = 1.
3075 
3076   // No match yet, so we must walk the array's elements.
3077   z_lngfr(Rlength, Rlength);
3078   z_sllg(Rlength, Rlength, LogBytesPerWord); // -#bytes of cache array
3079   z_llill(Z_R1, BytesPerWord);               // Set increment/end index.
3080   add2reg(Rlength, 2 * BytesPerWord);        // start index  = -(n-2)*BytesPerWord
3081   z_slgr(Rarray_ptr, Rlength);               // start addr: +=  (n-2)*BytesPerWord
3082   z_bru(loop_count);
3083 
3084   BIND(loop_iterate);
3085   z_cg(Rsuperklass, base_offset, Rlength, Rarray_ptr); // Check array element for match.
3086   z_bre(match);
3087   BIND(loop_count);
3088   z_brxlg(Rlength, Z_R1, loop_iterate);
3089 
3090   // Rsuperklass not found among secondary super classes -> failure.
3091   branch_optimized(Assembler::bcondAlways, *L_failure);
3092 
3093   // Got a hit. Return success (zero result). Set cache.
3094   // Cache load doesn't happen here. For speed it is directly emitted by the compiler.
3095 
3096   BIND(match);
3097 
3098   z_stg(Rsuperklass, sc_offset, Rsubklass); // Save result to cache.
3099 
3100   final_jmp(*L_success);
3101 
3102   // Exit to the surrounding code.
3103   BIND(L_fallthrough);
3104 #undef local_brc
3105 #undef final_jmp
3106   BLOCK_COMMENT("} check_klass_subtype_slow_path");
3107 }
3108 
3109 // Emitter for combining fast and slow path.
3110 void MacroAssembler::check_klass_subtype(Register sub_klass,
3111                                          Register super_klass,
3112                                          Register temp1_reg,
3113                                          Register temp2_reg,
3114                                          Label&   L_success) {
3115   NearLabel failure;
3116   BLOCK_COMMENT(err_msg("check_klass_subtype(%s subclass of %s) {", sub_klass->name(), super_klass->name()));
3117   check_klass_subtype_fast_path(sub_klass, super_klass, temp1_reg,
3118                                 &L_success, &failure, NULL);
3119   check_klass_subtype_slow_path(sub_klass, super_klass,
3120                                 temp1_reg, temp2_reg, &L_success, NULL);
3121   BIND(failure);
3122   BLOCK_COMMENT("} check_klass_subtype");
3123 }
3124 
3125 void MacroAssembler::clinit_barrier(Register klass, Register thread, Label* L_fast_path, Label* L_slow_path) {
3126   assert(L_fast_path != NULL || L_slow_path != NULL, "at least one is required");
3127 
3128   Label L_fallthrough;
3129   if (L_fast_path == NULL) {
3130     L_fast_path = &L_fallthrough;
3131   } else if (L_slow_path == NULL) {
3132     L_slow_path = &L_fallthrough;
3133   }
3134 
3135   // Fast path check: class is fully initialized
3136   z_cli(Address(klass, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3137   z_bre(*L_fast_path);
3138 
3139   // Fast path check: current thread is initializer thread
3140   z_cg(thread, Address(klass, InstanceKlass::init_thread_offset()));
3141   if (L_slow_path == &L_fallthrough) {
3142     z_bre(*L_fast_path);
3143   } else if (L_fast_path == &L_fallthrough) {
3144     z_brne(*L_slow_path);
3145   } else {
3146     Unimplemented();
3147   }
3148 
3149   bind(L_fallthrough);
3150 }
3151 
3152 // Increment a counter at counter_address when the eq condition code is
3153 // set. Kills registers tmp1_reg and tmp2_reg and preserves the condition code.
3154 void MacroAssembler::increment_counter_eq(address counter_address, Register tmp1_reg, Register tmp2_reg) {
3155   Label l;
3156   z_brne(l);
3157   load_const(tmp1_reg, counter_address);
3158   add2mem_32(Address(tmp1_reg), 1, tmp2_reg);
3159   z_cr(tmp1_reg, tmp1_reg); // Set cc to eq.
3160   bind(l);
3161 }
3162 
3163 // Semantics are dependent on the slow_case label:
3164 //   If the slow_case label is not NULL, failure to biased-lock the object
3165 //   transfers control to the location of the slow_case label. If the
3166 //   object could be biased-locked, control is transferred to the done label.
3167 //   The condition code is unpredictable.
3168 //
3169 //   If the slow_case label is NULL, failure to biased-lock the object results
3170 //   in a transfer of control to the done label with a condition code of not_equal.
3171 //   If the biased-lock could be successfully obtained, control is transferred to
3172 //   the done label with a condition code of equal.
3173 //   It is mandatory to react to the condition code at the done label.
3174 //
3175 void MacroAssembler::biased_locking_enter(Register  obj_reg,
3176                                           Register  mark_reg,
3177                                           Register  temp_reg,
3178                                           Register  temp2_reg,    // May be Z_R0!
3179                                           Label    &done,
3180                                           Label    *slow_case) {
3181   assert(UseBiasedLocking, "why call this otherwise?");
3182   assert_different_registers(obj_reg, mark_reg, temp_reg, temp2_reg);
3183 
3184   Label cas_label; // Try, if implemented, CAS locking. Fall thru to slow path otherwise.
3185 
3186   BLOCK_COMMENT("biased_locking_enter {");
3187 
3188   // Biased locking
3189   // See whether the lock is currently biased toward our thread and
3190   // whether the epoch is still valid.
3191   // Note that the runtime guarantees sufficient alignment of JavaThread
3192   // pointers to allow age to be placed into low bits.
3193   assert(markWord::age_shift == markWord::lock_bits + markWord::biased_lock_bits,
3194          "biased locking makes assumptions about bit layout");
3195   z_lr(temp_reg, mark_reg);
3196   z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
3197   z_chi(temp_reg, markWord::biased_lock_pattern);
3198   z_brne(cas_label);  // Try cas if object is not biased, i.e. cannot be biased locked.
3199 
3200   load_prototype_header(temp_reg, obj_reg);
3201   load_const_optimized(temp2_reg, ~((int) markWord::age_mask_in_place));
3202 
3203   z_ogr(temp_reg, Z_thread);
3204   z_xgr(temp_reg, mark_reg);
3205   z_ngr(temp_reg, temp2_reg);
3206   if (PrintBiasedLockingStatistics) {
3207     increment_counter_eq((address) BiasedLocking::biased_lock_entry_count_addr(), mark_reg, temp2_reg);
3208     // Restore mark_reg.
3209     z_lg(mark_reg, oopDesc::mark_offset_in_bytes(), obj_reg);
3210   }
3211   branch_optimized(Assembler::bcondEqual, done);  // Biased lock obtained, return success.
3212 
3213   Label try_revoke_bias;
3214   Label try_rebias;
3215   Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
3216 
3217   //----------------------------------------------------------------------------
3218   // At this point we know that the header has the bias pattern and
3219   // that we are not the bias owner in the current epoch. We need to
3220   // figure out more details about the state of the header in order to
3221   // know what operations can be legally performed on the object's
3222   // header.
3223 
3224   // If the low three bits in the xor result aren't clear, that means
3225   // the prototype header is no longer biased and we have to revoke
3226   // the bias on this object.
3227   z_tmll(temp_reg, markWord::biased_lock_mask_in_place);
3228   z_brnaz(try_revoke_bias);
3229 
3230   // Biasing is still enabled for this data type. See whether the
3231   // epoch of the current bias is still valid, meaning that the epoch
3232   // bits of the mark word are equal to the epoch bits of the
3233   // prototype header. (Note that the prototype header's epoch bits
3234   // only change at a safepoint.) If not, attempt to rebias the object
3235   // toward the current thread. Note that we must be absolutely sure
3236   // that the current epoch is invalid in order to do this because
3237   // otherwise the manipulations it performs on the mark word are
3238   // illegal.
3239   z_tmll(temp_reg, markWord::epoch_mask_in_place);
3240   z_brnaz(try_rebias);
3241 
3242   //----------------------------------------------------------------------------
3243   // The epoch of the current bias is still valid but we know nothing
3244   // about the owner; it might be set or it might be clear. Try to
3245   // acquire the bias of the object using an atomic operation. If this
3246   // fails we will go in to the runtime to revoke the object's bias.
3247   // Note that we first construct the presumed unbiased header so we
3248   // don't accidentally blow away another thread's valid bias.
3249   z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place |
3250          markWord::epoch_mask_in_place);
3251   z_lgr(temp_reg, Z_thread);
3252   z_llgfr(mark_reg, mark_reg);
3253   z_ogr(temp_reg, mark_reg);
3254 
3255   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3256 
3257   z_csg(mark_reg, temp_reg, 0, obj_reg);
3258 
3259   // If the biasing toward our thread failed, this means that
3260   // another thread succeeded in biasing it toward itself and we
3261   // need to revoke that bias. The revocation will occur in the
3262   // interpreter runtime in the slow case.
3263 
3264   if (PrintBiasedLockingStatistics) {
3265     increment_counter_eq((address) BiasedLocking::anonymously_biased_lock_entry_count_addr(),
3266                          temp_reg, temp2_reg);
3267   }
3268   if (slow_case != NULL) {
3269     branch_optimized(Assembler::bcondNotEqual, *slow_case); // Biased lock not obtained, need to go the long way.
3270   }
3271   branch_optimized(Assembler::bcondAlways, done);           // Biased lock status given in condition code.
3272 
3273   //----------------------------------------------------------------------------
3274   bind(try_rebias);
3275   // At this point we know the epoch has expired, meaning that the
3276   // current "bias owner", if any, is actually invalid. Under these
3277   // circumstances _only_, we are allowed to use the current header's
3278   // value as the comparison value when doing the cas to acquire the
3279   // bias in the current epoch. In other words, we allow transfer of
3280   // the bias from one thread to another directly in this situation.
3281 
3282   z_nilf(mark_reg, markWord::biased_lock_mask_in_place | markWord::age_mask_in_place | markWord::epoch_mask_in_place);
3283   load_prototype_header(temp_reg, obj_reg);
3284   z_llgfr(mark_reg, mark_reg);
3285 
3286   z_ogr(temp_reg, Z_thread);
3287 
3288   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3289 
3290   z_csg(mark_reg, temp_reg, 0, obj_reg);
3291 
3292   // If the biasing toward our thread failed, this means that
3293   // another thread succeeded in biasing it toward itself and we
3294   // need to revoke that bias. The revocation will occur in the
3295   // interpreter runtime in the slow case.
3296 
3297   if (PrintBiasedLockingStatistics) {
3298     increment_counter_eq((address) BiasedLocking::rebiased_lock_entry_count_addr(), temp_reg, temp2_reg);
3299   }
3300   if (slow_case != NULL) {
3301     branch_optimized(Assembler::bcondNotEqual, *slow_case);  // Biased lock not obtained, need to go the long way.
3302   }
3303   z_bru(done);           // Biased lock status given in condition code.
3304 
3305   //----------------------------------------------------------------------------
3306   bind(try_revoke_bias);
3307   // The prototype mark in the klass doesn't have the bias bit set any
3308   // more, indicating that objects of this data type are not supposed
3309   // to be biased any more. We are going to try to reset the mark of
3310   // this object to the prototype value and fall through to the
3311   // CAS-based locking scheme. Note that if our CAS fails, it means
3312   // that another thread raced us for the privilege of revoking the
3313   // bias of this particular object, so it's okay to continue in the
3314   // normal locking code.
3315   load_prototype_header(temp_reg, obj_reg);
3316 
3317   assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
3318 
3319   z_csg(mark_reg, temp_reg, 0, obj_reg);
3320 
3321   // Fall through to the normal CAS-based lock, because no matter what
3322   // the result of the above CAS, some thread must have succeeded in
3323   // removing the bias bit from the object's header.
3324   if (PrintBiasedLockingStatistics) {
3325     // z_cgr(mark_reg, temp2_reg);
3326     increment_counter_eq((address) BiasedLocking::revoked_lock_entry_count_addr(), temp_reg, temp2_reg);
3327   }
3328 
3329   bind(cas_label);
3330   BLOCK_COMMENT("} biased_locking_enter");
3331 }
3332 
3333 void MacroAssembler::biased_locking_exit(Register mark_addr, Register temp_reg, Label& done) {
3334   // Check for biased locking unlock case, which is a no-op
3335   // Note: we do not have to check the thread ID for two reasons.
3336   // First, the interpreter checks for IllegalMonitorStateException at
3337   // a higher level. Second, if the bias was revoked while we held the
3338   // lock, the object could not be rebiased toward another thread, so
3339   // the bias bit would be clear.
3340   BLOCK_COMMENT("biased_locking_exit {");
3341 
3342   z_lg(temp_reg, 0, mark_addr);
3343   z_nilf(temp_reg, markWord::biased_lock_mask_in_place);
3344 
3345   z_chi(temp_reg, markWord::biased_lock_pattern);
3346   z_bre(done);
3347   BLOCK_COMMENT("} biased_locking_exit");
3348 }
3349 
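// Conceptual sketch of the fast-path lock implemented below (condition code EQ on success):
//   mark = obj->mark();
//   if (mark has monitor_value set)    try CAS(monitor->owner: NULL -> Z_thread);
//   else if (CAS(obj->mark: mark|unlocked_value -> box) succeeds)   locked;
//   else                               check for a recursive stack lock (mark points into our stack);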
3350 void MacroAssembler::compiler_fast_lock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3351   Register displacedHeader = temp1;
3352   Register currentHeader = temp1;
3353   Register temp = temp2;
3354   NearLabel done, object_has_monitor;
3355 
3356   BLOCK_COMMENT("compiler_fast_lock_object {");
3357 
3358   // Load markWord from oop into mark.
3359   z_lg(displacedHeader, 0, oop);
3360 
3361   if (try_bias) {
3362     biased_locking_enter(oop, displacedHeader, temp, Z_R0, done);
3363   }
3364 
3365   // Handle existing monitor.
3366   // The object has an existing monitor iff (mark & monitor_value) != 0.
3367   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3368   z_lr(temp, displacedHeader);
3369   z_nill(temp, markWord::monitor_value);
3370   z_brne(object_has_monitor);
3371 
3372   // Set mark to markWord | markWord::unlocked_value.
3373   z_oill(displacedHeader, markWord::unlocked_value);
3374 
3375   // Load Compare Value application register.
3376 
3377   // Initialize the box (must happen before we update the object mark).
3378   z_stg(displacedHeader, BasicLock::displaced_header_offset_in_bytes(), box);
3379 
3380   // Memory Fence (in cmpxchgd)
3381   // Compare object markWord with mark and if equal exchange scratch1 with object markWord.
3382 
3383   // If the compare-and-swap succeeded, then we found an unlocked object and we
3384   // have now locked it.
3385   z_csg(displacedHeader, box, 0, oop);
3386   assert(currentHeader==displacedHeader, "must be same register"); // Identified two registers from z/Architecture.
3387   z_bre(done);
3388 
3389   // We did not see an unlocked object so try the fast recursive case.
3390 
3391   z_sgr(currentHeader, Z_SP);
3392   load_const_optimized(temp, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
3393 
3394   z_ngr(currentHeader, temp);
3395   //   z_brne(done);
3396   //   z_release();
3397   z_stg(currentHeader/*==0 or not 0*/, BasicLock::displaced_header_offset_in_bytes(), box);
3398 
3399   z_bru(done);
3400 
3401   Register zero = temp;
3402   Register monitor_tagged = displacedHeader; // Tagged with markWord::monitor_value.
3403   bind(object_has_monitor);
3404   // The object's monitor m is unlocked iff m->owner == NULL,
3405   // otherwise m->owner may contain a thread or a stack address.
3406   //
3407   // Try to CAS m->owner from NULL to current thread.
3408   z_lghi(zero, 0);
3409   // If m->owner is null, then csg succeeds and sets m->owner=THREAD and CR=EQ.
3410   z_csg(zero, Z_thread, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), monitor_tagged);
3411   // Store a non-null value into the box.
3412   z_stg(box, BasicLock::displaced_header_offset_in_bytes(), box);
3413 #ifdef ASSERT
3414   z_brne(done);
3415   // We've acquired the monitor, check some invariants.
3416   // Invariant 1: _recursions should be 0.
3417   asm_assert_mem8_is_zero(OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions), monitor_tagged,
3418                           "monitor->_recursions should be 0", -1);
3419   z_ltgr(zero, zero); // Set CR=EQ.
3420 #endif
3421   bind(done);
3422 
3423   BLOCK_COMMENT("} compiler_fast_lock_object");
3424   // If locking was successful, CR should indicate 'EQ'.
3425   // The compiler or the native wrapper generates a branch to the runtime call
3426   // _complete_monitor_locking_Java.
3427 }
3428 
3429 void MacroAssembler::compiler_fast_unlock_object(Register oop, Register box, Register temp1, Register temp2, bool try_bias) {
3430   Register displacedHeader = temp1;
3431   Register currentHeader = temp2;
3432   Register temp = temp1;
3433   Register monitor = temp2;
3434 
3435   Label done, object_has_monitor;
3436 
3437   BLOCK_COMMENT("compiler_fast_unlock_object {");
3438 
3439   if (try_bias) {
3440     biased_locking_exit(oop, currentHeader, done);
3441   }
3442 
3443   // Find the lock address and load the displaced header from the stack.
3444   // If the displaced header is zero, we have a recursive unlock.
3445   load_and_test_long(displacedHeader, Address(box, BasicLock::displaced_header_offset_in_bytes()));
3446   z_bre(done);
3447 
3448   // Handle existing monitor.
3449   // The object has an existing monitor iff (mark & monitor_value) != 0.
3450   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);
3451   guarantee(Immediate::is_uimm16(markWord::monitor_value), "must be half-word");
3452   z_nill(currentHeader, markWord::monitor_value);
3453   z_brne(object_has_monitor);
3454 
3455   // Check if it is still a lightweight lock; this is true if we see
3456   // the stack address of the basicLock in the markWord of the object.
3457   // Copy box to currentHeader such that csg does not kill it.
3458   z_lgr(currentHeader, box);
3459   z_csg(currentHeader, displacedHeader, 0, oop);
3460   z_bru(done); // Csg sets CR as desired.
3461 
3462   // Handle existing monitor.
3463   bind(object_has_monitor);
3464   z_lg(currentHeader, oopDesc::mark_offset_in_bytes(), oop);    // CurrentHeader is tagged with monitor_value set.
3465   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(recursions)));
3466   z_brne(done);
3467   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner)));
3468   z_brne(done);
3469   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(EntryList)));
3470   z_brne(done);
3471   load_and_test_long(temp, Address(currentHeader, OM_OFFSET_NO_MONITOR_VALUE_TAG(cxq)));
3472   z_brne(done);
3473   z_release();
3474   z_stg(temp/*=0*/, OM_OFFSET_NO_MONITOR_VALUE_TAG(owner), currentHeader);
3475 
3476   bind(done);
3477 
3478   BLOCK_COMMENT("} compiler_fast_unlock_object");
3479   // flag == EQ indicates success
3480   // flag == NE indicates failure
3481 }
3482 
3483 void MacroAssembler::resolve_jobject(Register value, Register tmp1, Register tmp2) {
3484   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
3485   bs->resolve_jobject(this, value, tmp1, tmp2);
3486 }
3487 
3488 // Last_Java_sp must comply to the rules in frame_s390.hpp.
3489 void MacroAssembler::set_last_Java_frame(Register last_Java_sp, Register last_Java_pc, bool allow_relocation) {
3490   BLOCK_COMMENT("set_last_Java_frame {");
3491 
3492   // Always set last_Java_pc and flags first because once last_Java_sp
3493   // is visible, has_last_Java_frame is true and users will look at the
3494   // rest of the fields. (Note: flags should always be zero before we
3495   // get here so doesn't need to be set.)
3496 
3497   // Verify that last_Java_pc was zeroed on return to Java.
3498   if (allow_relocation) {
3499     asm_assert_mem8_is_zero(in_bytes(JavaThread::last_Java_pc_offset()),
3500                             Z_thread,
3501                             "last_Java_pc not zeroed before leaving Java",
3502                             0x200);
3503   } else {
3504     asm_assert_mem8_is_zero_static(in_bytes(JavaThread::last_Java_pc_offset()),
3505                                    Z_thread,
3506                                    "last_Java_pc not zeroed before leaving Java",
3507                                    0x200);
3508   }
3509 
3510   // When returning from calling out from Java mode the frame anchor's
3511   // last_Java_pc will always be set to NULL. It is set here so that
3512   // if we are doing a call to native (not VM) that we capture the
3513   // known pc and don't have to rely on the native call having a
3514   // standard frame linkage where we can find the pc.
3515   if (last_Java_pc!=noreg) {
3516     z_stg(last_Java_pc, Address(Z_thread, JavaThread::last_Java_pc_offset()));
3517   }
3518 
3519   // This membar release is not required on z/Architecture, since the sequence of stores
3520   // is maintained. Nevertheless, we leave it in to document the required ordering.
3521   // The implementation of z_release() should be empty.
3522   // z_release();
3523 
3524   z_stg(last_Java_sp, Address(Z_thread, JavaThread::last_Java_sp_offset()));
3525   BLOCK_COMMENT("} set_last_Java_frame");
3526 }
3527 
3528 void MacroAssembler::reset_last_Java_frame(bool allow_relocation) {
3529   BLOCK_COMMENT("reset_last_Java_frame {");
3530 
3531   if (allow_relocation) {
3532     asm_assert_mem8_isnot_zero(in_bytes(JavaThread::last_Java_sp_offset()),
3533                                Z_thread,
3534                                "SP was not set, still zero",
3535                                0x202);
3536   } else {
3537     asm_assert_mem8_isnot_zero_static(in_bytes(JavaThread::last_Java_sp_offset()),
3538                                       Z_thread,
3539                                       "SP was not set, still zero",
3540                                       0x202);
3541   }
3542 
3543   // _last_Java_sp = 0
3544   // Clearing storage must be atomic here, so don't use clear_mem()!
3545   store_const(Address(Z_thread, JavaThread::last_Java_sp_offset()), 0);
3546 
3547   // _last_Java_pc = 0
3548   store_const(Address(Z_thread, JavaThread::last_Java_pc_offset()), 0);
3549 
3550   BLOCK_COMMENT("} reset_last_Java_frame");
3551   return;
3552 }
3553 
3554 void MacroAssembler::set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1, bool allow_relocation) {
3555   assert_different_registers(sp, tmp1);
3556 
3557   // We cannot trust that code generated by the C++ compiler saves R14
3558   // to z_abi_160.return_pc, because sometimes it spills R14 using stmg at
3559   // z_abi_160.gpr14 (e.g. InterpreterRuntime::_new()).
3560   // Therefore we load the PC into tmp1 and let set_last_Java_frame() save
3561   // it into the frame anchor.
3562   get_PC(tmp1);
3563   set_last_Java_frame(/*sp=*/sp, /*pc=*/tmp1, allow_relocation);
3564 }
3565 
3566 void MacroAssembler::set_thread_state(JavaThreadState new_state) {
3567   z_release();
3568 
3569   assert(Immediate::is_uimm16(_thread_max_state), "enum value out of range for instruction");
3570   assert(sizeof(JavaThreadState) == sizeof(int), "enum value must have base type int");
3571   store_const(Address(Z_thread, JavaThread::thread_state_offset()), new_state, Z_R0, false);
3572 }
3573 
3574 void MacroAssembler::get_vm_result(Register oop_result) {
3575   verify_thread();
3576 
3577   z_lg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3578   clear_mem(Address(Z_thread, JavaThread::vm_result_offset()), sizeof(void*));
3579 
3580   verify_oop(oop_result, FILE_AND_LINE);
3581 }
3582 
3583 void MacroAssembler::get_vm_result_2(Register result) {
3584   verify_thread();
3585 
3586   z_lg(result, Address(Z_thread, JavaThread::vm_result_2_offset()));
3587   clear_mem(Address(Z_thread, JavaThread::vm_result_2_offset()), sizeof(void*));
3588 }
3589 
3590 // We require that C code which does not return a value in vm_result will
3591 // leave it undisturbed.
3592 void MacroAssembler::set_vm_result(Register oop_result) {
3593   z_stg(oop_result, Address(Z_thread, JavaThread::vm_result_offset()));
3594 }
3595 
3596 // Explicit null checks (used for method handle code).
3597 void MacroAssembler::null_check(Register reg, Register tmp, int64_t offset) {
3598   if (!ImplicitNullChecks) {
3599     NearLabel ok;
3600 
3601     compare64_and_branch(reg, (intptr_t) 0, Assembler::bcondNotEqual, ok);
3602 
3603     // We just put the address into reg if it was 0 (tmp==Z_R0 is allowed so we can't use it for the address).
3604     address exception_entry = Interpreter::throw_NullPointerException_entry();
3605     load_absolute_address(reg, exception_entry);
3606     z_br(reg);
3607 
3608     bind(ok);
3609   } else {
3610     if (needs_explicit_null_check((intptr_t)offset)) {
3611       // Provoke OS NULL exception if reg = NULL by
3612       // accessing M[reg] w/o changing any registers.
3613       z_lg(tmp, 0, reg);
3614     }
3615     // else
3616       // Nothing to do, (later) access of M[reg + offset]
3617       // will provoke OS NULL exception if reg = NULL.
3618   }
3619 }
3620 
3621 //-------------------------------------
3622 //  Compressed Klass Pointers
3623 //-------------------------------------
3624 
3625 // Klass oop manipulations if compressed.
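// Conceptual encoding performed below, with base/shift taken from CompressedKlassPointers:
//   narrow_klass = (klass - base) >> shift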
3626 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
3627   Register current = (src != noreg) ? src : dst; // Klass is in dst if no src provided. (dst == src) also possible.
3628   address  base    = CompressedKlassPointers::base();
3629   int      shift   = CompressedKlassPointers::shift();
3630   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3631 
3632   BLOCK_COMMENT("cKlass encoder {");
3633 
3634 #ifdef ASSERT
3635   Label ok;
3636   z_tmll(current, KlassAlignmentInBytes-1); // Check alignment.
3637   z_brc(Assembler::bcondAllZero, ok);
3638   // The plain disassembler does not recognize illtrap. It instead displays
3639   // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
3640   // the proper beginning of the next instruction.
3641   z_illtrap(0xee);
3642   z_illtrap(0xee);
3643   bind(ok);
3644 #endif
3645 
3646   if (base != NULL) {
3647     unsigned int base_h = ((unsigned long)base)>>32;
3648     unsigned int base_l = (unsigned int)((unsigned long)base);
3649     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3650       lgr_if_needed(dst, current);
3651       z_aih(dst, -((int)base_h));     // Base has no set bits in lower half.
3652     } else if ((base_h == 0) && (base_l != 0)) {
3653       lgr_if_needed(dst, current);
3654       z_agfi(dst, -(int)base_l);
3655     } else {
3656       load_const(Z_R0, base);
3657       lgr_if_needed(dst, current);
3658       z_sgr(dst, Z_R0);
3659     }
3660     current = dst;
3661   }
3662   if (shift != 0) {
3663     assert (LogKlassAlignmentInBytes == shift, "decode alg wrong");
3664     z_srlg(dst, current, shift);
3665     current = dst;
3666   }
3667   lgr_if_needed(dst, current); // Move may be required (if neither base adjustment nor shift was applied).
3668 
3669   BLOCK_COMMENT("} cKlass encoder");
3670 }
3671 
3672 // This function calculates the size of the code generated by
3673 //   decode_klass_not_null(Register dst)
3674 // when (Universe::heap() != NULL). Hence, if the instructions
3675 // it generates change, then this method needs to be updated.
3676 int MacroAssembler::instr_size_for_decode_klass_not_null() {
3677   address  base    = CompressedKlassPointers::base();
3678   int shift_size   = CompressedKlassPointers::shift() == 0 ? 0 : 6; /* sllg */
3679   int addbase_size = 0;
3680   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3681 
3682   if (base != NULL) {
3683     unsigned int base_h = ((unsigned long)base)>>32;
3684     unsigned int base_l = (unsigned int)((unsigned long)base);
3685     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3686       addbase_size += 6; /* aih */
3687     } else if ((base_h == 0) && (base_l != 0)) {
3688       addbase_size += 6; /* algfi */
3689     } else {
3690       addbase_size += load_const_size();
3691       addbase_size += 4; /* algr */
3692     }
3693   }
3694 #ifdef ASSERT
3695   addbase_size += 10;
3696   addbase_size += 2; // Extra sigill.
3697 #endif
3698   return addbase_size + shift_size;
3699 }
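
// Worked size example (illustrative only; actual values depend on base and shift):
// for a base with set bits only in the upper 32 bits and a non-zero shift, the
// decoder emits aih (6 bytes) + sllg (6 bytes) = 12 bytes; ASSERT builds add
// tmll + brc + illtrap (10 bytes) plus the extra illtrap (2 bytes), matching the
// constants accounted for above.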
3700 
3701 // !!! If the instructions that get generated here change
3702 //     then function instr_size_for_decode_klass_not_null()
3703 //     needs to get updated.
3704 // This variant of decode_klass_not_null() must generate predictable code!
3705 // The code must only depend on globally known parameters.
3706 void MacroAssembler::decode_klass_not_null(Register dst) {
3707   address  base    = CompressedKlassPointers::base();
3708   int      shift   = CompressedKlassPointers::shift();
3709   int      beg_off = offset();
3710   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3711 
3712   BLOCK_COMMENT("cKlass decoder (const size) {");
3713 
3714   if (shift != 0) { // Shift required?
3715     z_sllg(dst, dst, shift);
3716   }
3717   if (base != NULL) {
3718     unsigned int base_h = ((unsigned long)base)>>32;
3719     unsigned int base_l = (unsigned int)((unsigned long)base);
3720     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3721       z_aih(dst, base_h);     // Base has no set bits in lower half.
3722     } else if ((base_h == 0) && (base_l != 0)) {
3723       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3724     } else {
3725       load_const(Z_R0, base); // Base has set bits everywhere.
3726       z_algr(dst, Z_R0);
3727     }
3728   }
3729 
3730 #ifdef ASSERT
3731   Label ok;
3732   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3733   z_brc(Assembler::bcondAllZero, ok);
3734   // The plain disassembler does not recognize illtrap. It instead displays
3735 // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
3736   // the proper beginning of the next instruction.
3737   z_illtrap(0xd1);
3738   z_illtrap(0xd1);
3739   bind(ok);
3740 #endif
3741   assert(offset() == beg_off + instr_size_for_decode_klass_not_null(), "Code gen mismatch.");
3742 
3743   BLOCK_COMMENT("} cKlass decoder (const size)");
3744 }
3745 
3746 // This variant of decode_klass_not_null() is for cases where
3747 //  1) the size of the generated instructions may vary
3748 //  2) the result is (potentially) stored in a register different from the source.
3749 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
3750   address base  = CompressedKlassPointers::base();
3751   int     shift = CompressedKlassPointers::shift();
3752   assert(UseCompressedClassPointers, "only for compressed klass ptrs");
3753 
3754   BLOCK_COMMENT("cKlass decoder {");
3755 
3756   if (src == noreg) src = dst;
3757 
3758   if (shift != 0) { // Shift or at least move required?
3759     z_sllg(dst, src, shift);
3760   } else {
3761     lgr_if_needed(dst, src);
3762   }
3763 
3764   if (base != NULL) {
3765     unsigned int base_h = ((unsigned long)base)>>32;
3766     unsigned int base_l = (unsigned int)((unsigned long)base);
3767     if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3768       z_aih(dst, base_h);     // Base has no set bits in lower half.
3769     } else if ((base_h == 0) && (base_l != 0)) {
3770       z_algfi(dst, base_l);   // Base has no set bits in upper half.
3771     } else {
3772       load_const_optimized(Z_R0, base); // Base has set bits everywhere.
3773       z_algr(dst, Z_R0);
3774     }
3775   }
3776 
3777 #ifdef ASSERT
3778   Label ok;
3779   z_tmll(dst, KlassAlignmentInBytes-1); // Check alignment.
3780   z_brc(Assembler::bcondAllZero, ok);
3781   // The plain disassembler does not recognize illtrap. It instead displays
3782 // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
3783   // the proper beginning of the next instruction.
3784   z_illtrap(0xd2);
3785   z_illtrap(0xd2);
3786   bind(ok);
3787 #endif
3788   BLOCK_COMMENT("} cKlass decoder");
3789 }
3790 
3791 void MacroAssembler::load_klass(Register klass, Address mem) {
3792   if (UseCompressedClassPointers) {
3793     z_llgf(klass, mem);
3794     // Attention: no null check here!
3795     decode_klass_not_null(klass);
3796   } else {
3797     z_lg(klass, mem);
3798   }
3799 }
3800 
3801 void MacroAssembler::load_klass(Register klass, Register src_oop) {
3802   if (UseCompressedClassPointers) {
3803     z_llgf(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3804     // Attention: no null check here!
3805     decode_klass_not_null(klass);
3806   } else {
3807     z_lg(klass, oopDesc::klass_offset_in_bytes(), src_oop);
3808   }
3809 }
3810 
3811 void MacroAssembler::load_prototype_header(Register Rheader, Register Rsrc_oop) {
3812   assert_different_registers(Rheader, Rsrc_oop);
3813   load_klass(Rheader, Rsrc_oop);
3814   z_lg(Rheader, Address(Rheader, Klass::prototype_header_offset()));
3815 }
3816 
3817 void MacroAssembler::store_klass(Register klass, Register dst_oop, Register ck) {
3818   if (UseCompressedClassPointers) {
3819     assert_different_registers(dst_oop, klass, Z_R0);
3820     if (ck == noreg) ck = klass;
3821     encode_klass_not_null(ck, klass);
3822     z_st(ck, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3823   } else {
3824     z_stg(klass, Address(dst_oop, oopDesc::klass_offset_in_bytes()));
3825   }
3826 }
3827 
3828 void MacroAssembler::store_klass_gap(Register s, Register d) {
3829   if (UseCompressedClassPointers) {
3830     assert(s != d, "not enough registers");
3831     // Support s = noreg.
3832     if (s != noreg) {
3833       z_st(s, Address(d, oopDesc::klass_gap_offset_in_bytes()));
3834     } else {
3835       z_mvhi(Address(d, oopDesc::klass_gap_offset_in_bytes()), 0);
3836     }
3837   }
3838 }
3839 
3840 // Compare klass ptr in memory against klass ptr in register.
3841 //
3842 // Rop1            - klass in register, always uncompressed.
3843 // disp            - Offset of klass in memory, compressed/uncompressed, depending on runtime flag.
3844 // Rbase           - Base address of cKlass in memory.
3845 // maybeNULL       - True if Rop1 possibly is a NULL.
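// In essence, the sequence below computes (sketch only; scratch-register and
// condition-code details omitted):
//   narrow = (Rop1 == NULL) ? 0 : (uint32_t)((Rop1 - base) >> shift);
//   compare(narrow, *(uint32_t*)(Rbase + disp));
// i.e. the register-side klass is compressed and compared against the narrow klass
// already in memory, instead of loading and decoding the memory operand.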
3846 void MacroAssembler::compare_klass_ptr(Register Rop1, int64_t disp, Register Rbase, bool maybeNULL) {
3847 
3848   BLOCK_COMMENT("compare klass ptr {");
3849 
3850   if (UseCompressedClassPointers) {
3851     const int shift = CompressedKlassPointers::shift();
3852     address   base  = CompressedKlassPointers::base();
3853 
3854     assert((shift == 0) || (shift == LogKlassAlignmentInBytes), "cKlass encoder detected bad shift");
3855     assert_different_registers(Rop1, Z_R0);
3856     assert_different_registers(Rop1, Rbase, Z_R1);
3857 
3858     // First encode register oop and then compare with cOop in memory.
3859     // This sequence saves an unnecessary cOop load and decode.
3860     if (base == NULL) {
3861       if (shift == 0) {
3862         z_cl(Rop1, disp, Rbase);     // Unscaled
3863       } else {
3864         z_srlg(Z_R0, Rop1, shift);   // ZeroBased
3865         z_cl(Z_R0, disp, Rbase);
3866       }
3867     } else {                         // HeapBased
3868 #ifdef ASSERT
3869       bool     used_R0 = true;
3870       bool     used_R1 = true;
3871 #endif
3872       Register current = Rop1;
3873       Label    done;
3874 
3875       if (maybeNULL) {       // NULL ptr must be preserved!
3876         z_ltgr(Z_R0, current);
3877         z_bre(done);
3878         current = Z_R0;
3879       }
3880 
3881       unsigned int base_h = ((unsigned long)base)>>32;
3882       unsigned int base_l = (unsigned int)((unsigned long)base);
3883       if ((base_h != 0) && (base_l == 0) && VM_Version::has_HighWordInstr()) {
3884         lgr_if_needed(Z_R0, current);
3885         z_aih(Z_R0, -((int)base_h));     // Base has no set bits in lower half.
3886       } else if ((base_h == 0) && (base_l != 0)) {
3887         lgr_if_needed(Z_R0, current);
3888         z_agfi(Z_R0, -(int)base_l);
3889       } else {
3890         int pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
3891         add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1); // Subtract base by adding complement.
3892       }
3893 
3894       if (shift != 0) {
3895         z_srlg(Z_R0, Z_R0, shift);
3896       }
3897       bind(done);
3898       z_cl(Z_R0, disp, Rbase);
3899 #ifdef ASSERT
3900       if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
3901       if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
3902 #endif
3903     }
3904   } else {
3905     z_clg(Rop1, disp, Z_R0, Rbase);
3906   }
3907   BLOCK_COMMENT("} compare klass ptr");
3908 }
3909 
3910 //---------------------------
3911 //  Compressed oops
3912 //---------------------------
3913 
3914 void MacroAssembler::encode_heap_oop(Register oop) {
3915   oop_encoder(oop, oop, true /*maybe null*/);
3916 }
3917 
3918 void MacroAssembler::encode_heap_oop_not_null(Register oop) {
3919   oop_encoder(oop, oop, false /*not null*/);
3920 }
3921 
3922 // Called with something derived from the oop base, e.g. oop_base >> 3.
3923 int MacroAssembler::get_oop_base_pow2_offset(uint64_t oop_base) {
3924   unsigned int oop_base_ll = ((unsigned int)(oop_base >>  0)) & 0xffff;
3925   unsigned int oop_base_lh = ((unsigned int)(oop_base >> 16)) & 0xffff;
3926   unsigned int oop_base_hl = ((unsigned int)(oop_base >> 32)) & 0xffff;
3927   unsigned int oop_base_hh = ((unsigned int)(oop_base >> 48)) & 0xffff;
3928   unsigned int n_notzero_parts = (oop_base_ll == 0 ? 0:1)
3929                                + (oop_base_lh == 0 ? 0:1)
3930                                + (oop_base_hl == 0 ? 0:1)
3931                                + (oop_base_hh == 0 ? 0:1);
3932 
3933   assert(oop_base != 0, "This is for HeapBased cOops only");
3934 
3935   if (n_notzero_parts != 1) { //  Check if oop_base is just a few pages shy of a power of 2.
3936     uint64_t pow2_offset = 0x10000 - oop_base_ll;
3937     if (pow2_offset < 0x8000) {  // This might not be necessary.
3938       uint64_t oop_base2 = oop_base + pow2_offset;
3939 
3940       oop_base_ll = ((unsigned int)(oop_base2 >>  0)) & 0xffff;
3941       oop_base_lh = ((unsigned int)(oop_base2 >> 16)) & 0xffff;
3942       oop_base_hl = ((unsigned int)(oop_base2 >> 32)) & 0xffff;
3943       oop_base_hh = ((unsigned int)(oop_base2 >> 48)) & 0xffff;
3944       n_notzero_parts = (oop_base_ll == 0 ? 0:1) +
3945                         (oop_base_lh == 0 ? 0:1) +
3946                         (oop_base_hl == 0 ? 0:1) +
3947                         (oop_base_hh == 0 ? 0:1);
3948       if (n_notzero_parts == 1) {
3949         assert(-(int64_t)pow2_offset != (int64_t)-1, "We use -1 to signal uninitialized base register");
3950         return -pow2_offset;
3951       }
3952     }
3953   }
3954   return 0;
3955 }
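
// Worked example of the adjustment above (hypothetical base, for illustration only):
//   oop_base = 0x000000007FFFF000 -> 16-bit parts hh=0, hl=0, lh=0x7FFF, ll=0xF000 (two non-zero)
//   pow2_offset = 0x10000 - 0xF000 = 0x1000
//   oop_base + pow2_offset = 0x0000000080000000 -> only lh=0x8000 is non-zero
//   => return -0x1000; get_oop_base() then loads 0x80000000 with a single instruction
//      and the caller folds the -0x1000 back in as a displacement.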
3956 
3957 // If base address is offset from a straight power of two by just a few pages,
3958 // return this offset to the caller for a possible later composite add.
3959 // TODO/FIX: will only work correctly for 4k pages.
3960 int MacroAssembler::get_oop_base(Register Rbase, uint64_t oop_base) {
3961   int pow2_offset = get_oop_base_pow2_offset(oop_base);
3962 
3963   load_const_optimized(Rbase, oop_base - pow2_offset); // Best job possible.
3964 
3965   return pow2_offset;
3966 }
3967 
3968 int MacroAssembler::get_oop_base_complement(Register Rbase, uint64_t oop_base) {
3969   int offset = get_oop_base(Rbase, oop_base);
3970   z_lcgr(Rbase, Rbase);
3971   return -offset;
3972 }
3973 
3974 // Compare compressed oop in memory against oop in register.
3975 // Rop1            - Oop in register.
3976 // disp            - Offset of cOop in memory.
3977 // Rbase           - Base address of cOop in memory.
3978 // maybeNULL       - True if Rop1 possibly is a NULL.
3979 // maybeNULLtarget - Branch target for Rop1 == NULL, if flow control shall NOT continue with compare instruction.
3980 void MacroAssembler::compare_heap_oop(Register Rop1, Address mem, bool maybeNULL) {
3981   Register Rbase  = mem.baseOrR0();
3982   Register Rindex = mem.indexOrR0();
3983   int64_t  disp   = mem.disp();
3984 
3985   const int shift = CompressedOops::shift();
3986   address   base  = CompressedOops::base();
3987 
3988   assert(UseCompressedOops, "must be on to call this method");
3989   assert(Universe::heap() != NULL, "java heap must be initialized to call this method");
3990   assert((shift == 0) || (shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
3991   assert_different_registers(Rop1, Z_R0);
3992   assert_different_registers(Rop1, Rbase, Z_R1);
3993   assert_different_registers(Rop1, Rindex, Z_R1);
3994 
3995   BLOCK_COMMENT("compare heap oop {");
3996 
3997   // First encode register oop and then compare with cOop in memory.
3998   // This sequence saves an unnecessary cOop load and decode.
3999   if (base == NULL) {
4000     if (shift == 0) {
4001       z_cl(Rop1, disp, Rindex, Rbase);  // Unscaled
4002     } else {
4003       z_srlg(Z_R0, Rop1, shift);        // ZeroBased
4004       z_cl(Z_R0, disp, Rindex, Rbase);
4005     }
4006   } else {                              // HeapBased
4007 #ifdef ASSERT
4008     bool  used_R0 = true;
4009     bool  used_R1 = true;
4010 #endif
4011     Label done;
4012     int   pow2_offset = get_oop_base_complement(Z_R1, ((uint64_t)(intptr_t)base));
4013 
4014     if (maybeNULL) {       // NULL ptr must be preserved!
4015       z_ltgr(Z_R0, Rop1);
4016       z_bre(done);
4017     }
4018 
4019     add2reg_with_index(Z_R0, pow2_offset, Z_R1, Rop1);
4020     z_srlg(Z_R0, Z_R0, shift);
4021 
4022     bind(done);
4023     z_cl(Z_R0, disp, Rindex, Rbase);
4024 #ifdef ASSERT
4025     if (used_R0) preset_reg(Z_R0, 0xb05bUL, 2);
4026     if (used_R1) preset_reg(Z_R1, 0xb06bUL, 2);
4027 #endif
4028   }
4029   BLOCK_COMMENT("} compare heap oop");
4030 }
4031 
4032 void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
4033                                      const Address& addr, Register val,
4034                                      Register tmp1, Register tmp2, Register tmp3) {
4035   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4036                          ON_UNKNOWN_OOP_REF)) == 0, "unsupported decorator");
4037   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4038   decorators = AccessInternal::decorator_fixup(decorators);
4039   bool as_raw = (decorators & AS_RAW) != 0;
4040   if (as_raw) {
4041     bs->BarrierSetAssembler::store_at(this, decorators, type,
4042                                       addr, val,
4043                                       tmp1, tmp2, tmp3);
4044   } else {
4045     bs->store_at(this, decorators, type,
4046                  addr, val,
4047                  tmp1, tmp2, tmp3);
4048   }
4049 }
4050 
4051 void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
4052                                     const Address& addr, Register dst,
4053                                     Register tmp1, Register tmp2, Label *is_null) {
4054   assert((decorators & ~(AS_RAW | IN_HEAP | IN_NATIVE | IS_ARRAY | IS_NOT_NULL |
4055                          ON_PHANTOM_OOP_REF | ON_WEAK_OOP_REF)) == 0, "unsupported decorator");
4056   BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
4057   decorators = AccessInternal::decorator_fixup(decorators);
4058   bool as_raw = (decorators & AS_RAW) != 0;
4059   if (as_raw) {
4060     bs->BarrierSetAssembler::load_at(this, decorators, type,
4061                                      addr, dst,
4062                                      tmp1, tmp2, is_null);
4063   } else {
4064     bs->load_at(this, decorators, type,
4065                 addr, dst,
4066                 tmp1, tmp2, is_null);
4067   }
4068 }
4069 
4070 void MacroAssembler::load_heap_oop(Register dest, const Address &a,
4071                                    Register tmp1, Register tmp2,
4072                                    DecoratorSet decorators, Label *is_null) {
4073   access_load_at(T_OBJECT, IN_HEAP | decorators, a, dest, tmp1, tmp2, is_null);
4074 }
4075 
4076 void MacroAssembler::store_heap_oop(Register Roop, const Address &a,
4077                                     Register tmp1, Register tmp2, Register tmp3,
4078                                     DecoratorSet decorators) {
4079   access_store_at(T_OBJECT, IN_HEAP | decorators, a, Roop, tmp1, tmp2, tmp3);
4080 }
4081 
4082 //-------------------------------------------------
4083 // Encode compressed oop. Generally usable encoder.
4084 //-------------------------------------------------
4085 // Rsrc - contains regular oop on entry. It remains unchanged.
4086 // Rdst - contains compressed oop on exit.
4087 // Rdst and Rsrc may indicate the same register, in which case Rsrc is clobbered.
4088 //
4089 // Rdst must not indicate scratch register Z_R1 (Z_R1_scratch) for functionality.
4090 // Rdst should not indicate scratch register Z_R0 (Z_R0_scratch) for performance.
4091 //
4092 // only32bitValid is set if later code uses only the lower 32 bits. In this
4093 // case we need not clean up the upper 32 bits.
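//
// The general (HeapBased) path below relies on this identity (sketch; CompressedOops
// guarantees that base is aligned to 1 << oop_shift):
//   narrow = (oop - base) >> shift  ==  (oop >> shift) - (base >> shift)
// which lets the shift of Rsrc and the NULL test overlap with materializing the
// (negated, pre-shifted) base in a separate register.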
4094 void MacroAssembler::oop_encoder(Register Rdst, Register Rsrc, bool maybeNULL,
4095                                  Register Rbase, int pow2_offset, bool only32bitValid) {
4096 
4097   const address oop_base  = CompressedOops::base();
4098   const int     oop_shift = CompressedOops::shift();
4099   const bool    disjoint  = CompressedOops::base_disjoint();
4100 
4101   assert(UseCompressedOops, "must be on to call this method");
4102   assert(Universe::heap() != NULL, "java heap must be initialized to call this encoder");
4103   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes), "cOop encoder detected bad shift");
4104 
4105   if (disjoint || (oop_base == NULL)) {
4106     BLOCK_COMMENT("cOop encoder zeroBase {");
4107     if (oop_shift == 0) {
4108       if (oop_base != NULL && !only32bitValid) {
4109         z_llgfr(Rdst, Rsrc); // Clear upper bits in case the register will be decoded again.
4110       } else {
4111         lgr_if_needed(Rdst, Rsrc);
4112       }
4113     } else {
4114       z_srlg(Rdst, Rsrc, oop_shift);
4115       if (oop_base != NULL && !only32bitValid) {
4116         z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4117       }
4118     }
4119     BLOCK_COMMENT("} cOop encoder zeroBase");
4120     return;
4121   }
4122 
4123   bool used_R0 = false;
4124   bool used_R1 = false;
4125 
4126   BLOCK_COMMENT("cOop encoder general {");
4127   assert_different_registers(Rdst, Z_R1);
4128   assert_different_registers(Rsrc, Rbase);
4129   if (maybeNULL) {
4130     Label done;
4131     // We reorder shifting and subtracting, so that we can compare
4132     // and shift in parallel:
4133     //
4134     // cycle 0:  potential LoadN, base = <const>
4135     // cycle 1:  base = !base     dst = src >> 3,    cmp cr = (src != 0)
4136     // cycle 2:  if (cr) br,      dst = dst + base + offset
4137 
4138     // Get oop_base components.
4139     if (pow2_offset == -1) {
4140       if (Rdst == Rbase) {
4141         if (Rdst == Z_R1 || Rsrc == Z_R1) {
4142           Rbase = Z_R0;
4143           used_R0 = true;
4144         } else {
4145           Rdst = Z_R1;
4146           used_R1 = true;
4147         }
4148       }
4149       if (Rbase == Z_R1) {
4150         used_R1 = true;
4151       }
4152       pow2_offset = get_oop_base_complement(Rbase, ((uint64_t)(intptr_t)oop_base) >> oop_shift);
4153     }
4154     assert_different_registers(Rdst, Rbase);
4155 
4156     // Check for NULL oop (must be left alone) and shift.
4157     if (oop_shift != 0) {  // Shift out alignment bits
4158       if (((intptr_t)oop_base&0xc000000000000000L) == 0L) { // We are sure: no single address will have the leftmost bit set.
4159         z_srag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4160       } else {
4161         z_srlg(Rdst, Rsrc, oop_shift);
4162         z_ltgr(Rsrc, Rsrc);  // This is the recommended way of testing for zero.
4163         // This probably is faster, as it does not write a register. No!
4164         // z_cghi(Rsrc, 0);
4165       }
4166     } else {
4167       z_ltgr(Rdst, Rsrc);   // Move NULL to result register.
4168     }
4169     z_bre(done);
4170 
4171     // Subtract oop_base components.
4172     if ((Rdst == Z_R0) || (Rbase == Z_R0)) {
4173       z_algr(Rdst, Rbase);
4174       if (pow2_offset != 0) { add2reg(Rdst, pow2_offset); }
4175     } else {
4176       add2reg_with_index(Rdst, pow2_offset, Rbase, Rdst);
4177     }
4178     if (!only32bitValid) {
4179       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4180     }
4181     bind(done);
4182 
4183   } else {  // not null
4184     // Get oop_base components.
4185     if (pow2_offset == -1) {
4186       pow2_offset = get_oop_base_complement(Rbase, (uint64_t)(intptr_t)oop_base);
4187     }
4188 
4189     // Subtract oop_base components and shift.
4190     if (Rdst == Z_R0 || Rsrc == Z_R0 || Rbase == Z_R0) {
4191       // Don't use lay instruction.
4192       if (Rdst == Rsrc) {
4193         z_algr(Rdst, Rbase);
4194       } else {
4195         lgr_if_needed(Rdst, Rbase);
4196         z_algr(Rdst, Rsrc);
4197       }
4198       if (pow2_offset != 0) add2reg(Rdst, pow2_offset);
4199     } else {
4200       add2reg_with_index(Rdst, pow2_offset, Rbase, Rsrc);
4201     }
4202     if (oop_shift != 0) {   // Shift out alignment bits.
4203       z_srlg(Rdst, Rdst, oop_shift);
4204     }
4205     if (!only32bitValid) {
4206       z_llgfr(Rdst, Rdst); // Clear upper bits in case the register will be decoded again.
4207     }
4208   }
4209 #ifdef ASSERT
4210   if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb01bUL, 2); }
4211   if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb02bUL, 2); }
4212 #endif
4213   BLOCK_COMMENT("} cOop encoder general");
4214 }
4215 
4216 //-------------------------------------------------
4217 // decode compressed oop. Generally usable decoder.
4218 //-------------------------------------------------
4219 // Rsrc - contains compressed oop on entry.
4220 // Rdst - contains regular oop on exit.
4221 // Rdst and Rsrc may indicate same register.
4222 // Rdst must not be the same register as Rbase if Rbase was preloaded (before the call).
4223 // Otherwise, Rdst may equal Rbase; then either Z_R0 or Z_R1 must be available as scratch.
4224 // Rbase - register to use for the base
4225 // pow2_offset - offset of base to nice value. If -1, base must be loaded.
4226 // For performance, it is good to
4227 //  - avoid Z_R0 for any of the argument registers.
4228 //  - keep Rdst and Rsrc distinct from Rbase. Rdst == Rsrc is ok for performance.
4229 //  - avoid Z_R1 for Rdst if Rdst == Rbase.
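// Decoding sketch (illustrative only; the actual instruction selection follows below):
//   oop = (maybeNULL && narrow == 0) ? NULL : ((uint64_t)narrow << shift) + base;
// With a disjoint base (its set bits do not overlap the shifted narrow oop), the add
// degenerates to an OR into the high word (z_oihl/z_oihh/z_oihf), so no base register
// is needed at all.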
4230 void MacroAssembler::oop_decoder(Register Rdst, Register Rsrc, bool maybeNULL, Register Rbase, int pow2_offset) {
4231 
4232   const address oop_base  = CompressedOops::base();
4233   const int     oop_shift = CompressedOops::shift();
4234   const bool    disjoint  = CompressedOops::base_disjoint();
4235 
4236   assert(UseCompressedOops, "must be on to call this method");
4237   assert(Universe::heap() != NULL, "java heap must be initialized to call this decoder");
4238   assert((oop_shift == 0) || (oop_shift == LogMinObjAlignmentInBytes),
4239          "cOop encoder detected bad shift");
4240 
4241   // cOops are always loaded zero-extended from memory. No explicit zero-extension necessary.
4242 
4243   if (oop_base != NULL) {
4244     unsigned int oop_base_hl = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xffff;
4245     unsigned int oop_base_hh = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 48)) & 0xffff;
4246     unsigned int oop_base_hf = ((unsigned int)((uint64_t)(intptr_t)oop_base >> 32)) & 0xFFFFffff;
4247     if (disjoint && (oop_base_hl == 0 || oop_base_hh == 0)) {
4248       BLOCK_COMMENT("cOop decoder disjointBase {");
4249       // We do not need to load the base. Instead, we can install the upper bits
4250       // with an OR instead of an ADD.
4251       Label done;
4252 
4253       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4254       if (maybeNULL) {  // NULL ptr must be preserved!
4255         z_slag(Rdst, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4256         z_bre(done);
4257       } else {
4258         z_sllg(Rdst, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4259       }
4260       if ((oop_base_hl != 0) && (oop_base_hh != 0)) {
4261         z_oihf(Rdst, oop_base_hf);
4262       } else if (oop_base_hl != 0) {
4263         z_oihl(Rdst, oop_base_hl);
4264       } else {
4265         assert(oop_base_hh != 0, "not heapbased mode");
4266         z_oihh(Rdst, oop_base_hh);
4267       }
4268       bind(done);
4269       BLOCK_COMMENT("} cOop decoder disjointBase");
4270     } else {
4271       BLOCK_COMMENT("cOop decoder general {");
4272       // There are three decode steps:
4273       //   scale oop offset (shift left)
4274       //   get base (in reg) and pow2_offset (constant)
4275       //   add base, pow2_offset, and oop offset
4276       // The following register overlap situations may exist:
4277       // Rdst == Rsrc,  Rbase any other
4278       //   not a problem. Scaling in-place leaves Rbase undisturbed.
4279       //   Loading Rbase does not impact the scaled offset.
4280       // Rdst == Rbase, Rsrc  any other
4281       //   scaling would destroy a possibly preloaded Rbase. Loading Rbase
4282       //   would destroy the scaled offset.
4283       //   Remedy: use Rdst_tmp if Rbase has been preloaded.
4284       //           use Rbase_tmp if base has to be loaded.
4285       // Rsrc == Rbase, Rdst  any other
4286       //   Only possible without preloaded Rbase.
4287       //   Loading Rbase does not destroy compressed oop because it was scaled into Rdst before.
4288       // Rsrc == Rbase, Rdst == Rbase
4289       //   Only possible without preloaded Rbase.
4290       //   Loading Rbase would destroy compressed oop. Scaling in-place is ok.
4291       //   Remedy: use Rbase_tmp.
4292       //
4293       Label    done;
4294       Register Rdst_tmp       = Rdst;
4295       Register Rbase_tmp      = Rbase;
4296       bool     used_R0        = false;
4297       bool     used_R1        = false;
4298       bool     base_preloaded = pow2_offset >= 0;
4299       guarantee(!(base_preloaded && (Rsrc == Rbase)), "Register clash, check caller");
4300       assert(oop_shift != 0, "room for optimization");
4301 
4302       // Check if we need to use scratch registers.
4303       if (Rdst == Rbase) {
4304         assert(!(((Rdst == Z_R0) && (Rsrc == Z_R1)) || ((Rdst == Z_R1) && (Rsrc == Z_R0))), "need a scratch reg");
4305         if (Rdst != Rsrc) {
4306           if (base_preloaded) { Rdst_tmp  = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4307           else                { Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1; }
4308         } else {
4309           Rbase_tmp = (Rdst == Z_R1) ? Z_R0 : Z_R1;
4310         }
4311       }
4312       if (base_preloaded) lgr_if_needed(Rbase_tmp, Rbase);
4313 
4314       // Scale oop and check for NULL.
4315       // Rsrc contains a narrow oop. Thus we are sure the leftmost <oop_shift> bits will never be set.
4316       if (maybeNULL) {  // NULL ptr must be preserved!
4317         z_slag(Rdst_tmp, Rsrc, oop_shift);  // Arithmetic shift sets the condition code.
4318         z_bre(done);
4319       } else {
4320         z_sllg(Rdst_tmp, Rsrc, oop_shift);  // Logical shift leaves condition code alone.
4321       }
4322 
4323       // Get oop_base components.
4324       if (!base_preloaded) {
4325         pow2_offset = get_oop_base(Rbase_tmp, (uint64_t)(intptr_t)oop_base);
4326       }
4327 
4328       // Add up all components.
4329       if ((Rbase_tmp == Z_R0) || (Rdst_tmp == Z_R0)) {
4330         z_algr(Rdst_tmp, Rbase_tmp);
4331         if (pow2_offset != 0) { add2reg(Rdst_tmp, pow2_offset); }
4332       } else {
4333         add2reg_with_index(Rdst_tmp, pow2_offset, Rbase_tmp, Rdst_tmp);
4334       }
4335 
4336       bind(done);
4337       lgr_if_needed(Rdst, Rdst_tmp);
4338 #ifdef ASSERT
4339       if (used_R0 && Rdst != Z_R0 && Rsrc != Z_R0) { preset_reg(Z_R0, 0xb03bUL, 2); }
4340       if (used_R1 && Rdst != Z_R1 && Rsrc != Z_R1) { preset_reg(Z_R1, 0xb04bUL, 2); }
4341 #endif
4342       BLOCK_COMMENT("} cOop decoder general");
4343     }
4344   } else {
4345     BLOCK_COMMENT("cOop decoder zeroBase {");
4346     if (oop_shift == 0) {
4347       lgr_if_needed(Rdst, Rsrc);
4348     } else {
4349       z_sllg(Rdst, Rsrc, oop_shift);
4350     }
4351     BLOCK_COMMENT("} cOop decoder zeroBase");
4352   }
4353 }
4354 
4355 // ((OopHandle)result).resolve();
4356 void MacroAssembler::resolve_oop_handle(Register result) {
4357   // OopHandle::resolve is an indirection.
4358   z_lg(result, 0, result);
4359 }
4360 
4361 void MacroAssembler::load_mirror_from_const_method(Register mirror, Register const_method) {
4362   mem2reg_opt(mirror, Address(const_method, ConstMethod::constants_offset()));
4363   mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
4364   mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
4365   resolve_oop_handle(mirror);
4366 }
4367 
4368 void MacroAssembler::load_method_holder(Register holder, Register method) {
4369   mem2reg_opt(holder, Address(method, Method::const_offset()));
4370   mem2reg_opt(holder, Address(holder, ConstMethod::constants_offset()));
4371   mem2reg_opt(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes()));
4372 }
4373 
4374 //---------------------------------------------------------------
4375 //---  Operations on arrays.
4376 //---------------------------------------------------------------
4377 
4378 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4379 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4380 // work registers anyway.
4381 // Actually, only r0, r1, and odd_tmp_reg are killed.
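// Functionally, the emitted code is a doubleword-granular clear (sketch, assuming
// 8-byte doublewords):
//   if (cnt != 0) memset(base, 0, (size_t)cnt * 8);
// Arrays of up to 256 bytes are cleared with a single EXECUTEd XC; longer arrays
// fall back to MVCLE with a zero-length source, which fills with padding byte 0.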
4382 unsigned int MacroAssembler::Clear_Array(Register cnt_arg, Register base_pointer_arg, Register odd_tmp_reg) {
4383 
4384   int      block_start = offset();
4385   Register dst_len  = Z_R1;    // Holds dst len  for MVCLE.
4386   Register dst_addr = Z_R0;    // Holds dst addr for MVCLE.
4387 
4388   Label doXC, doMVCLE, done;
4389 
4390   BLOCK_COMMENT("Clear_Array {");
4391 
4392   // Check for zero len and convert to long.
4393   z_ltgfr(odd_tmp_reg, cnt_arg);
4394   z_bre(done);                    // Nothing to do if len == 0.
4395 
4396   // Prefetch data to be cleared.
4397   if (VM_Version::has_Prefetch()) {
4398     z_pfd(0x02,   0, Z_R0, base_pointer_arg);
4399     z_pfd(0x02, 256, Z_R0, base_pointer_arg);
4400   }
4401 
4402   z_sllg(dst_len, odd_tmp_reg, 3); // #bytes to clear.
4403   z_cghi(odd_tmp_reg, 32);         // Check for len <= 256 bytes (<=32 DW).
4404   z_brnh(doXC);                    // If so, use executed XC to clear.
4405 
4406   // MVCLE: initialize long arrays (general case).
4407   bind(doMVCLE);
4408   z_lgr(dst_addr, base_pointer_arg);
4409   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4410   // The even register of the register pair is not killed.
4411   clear_reg(odd_tmp_reg, true, false);
4412   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding()-1), 0);
4413   z_bru(done);
4414 
4415   // XC: initialize short arrays.
4416   Label XC_template; // Instr template, never exec directly!
4417     bind(XC_template);
4418     z_xc(0,0,base_pointer_arg,0,base_pointer_arg);
4419 
4420   bind(doXC);
4421     add2reg(dst_len, -1);               // Get #bytes-1 for EXECUTE.
4422     if (VM_Version::has_ExecuteExtensions()) {
4423       z_exrl(dst_len, XC_template);     // Execute XC with var. len.
4424     } else {
4425       z_larl(odd_tmp_reg, XC_template);
4426       z_ex(dst_len,0,Z_R0,odd_tmp_reg); // Execute XC with var. len.
4427     }
4428     // z_bru(done);      // fallthru
4429 
4430   bind(done);
4431 
4432   BLOCK_COMMENT("} Clear_Array");
4433 
4434   int block_end = offset();
4435   return block_end - block_start;
4436 }
4437 
4438 // Compiler ensures base is doubleword aligned and cnt is count of doublewords.
4439 // Emitter does not KILL any arguments nor work registers.
4440 // Emitter generates up to 16 XC instructions, depending on the array length.
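// Worked example (illustrative): cnt = 100 doublewords = 800 bytes
//   numXCInstr = (800 - 1) / 256 + 1 = 4,
// i.e. three full 256-byte XC instructions plus one trailing XC clearing the
// remaining 800 - 3 * 256 = 32 bytes.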
4441 unsigned int MacroAssembler::Clear_Array_Const(long cnt, Register base) {
4442   int  block_start    = offset();
4443   int  off;
4444   int  lineSize_Bytes = AllocatePrefetchStepSize;
4445   int  lineSize_DW    = AllocatePrefetchStepSize>>LogBytesPerWord;
4446   bool doPrefetch     = VM_Version::has_Prefetch();
4447   int  XC_maxlen      = 256;
4448   int  numXCInstr     = cnt > 0 ? (cnt*BytesPerWord-1)/XC_maxlen+1 : 0;
4449 
4450   BLOCK_COMMENT("Clear_Array_Const {");
4451   assert(cnt*BytesPerWord <= 4096, "ClearArrayConst can handle 4k only");
4452 
4453   // Do less prefetching for very short arrays.
4454   if (numXCInstr > 0) {
4455     // Prefetch only some cache lines, then begin clearing.
4456     if (doPrefetch) {
4457       if (cnt*BytesPerWord <= lineSize_Bytes/4) {  // If less than 1/4 of a cache line to clear,
4458         z_pfd(0x02, 0, Z_R0, base);                // prefetch just the first cache line.
4459       } else {
4460         assert(XC_maxlen == lineSize_Bytes, "ClearArrayConst needs 256B cache lines");
4461         for (off = 0; (off < AllocatePrefetchLines) && (off <= numXCInstr); off ++) {
4462           z_pfd(0x02, off*lineSize_Bytes, Z_R0, base);
4463         }
4464       }
4465     }
4466 
4467     for (off=0; off<(numXCInstr-1); off++) {
4468       z_xc(off*XC_maxlen, XC_maxlen-1, base, off*XC_maxlen, base);
4469 
4470       // Prefetch some cache lines in advance.
4471       if (doPrefetch && (off <= numXCInstr-AllocatePrefetchLines)) {
4472         z_pfd(0x02, (off+AllocatePrefetchLines)*lineSize_Bytes, Z_R0, base);
4473       }
4474     }
4475     if (off*XC_maxlen < cnt*BytesPerWord) {
4476       z_xc(off*XC_maxlen, (cnt*BytesPerWord-off*XC_maxlen)-1, base, off*XC_maxlen, base);
4477     }
4478   }
4479   BLOCK_COMMENT("} Clear_Array_Const");
4480 
4481   int block_end = offset();
4482   return block_end - block_start;
4483 }
4484 
4485 // Compiler ensures base is doubleword aligned and cnt is #doublewords.
4486 // Emitter does not KILL cnt and base arguments, since they need to be copied to
4487 // work registers anyway.
4488 // Actually, only r0, r1, (which are work registers) and odd_tmp_reg are killed.
4489 //
4490 // For very large arrays, exploit MVCLE H/W support.
4491 // MVCLE instruction automatically exploits H/W-optimized page mover.
4492 // - Bytes up to next page boundary are cleared with a series of XC to self.
4493 // - All full pages are cleared with the page mover H/W assist.
4494 // - Remaining bytes are again cleared by a series of XC to self.
4495 //
4496 unsigned int MacroAssembler::Clear_Array_Const_Big(long cnt, Register base_pointer_arg, Register odd_tmp_reg) {
4497 
4498   int      block_start = offset();
4499   Register dst_len  = Z_R1;      // Holds dst len  for MVCLE.
4500   Register dst_addr = Z_R0;      // Holds dst addr for MVCLE.
4501 
4502   BLOCK_COMMENT("Clear_Array_Const_Big {");
4503 
4504   // Get len to clear.
4505   load_const_optimized(dst_len, (long)cnt*8L);  // in Bytes = #DW*8
4506 
4507   // Prepare other args to MVCLE.
4508   z_lgr(dst_addr, base_pointer_arg);
4509   // Pass 0 as source length to MVCLE: destination will be filled with padding byte 0.
4510   // The even register of the register pair is not killed.
4511   (void) clear_reg(odd_tmp_reg, true, false);  // Src len of MVCLE is zero.
4512   MacroAssembler::move_long_ext(dst_addr, as_Register(odd_tmp_reg->encoding() - 1), 0);
4513   BLOCK_COMMENT("} Clear_Array_Const_Big");
4514 
4515   int block_end = offset();
4516   return block_end - block_start;
4517 }
4518 
4519 // Allocator.
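// Semantically this is a raw, non-overlapping doubleword copy (sketch):
//   memcpy(dst_reg, src_reg, (size_t)cnt_reg * 8);
// Up to 256 bytes are copied with a single EXECUTEd MVC; larger copies use MVCLE
// with the 0xb0 value noted below as a cache-bypass hint.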
4520 unsigned int MacroAssembler::CopyRawMemory_AlignedDisjoint(Register src_reg, Register dst_reg,
4521                                                            Register cnt_reg,
4522                                                            Register tmp1_reg, Register tmp2_reg) {
4523   // Tmp1 is oddReg.
4524   // Tmp2 is evenReg.
4525 
4526   int block_start = offset();
4527   Label doMVC, doMVCLE, done, MVC_template;
4528 
4529   BLOCK_COMMENT("CopyRawMemory_AlignedDisjoint {");
4530 
4531   // Check for zero len and convert to long.
4532   z_ltgfr(cnt_reg, cnt_reg);      // Sign-extend cnt; the value is reused below.
4533   z_bre(done);                    // Nothing to do if len == 0.
4534 
4535   z_sllg(Z_R1, cnt_reg, 3);       // Dst len in bytes. calc early to have the result ready.
4536 
4537   z_cghi(cnt_reg, 32);            // Check for len <= 256 bytes (<=32 DW).
4538   z_brnh(doMVC);                  // If so, use executed MVC to clear.
4539 
4540   bind(doMVCLE);                  // A lot of data (more than 256 bytes).
4541   // Prep dest reg pair.
4542   z_lgr(Z_R0, dst_reg);           // dst addr
4543   // Dst len already in Z_R1.
4544   // Prep src reg pair.
4545   z_lgr(tmp2_reg, src_reg);       // src addr
4546   z_lgr(tmp1_reg, Z_R1);          // Src len same as dst len.
4547 
4548   // Do the copy.
4549   move_long_ext(Z_R0, tmp2_reg, 0xb0); // Bypass cache.
4550   z_bru(done);                         // All done.
4551 
4552   bind(MVC_template);             // Just some data (not more than 256 bytes).
4553   z_mvc(0, 0, dst_reg, 0, src_reg);
4554 
4555   bind(doMVC);
4556 
4557   if (VM_Version::has_ExecuteExtensions()) {
4558     add2reg(Z_R1, -1);
4559   } else {
4560     add2reg(tmp1_reg, -1, Z_R1);
4561     z_larl(Z_R1, MVC_template);
4562   }
4563 
4564   if (VM_Version::has_Prefetch()) {
4565     z_pfd(1,  0,Z_R0,src_reg);
4566     z_pfd(2,  0,Z_R0,dst_reg);
4567     //    z_pfd(1,256,Z_R0,src_reg);    // Assume very short copy.
4568     //    z_pfd(2,256,Z_R0,dst_reg);
4569   }
4570 
4571   if (VM_Version::has_ExecuteExtensions()) {
4572     z_exrl(Z_R1, MVC_template);
4573   } else {
4574     z_ex(tmp1_reg, 0, Z_R0, Z_R1);
4575   }
4576 
4577   bind(done);
4578 
4579   BLOCK_COMMENT("} CopyRawMemory_AlignedDisjoint");
4580 
4581   int block_end = offset();
4582   return block_end - block_start;
4583 }
4584 
4585 //-------------------------------------------------
4586 //   Constants (scalar and oop) in constant pool
4587 //-------------------------------------------------
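// Typical usage pattern (sketch; hypothetical caller): a 64-bit constant is parked
// in the constant pool (TOC) and later loaded PC-relatively:
//   AddressLiteral a(addr);
//   if (!load_const_from_toc(dst, a)) { /* no CP entry: materialize the constant inline */ }
// The store_*_in_toc() helpers return the byte offset of the entry within the
// constants section, or -1 if no entry could be created.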
4588 
4589 // Add a non-relocated constant to the CP.
4590 int MacroAssembler::store_const_in_toc(AddressLiteral& val) {
4591   long    value  = val.value();
4592   address tocPos = long_constant(value);
4593 
4594   if (tocPos != NULL) {
4595     int tocOffset = (int)(tocPos - code()->consts()->start());
4596     return tocOffset;
4597   }
4598   // long_constant() returned NULL, so no constant entry has been created.
4599   // In that case, we return a "fatal" offset, in case subsequently generated
4600   // access code gets executed anyway.
4601   return -1;
4602 }
4603 
4604 // Returns the TOC offset where the address is stored.
4605 // Add a relocated constant to the CP.
4606 int MacroAssembler::store_oop_in_toc(AddressLiteral& oop) {
4607   // Use RelocationHolder::none for the constant pool entry.
4608   // Otherwise we will end up with a failing NativeCall::verify(x),
4609   // where x is the address of the constant pool entry.
4610   address tocPos = address_constant((address)oop.value(), RelocationHolder::none);
4611 
4612   if (tocPos != NULL) {
4613     int              tocOffset = (int)(tocPos - code()->consts()->start());
4614     RelocationHolder rsp = oop.rspec();
4615     Relocation      *rel = rsp.reloc();
4616 
4617     // Store toc_offset in relocation, used by call_far_patchable.
4618     if ((relocInfo::relocType)rel->type() == relocInfo::runtime_call_w_cp_type) {
4619       ((runtime_call_w_cp_Relocation *)(rel))->set_constant_pool_offset(tocOffset);
4620     }
4621     // Relocate at the load's pc.
4622     relocate(rsp);
4623 
4624     return tocOffset;
4625   }
4626   // address_constant() returned NULL, so no constant entry has been created.
4627   // In that case, we return a "fatal" offset, in case subsequently generated
4628   // access code gets executed anyway.
4629   return -1;
4630 }
4631 
4632 bool MacroAssembler::load_const_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4633   int     tocOffset = store_const_in_toc(a);
4634   if (tocOffset == -1) return false;
4635   address tocPos    = tocOffset + code()->consts()->start();
4636   assert((address)code()->consts()->start() != NULL, "Please add CP address");
4637   relocate(a.rspec());
4638   load_long_pcrelative(dst, tocPos);
4639   return true;
4640 }
4641 
4642 bool MacroAssembler::load_oop_from_toc(Register dst, AddressLiteral& a, Register Rtoc) {
4643   int     tocOffset = store_oop_in_toc(a);
4644   if (tocOffset == -1) return false;
4645   address tocPos    = tocOffset + code()->consts()->start();
4646   assert((address)code()->consts()->start() != NULL, "Please add CP address");
4647 
4648   load_addr_pcrelative(dst, tocPos);
4649   return true;
4650 }
4651 
4652 // If the instruction sequence at the given pc is a load_const_from_toc
4653 // sequence, return the value currently stored at the referenced position
4654 // in the TOC.
4655 intptr_t MacroAssembler::get_const_from_toc(address pc) {
4656 
4657   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4658 
4659   long    offset  = get_load_const_from_toc_offset(pc);
4660   address dataLoc = NULL;
4661   if (is_load_const_from_toc_pcrelative(pc)) {
4662     dataLoc = pc + offset;
4663   } else {
4664     CodeBlob* cb = CodeCache::find_blob_unsafe(pc);   // Else we get assertion if nmethod is zombie.
4665     assert(cb && cb->is_nmethod(), "sanity");
4666     nmethod* nm = (nmethod*)cb;
4667     dataLoc = nm->ctable_begin() + offset;
4668   }
4669   return *(intptr_t *)dataLoc;
4670 }
4671 
4672 // If the instruction sequence at the given pc is a load_const_from_toc
4673 // sequence, copy the passed-in new_data value into the referenced
4674 // position in the TOC.
4675 void MacroAssembler::set_const_in_toc(address pc, unsigned long new_data, CodeBlob *cb) {
4676   assert(is_load_const_from_toc(pc), "must be load_const_from_pool");
4677 
4678   long    offset = MacroAssembler::get_load_const_from_toc_offset(pc);
4679   address dataLoc = NULL;
4680   if (is_load_const_from_toc_pcrelative(pc)) {
4681     dataLoc = pc+offset;
4682   } else {
4683     nmethod* nm = CodeCache::find_nmethod(pc);
4684     assert((cb == NULL) || (nm == (nmethod*)cb), "instruction address should be in CodeBlob");
4685     dataLoc = nm->ctable_begin() + offset;
4686   }
4687   if (*(unsigned long *)dataLoc != new_data) { // Prevent cache invalidation: update only if necessary.
4688     *(unsigned long *)dataLoc = new_data;
4689   }
4690 }
4691 
4692 // Dynamic TOC. Getter must only be called if "a" is a load_const_from_toc
4693 // site. Verify by calling is_load_const_from_toc() before!!
4694 // Offset is +/- 2**32 -> use long.
4695 long MacroAssembler::get_load_const_from_toc_offset(address a) {
4696   assert(is_load_const_from_toc_pcrelative(a), "expected pc relative load");
4697   //  expected code sequence:
4698   //    z_lgrl(t, simm32);    len = 6
4699   unsigned long inst;
4700   unsigned int  len = get_instruction(a, &inst);
4701   return get_pcrel_offset(inst);
4702 }
4703 
4704 //**********************************************************************************
4705 //  inspection of generated instruction sequences for a particular pattern
4706 //**********************************************************************************
4707 
4708 bool MacroAssembler::is_load_const_from_toc_pcrelative(address a) {
4709 #ifdef ASSERT
4710   unsigned long inst;
4711   unsigned int  len = get_instruction(a+2, &inst);
4712   if ((len == 6) && is_load_pcrelative_long(a) && is_call_pcrelative_long(inst)) {
4713     const int range = 128;
4714     Assembler::dump_code_range(tty, a, range, "instr(a) == z_lgrl && instr(a+2) == z_brasl");
4715     VM_Version::z_SIGSEGV();
4716   }
4717 #endif
4718   // expected code sequence:
4719   //   z_lgrl(t, relAddr32);    len = 6
4720   //TODO: verify accessed data is in CP, if possible.
4721   return is_load_pcrelative_long(a);  // TODO: might be too general. Currently, only lgrl is used.
4722 }
4723 
4724 bool MacroAssembler::is_load_const_from_toc_call(address a) {
4725   return is_load_const_from_toc(a) && is_call_byregister(a + load_const_from_toc_size());
4726 }
4727 
4728 bool MacroAssembler::is_load_const_call(address a) {
4729   return is_load_const(a) && is_call_byregister(a + load_const_size());
4730 }
4731 
4732 //-------------------------------------------------
4733 //   Emitters for some really CISC instructions
4734 //-------------------------------------------------
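// These instructions are interruptible: the CPU may stop before the operation is
// complete and set condition code 3, so every emitter below wraps the instruction
// in a retry loop. Sketch of the common pattern:
//   do {
//     cc = <instruction>(regs...);   // MVCLE, CLCLE, SRST, KM/KMC, CKSM, TRxx, ...
//   } while (cc == 3);               // CC == 3 means "not finished, resume"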
4735 
4736 void MacroAssembler::move_long_ext(Register dst, Register src, unsigned int pad) {
4737   assert(dst->encoding()%2==0, "must be an even/odd register pair");
4738   assert(src->encoding()%2==0, "must be an even/odd register pair");
4739   assert(pad<256, "must be a padding BYTE");
4740 
4741   Label retry;
4742   bind(retry);
4743   Assembler::z_mvcle(dst, src, pad);
4744   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4745 }
4746 
4747 void MacroAssembler::compare_long_ext(Register left, Register right, unsigned int pad) {
4748   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4749   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4750   assert(pad<256, "must be a padding BYTE");
4751 
4752   Label retry;
4753   bind(retry);
4754   Assembler::z_clcle(left, right, pad, Z_R0);
4755   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4756 }
4757 
4758 void MacroAssembler::compare_long_uni(Register left, Register right, unsigned int pad) {
4759   assert(left->encoding() % 2 == 0, "must be an even/odd register pair");
4760   assert(right->encoding() % 2 == 0, "must be an even/odd register pair");
4761   assert(pad<=0xfff, "must be a padding HALFWORD");
4762   assert(VM_Version::has_ETF2(), "instruction must be available");
4763 
4764   Label retry;
4765   bind(retry);
4766   Assembler::z_clclu(left, right, pad, Z_R0);
4767   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4768 }
4769 
4770 void MacroAssembler::search_string(Register end, Register start) {
4771   assert(end->encoding() != 0, "end address must not be in R0");
4772   assert(start->encoding() != 0, "start address must not be in R0");
4773 
4774   Label retry;
4775   bind(retry);
4776   Assembler::z_srst(end, start);
4777   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4778 }
4779 
4780 void MacroAssembler::search_string_uni(Register end, Register start) {
4781   assert(end->encoding() != 0, "end address must not be in R0");
4782   assert(start->encoding() != 0, "start address must not be in R0");
4783   assert(VM_Version::has_ETF3(), "instruction must be available");
4784 
4785   Label retry;
4786   bind(retry);
4787   Assembler::z_srstu(end, start);
4788   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4789 }
4790 
4791 void MacroAssembler::kmac(Register srcBuff) {
4792   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4793   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4794 
4795   Label retry;
4796   bind(retry);
4797   Assembler::z_kmac(Z_R0, srcBuff);
4798   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4799 }
4800 
4801 void MacroAssembler::kimd(Register srcBuff) {
4802   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4803   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4804 
4805   Label retry;
4806   bind(retry);
4807   Assembler::z_kimd(Z_R0, srcBuff);
4808   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4809 }
4810 
4811 void MacroAssembler::klmd(Register srcBuff) {
4812   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4813   assert(srcBuff->encoding() % 2 == 0, "src buffer/len must be an even/odd register pair");
4814 
4815   Label retry;
4816   bind(retry);
4817   Assembler::z_klmd(Z_R0, srcBuff);
4818   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4819 }
4820 
4821 void MacroAssembler::km(Register dstBuff, Register srcBuff) {
4822   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4823   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4824   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4825   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4826   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4827 
4828   Label retry;
4829   bind(retry);
4830   Assembler::z_km(dstBuff, srcBuff);
4831   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4832 }
4833 
4834 void MacroAssembler::kmc(Register dstBuff, Register srcBuff) {
4835   // DstBuff and srcBuff are allowed to be the same register (encryption in-place).
4836   // DstBuff and srcBuff storage must not overlap destructively, and neither must overlap the parameter block.
4837   assert(srcBuff->encoding()     != 0, "src buffer address can't be in Z_R0");
4838   assert(dstBuff->encoding() % 2 == 0, "dst buffer addr must be an even register");
4839   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4840 
4841   Label retry;
4842   bind(retry);
4843   Assembler::z_kmc(dstBuff, srcBuff);
4844   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4845 }
4846 
4847 void MacroAssembler::cksm(Register crcBuff, Register srcBuff) {
4848   assert(srcBuff->encoding() % 2 == 0, "src buffer addr/len must be an even/odd register pair");
4849 
4850   Label retry;
4851   bind(retry);
4852   Assembler::z_cksm(crcBuff, srcBuff);
4853   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4854 }
4855 
4856 void MacroAssembler::translate_oo(Register r1, Register r2, uint m3) {
4857   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4858   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4859 
4860   Label retry;
4861   bind(retry);
4862   Assembler::z_troo(r1, r2, m3);
4863   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4864 }
4865 
4866 void MacroAssembler::translate_ot(Register r1, Register r2, uint m3) {
4867   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4868   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4869 
4870   Label retry;
4871   bind(retry);
4872   Assembler::z_trot(r1, r2, m3);
4873   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4874 }
4875 
4876 void MacroAssembler::translate_to(Register r1, Register r2, uint m3) {
4877   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4878   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4879 
4880   Label retry;
4881   bind(retry);
4882   Assembler::z_trto(r1, r2, m3);
4883   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4884 }
4885 
4886 void MacroAssembler::translate_tt(Register r1, Register r2, uint m3) {
4887   assert(r1->encoding() % 2 == 0, "dst addr/src len must be an even/odd register pair");
4888   assert((m3 & 0b1110) == 0, "Unused mask bits must be zero");
4889 
4890   Label retry;
4891   bind(retry);
4892   Assembler::z_trtt(r1, r2, m3);
4893   Assembler::z_brc(Assembler::bcondOverflow /* CC==3 (iterate) */, retry);
4894 }
4895 
4896 //---------------------------------------
4897 // Helpers for Intrinsic Emitters
4898 //---------------------------------------
4899 
4900 /**
4901  * uint32_t crc;
4902  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4903  */
4904 void MacroAssembler::fold_byte_crc32(Register crc, Register val, Register table, Register tmp) {
4905   assert_different_registers(crc, table, tmp);
4906   assert_different_registers(val, table);
4907   if (crc == val) {      // Must rotate first to use the unmodified value.
4908     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into bytes 6..7 of tmp; clear the rest.
4909     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4910   } else {
4911     z_srl(crc, 8);       // Unsigned shift, clear leftmost 8 bits.
4912     rotate_then_insert(tmp, val, 56-2, 63-2, 2, true);  // Insert byte 7 of val, shifted left by 2, into bytes 6..7 of tmp; clear the rest.
4913   }
4914   z_x(crc, Address(table, tmp, 0));
4915 }
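
// What the instruction sequence above computes, written as plain C for reference
// (illustrative sketch only; it assumes 'table' points to 4-byte entries, which is
// why the extracted byte is pre-shifted left by 2 to form a byte offset):
//
//   uint32_t idx = (val & 0xFF) << 2;                                 // rotate_then_insert
//   crc = (crc >> 8) ^ *(const uint32_t*)((const char*)table + idx);  // z_srl + z_x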
4916 
4917 /**
4918  * uint32_t crc;
4919  * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
4920  */
4921 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
4922   fold_byte_crc32(crc, crc, table, tmp);
4923 }
4924 
4925 /**
4926  * Emits code to update CRC-32 with a byte value according to constants in table.
4927  *
4928  * @param [in,out]crc Register containing the crc.
4929  * @param [in]val     Register containing the byte to fold into the CRC.
4930  * @param [in]table   Register containing the table of crc constants.
4931  *
4932  * uint32_t crc;
4933  * val = crc_table[(val ^ crc) & 0xFF];
4934  * crc = val ^ (crc >> 8);
4935  */
4936 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
4937   z_xr(val, crc);
4938   fold_byte_crc32(crc, val, table, val);
4939 }
4940 
4941 
4942 /**
4943  * @param crc   register containing existing CRC (32-bit)
4944  * @param buf   register pointing to input byte buffer (byte*)
4945  * @param len   register containing number of bytes
4946  * @param table register pointing to CRC table
4947  */
4948 void MacroAssembler::update_byteLoop_crc32(Register crc, Register buf, Register len, Register table, Register data) {
4949   assert_different_registers(crc, buf, len, table, data);
4950 
4951   Label L_mainLoop, L_done;
4952   const int mainLoop_stepping = 1;
4953 
4954   // Process all bytes in a single-byte loop.
4955   z_ltr(len, len);
4956   z_brnh(L_done);
4957 
4958   bind(L_mainLoop);
4959     z_llgc(data, Address(buf, (intptr_t)0));// Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
4960     add2reg(buf, mainLoop_stepping);        // Advance buffer position.
4961     update_byte_crc32(crc, data, table);
4962     z_brct(len, L_mainLoop);                // Iterate.
4963 
4964   bind(L_done);
4965 }
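
// Equivalent C for the loop above (illustrative sketch only, with 'table' viewed
// as uint32_t[256]):
//
//   while (len > 0) {
//     crc = table[(*buf ^ crc) & 0xFF] ^ (crc >> 8);
//     buf++; len--;
//   }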
4966 
4967 /**
4968  * Emits code to update CRC-32 with a 4-byte value according to constants in table.
4969  * Implementation according to jdk/src/share/native/java/util/zip/zlib-1.2.8/crc32.c.
4970  *
4971  */
4972 void MacroAssembler::update_1word_crc32(Register crc, Register buf, Register table, int bufDisp, int bufInc,
4973                                         Register t0,  Register t1,  Register t2,    Register t3) {
4974   // This is what we implement (the DOBIG4 part):
4975   //
4976   // #define DOBIG4 c ^= *++buf4; \
4977   //         c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
4978   //             crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
4979   // #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
4980   // Pre-calculate (constant) column offsets, use columns 4..7 for big-endian.
4981   const int ix0 = 4*(4*CRC32_COLUMN_SIZE);
4982   const int ix1 = 5*(4*CRC32_COLUMN_SIZE);
4983   const int ix2 = 6*(4*CRC32_COLUMN_SIZE);
4984   const int ix3 = 7*(4*CRC32_COLUMN_SIZE);
4985 
4986   // XOR crc with next four bytes of buffer.
4987   lgr_if_needed(t0, crc);
4988   z_x(t0, Address(buf, bufDisp));
4989   if (bufInc != 0) {
4990     add2reg(buf, bufInc);
4991   }
4992 
4993   // Chop crc into 4 single-byte pieces, shifted left 2 bits, to form the table indices.
4994   rotate_then_insert(t3, t0, 56-2, 63-2, 2,    true);  // ((c >>  0) & 0xff) << 2
4995   rotate_then_insert(t2, t0, 56-2, 63-2, 2-8,  true);  // ((c >>  8) & 0xff) << 2
4996   rotate_then_insert(t1, t0, 56-2, 63-2, 2-16, true);  // ((c >> 16) & 0xff) << 2
4997   rotate_then_insert(t0, t0, 56-2, 63-2, 2-24, true);  // ((c >> 24) & 0xff) << 2
4998 
4999   // XOR indexed table values to calculate updated crc.
5000   z_ly(t2, Address(table, t2, (intptr_t)ix1));
5001   z_ly(t0, Address(table, t0, (intptr_t)ix3));
5002   z_xy(t2, Address(table, t3, (intptr_t)ix0));
5003   z_xy(t0, Address(table, t1, (intptr_t)ix2));
5004   z_xr(t0, t2);           // Now t0 contains the updated CRC value.
5005   lgr_if_needed(crc, t0);
5006 }
5007 
5008 /**
5009  * @param crc   register containing existing CRC (32-bit)
5010  * @param buf   register pointing to input byte buffer (byte*)
5011  * @param len   register containing number of bytes
5012  * @param table register pointing to CRC table
5013  *
5014  * Uses Z_R10..Z_R13 as work registers. They must be saved/restored by the caller!
5015  */
5016 void MacroAssembler::kernel_crc32_1word(Register crc, Register buf, Register len, Register table,
5017                                         Register t0,  Register t1,  Register t2,  Register t3,
5018                                         bool invertCRC) {
5019   assert_different_registers(crc, buf, len, table);
5020 
5021   Label L_mainLoop, L_tail;
5022   Register  data = t0;
5023   Register  ctr  = Z_R0;
5024   const int mainLoop_stepping = 4;
5025   const int log_stepping      = exact_log2(mainLoop_stepping);
5026 
5027   // Don't test for len <= 0 here. This pathological case should not occur anyway.
5028   // Optimizing for it by adding a test and a branch seems to be a waste of CPU cycles.
5029   // The situation itself is detected and handled correctly by the initial length
5030   // check (z_srag/z_brnh below) and by the byte loop that processes the tail.
5031 
5032   if (invertCRC) {
5033     not_(crc, noreg, false);           // 1s complement of crc
5034   }
5035 
5036   // Check for short (<4 bytes) buffer.
5037   z_srag(ctr, len, log_stepping);
5038   z_brnh(L_tail);
5039 
5040   z_lrvr(crc, crc);          // Reverse byte order because we are dealing with big-endian data.
5041   rotate_then_insert(len, len, 64-log_stepping, 63, 0, true); // #bytes for tailLoop
5042 
5043   BIND(L_mainLoop);
5044     update_1word_crc32(crc, buf, table, 0, mainLoop_stepping, crc, t1, t2, t3);
5045     z_brct(ctr, L_mainLoop); // Iterate.
5046 
5047   z_lrvr(crc, crc);          // Reverse byte order back to original.
5048 
5049   // Process the last few (<4) bytes of the buffer.
5050   BIND(L_tail);
5051   update_byteLoop_crc32(crc, buf, len, table, data);
5052 
5053   if (invertCRC) {
5054     not_(crc, noreg, false);           // 1s complement of crc
5055   }
5056 }
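
// Overall shape of the routine above, as a C sketch (illustration only; the
// byte-order reversal done by z_lrvr has no counterpart here and is omitted):
//
//   if (invertCRC) crc = ~crc;
//   for (n = len >> 2; n > 0; n--) { /* 4-byte slicing-by-4 word update */ }
//   for (n = len & 3;  n > 0; n--) { /* byte-at-a-time update           */ }
//   if (invertCRC) crc = ~crc;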
5057 
5058 /**
5059  * @param crc   register containing existing CRC (32-bit)
5060  * @param buf   register pointing to input byte buffer (byte*)
5061  * @param len   register containing number of bytes
5062  * @param table register pointing to CRC table
5063  */
5064 void MacroAssembler::kernel_crc32_1byte(Register crc, Register buf, Register len, Register table,
5065                                         Register t0,  Register t1,  Register t2,  Register t3,
5066                                         bool invertCRC) {
5067   assert_different_registers(crc, buf, len, table);
5068   Register data = t0;
5069 
5070   if (invertCRC) {
5071     not_(crc, noreg, false);           // 1s complement of crc
5072   }
5073 
5074   update_byteLoop_crc32(crc, buf, len, table, data);
5075 
5076   if (invertCRC) {
5077     not_(crc, noreg, false);           // 1s complement of crc
5078   }
5079 }
5080 
5081 void MacroAssembler::kernel_crc32_singleByte(Register crc, Register buf, Register len, Register table, Register tmp,
5082                                              bool invertCRC) {
5083   assert_different_registers(crc, buf, len, table, tmp);
5084 
5085   if (invertCRC) {
5086     not_(crc, noreg, false);           // 1s complement of crc
5087   }
5088 
5089   z_llgc(tmp, Address(buf, (intptr_t)0));  // Current byte of input buffer (zero extended). Avoids garbage in upper half of register.
5090   update_byte_crc32(crc, tmp, table);
5091 
5092   if (invertCRC) {
5093     not_(crc, noreg, false);           // 1s complement of crc
5094   }
5095 }
5096 
5097 void MacroAssembler::kernel_crc32_singleByteReg(Register crc, Register val, Register table,
5098                                                 bool invertCRC) {
5099   assert_different_registers(crc, val, table);
5100 
5101   if (invertCRC) {
5102     not_(crc, noreg, false);           // 1s complement of crc
5103   }
5104 
5105   update_byte_crc32(crc, val, table);
5106 
5107   if (invertCRC) {
5108     not_(crc, noreg, false);           // 1s complement of crc
5109   }
5110 }
5111 
5112 //
5113 // Code for BigInteger::multiplyToLen() intrinsic.
5114 //
5115 
5116 // dest_lo += src1 + src2
5117 // dest_hi += carry1 + carry2
5118 // Z_R7 is destroyed !
5119 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo,
5120                                      Register src1, Register src2) {
5121   clear_reg(Z_R7);
5122   z_algr(dest_lo, src1);
5123   z_alcgr(dest_hi, Z_R7);
5124   z_algr(dest_lo, src2);
5125   z_alcgr(dest_hi, Z_R7);
5126 }
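
// Viewed as one 128-bit accumulation (illustrative sketch only, using GCC's
// unsigned __int128 extension; the emitted code folds each carry into dest_hi
// via z_alcgr instead of recomputing the high half):
//
//   unsigned __int128 acc = ((unsigned __int128)dest_hi << 64) | dest_lo;
//   acc += src1;
//   acc += src2;
//   dest_hi = (uint64_t)(acc >> 64);
//   dest_lo = (uint64_t)acc;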
5127 
5128 // Multiply 64 bit by 64 bit first loop.
5129 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart,
5130                                            Register x_xstart,
5131                                            Register y, Register y_idx,
5132                                            Register z,
5133                                            Register carry,
5134                                            Register product,
5135                                            Register idx, Register kdx) {
5136   // jlong carry, x[], y[], z[];
5137   // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
5138   //   huge_128 product = y[idx] * x[xstart] + carry;
5139   //   z[kdx] = (jlong)product;
5140   //   carry  = (jlong)(product >>> 64);
5141   // }
5142   // z[xstart] = carry;
5143 
5144   Label L_first_loop, L_first_loop_exit;
5145   Label L_one_x, L_one_y, L_multiply;
5146 
5147   z_aghi(xstart, -1);
5148   z_brl(L_one_x);   // Special case: length of x is 1.
5149 
5150   // Load next two integers of x.
5151   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5152   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
5153 
5154 
5155   bind(L_first_loop);
5156 
5157   z_aghi(idx, -1);
5158   z_brl(L_first_loop_exit);
5159   z_aghi(idx, -1);
5160   z_brl(L_one_y);
5161 
5162   // Load next two integers of y.
5163   z_sllg(Z_R1_scratch, idx, LogBytesPerInt);
5164   mem2reg_opt(y_idx, Address(y, Z_R1_scratch, 0));
5165 
5166 
5167   bind(L_multiply);
5168 
5169   Register multiplicand = product->successor();
5170   Register product_low = multiplicand;
5171 
5172   lgr_if_needed(multiplicand, x_xstart);
5173   z_mlgr(product, y_idx);     // multiplicand * y_idx -> product::multiplicand
5174   clear_reg(Z_R7);
5175   z_algr(product_low, carry); // Add carry to result.
5176   z_alcgr(product, Z_R7);     // Add carry of the last addition.
5177   add2reg(kdx, -2);
5178 
5179   // Store result.
5180   z_sllg(Z_R7, kdx, LogBytesPerInt);
5181   reg2mem_opt(product_low, Address(z, Z_R7, 0));
5182   lgr_if_needed(carry, product);
5183   z_bru(L_first_loop);
5184 
5185 
5186   bind(L_one_y); // Load one 32 bit portion of y as (0,value).
5187 
5188   clear_reg(y_idx);
5189   mem2reg_opt(y_idx, Address(y, (intptr_t) 0), false);
5190   z_bru(L_multiply);
5191 
5192 
5193   bind(L_one_x); // Load one 32 bit portion of x as (0,value).
5194 
5195   clear_reg(x_xstart);
5196   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5197   z_bru(L_first_loop);
5198 
5199   bind(L_first_loop_exit);
5200 }
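
// Reminder on z_mlgr as used above (illustrative sketch only): the multiplicand
// lives in the odd register of the even/odd pair, and the 128-bit product is
// returned in the pair, high half in the even register:
//
//   unsigned __int128 p = (unsigned __int128)multiplicand * y_idx;
//   product     = (uint64_t)(p >> 64);   // even register: high 64 bits
//   product_low = (uint64_t)p;           // odd register:  low 64 bits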
5201 
5202 // Multiply 64 bit by 64 bit and add 128 bit.
5203 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y,
5204                                             Register z,
5205                                             Register yz_idx, Register idx,
5206                                             Register carry, Register product,
5207                                             int offset) {
5208   // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
5209   // z[kdx] = (jlong)product;
5210 
5211   Register multiplicand = product->successor();
5212   Register product_low = multiplicand;
5213 
5214   z_sllg(Z_R7, idx, LogBytesPerInt);
5215   mem2reg_opt(yz_idx, Address(y, Z_R7, offset));
5216 
5217   lgr_if_needed(multiplicand, x_xstart);
5218   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5219   mem2reg_opt(yz_idx, Address(z, Z_R7, offset));
5220 
5221   add2_with_carry(product, product_low, carry, yz_idx);
5222 
5223   z_sllg(Z_R7, idx, LogBytesPerInt);
5224   reg2mem_opt(product_low, Address(z, Z_R7, offset));
5225 
5226 }
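
// In C terms, the helper above performs (illustrative sketch only; y_at_idx and
// z_at_idx stand for the 64-bit chunks loaded from y and z at the given offset):
//
//   unsigned __int128 p = (unsigned __int128)x_xstart * y_at_idx + z_at_idx + carry;
//   z_at_idx = (uint64_t)p;            // low half stored back to z
//   product  = (uint64_t)(p >> 64);    // high half left in 'product' (next carry)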
5227 
5228 // Multiply 128 bit by 128 bit. Unrolled inner loop.
5229 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart,
5230                                              Register y, Register z,
5231                                              Register yz_idx, Register idx,
5232                                              Register jdx,
5233                                              Register carry, Register product,
5234                                              Register carry2) {
5235   // jlong carry, x[], y[], z[];
5236   // int kdx = ystart+1;
5237   // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
5238   //   huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
5239   //   z[kdx+idx+1] = (jlong)product;
5240   //   jlong carry2 = (jlong)(product >>> 64);
5241   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
5242   //   z[kdx+idx] = (jlong)product;
5243   //   carry = (jlong)(product >>> 64);
5244   // }
5245   // idx += 2;
5246   // if (idx > 0) {
5247   //   product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
5248   //   z[kdx+idx] = (jlong)product;
5249   //   carry = (jlong)(product >>> 64);
5250   // }
5251 
5252   Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
5253 
5254   // scale the index
5255   lgr_if_needed(jdx, idx);
5256   and_imm(jdx, 0xfffffffffffffffcL);
5257   rshift(jdx, 2);
5258 
5259 
5260   bind(L_third_loop);
5261 
5262   z_aghi(jdx, -1);
5263   z_brl(L_third_loop_exit);
5264   add2reg(idx, -4);
5265 
5266   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
5267   lgr_if_needed(carry2, product);
5268 
5269   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
5270   lgr_if_needed(carry, product);
5271   z_bru(L_third_loop);
5272 
5273 
5274   bind(L_third_loop_exit);  // Handle any left-over operand parts.
5275 
5276   and_imm(idx, 0x3);
5277   z_brz(L_post_third_loop_done);
5278 
5279   Label L_check_1;
5280 
5281   z_aghi(idx, -2);
5282   z_brl(L_check_1);
5283 
5284   multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
5285   lgr_if_needed(carry, product);
5286 
5287 
5288   bind(L_check_1);
5289 
5290   add2reg(idx, 0x2);
5291   and_imm(idx, 0x1);
5292   z_aghi(idx, -1);
5293   z_brl(L_post_third_loop_done);
5294 
5295   Register   multiplicand = product->successor();
5296   Register   product_low = multiplicand;
5297 
5298   z_sllg(Z_R7, idx, LogBytesPerInt);
5299   clear_reg(yz_idx);
5300   mem2reg_opt(yz_idx, Address(y, Z_R7, 0), false);
5301   lgr_if_needed(multiplicand, x_xstart);
5302   z_mlgr(product, yz_idx); // multiplicand * yz_idx -> product::multiplicand
5303   clear_reg(yz_idx);
5304   mem2reg_opt(yz_idx, Address(z, Z_R7, 0), false);
5305 
5306   add2_with_carry(product, product_low, yz_idx, carry);
5307 
5308   z_sllg(Z_R7, idx, LogBytesPerInt);
5309   reg2mem_opt(product_low, Address(z, Z_R7, 0), false);
5310   rshift(product_low, 32);
5311 
5312   lshift(product, 32);
5313   z_ogr(product_low, product);
5314   lgr_if_needed(carry, product_low);
5315 
5316   bind(L_post_third_loop_done);
5317 }
5318 
5319 void MacroAssembler::multiply_to_len(Register x, Register xlen,
5320                                      Register y, Register ylen,
5321                                      Register z,
5322                                      Register tmp1, Register tmp2,
5323                                      Register tmp3, Register tmp4,
5324                                      Register tmp5) {
5325   ShortBranchVerifier sbv(this);
5326 
5327   assert_different_registers(x, xlen, y, ylen, z,
5328                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R1_scratch, Z_R7);
5329   assert_different_registers(x, xlen, y, ylen, z,
5330                              tmp1, tmp2, tmp3, tmp4, tmp5, Z_R8);
5331 
5332   z_stmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5333 
5334   // In OpenJDK, the zlen argument is stored as a 32-bit value in its stack slot.
5335   Address zlen(Z_SP, _z_abi(remaining_cargs));  // Int in long on big endian.
5336 
5337   const Register idx = tmp1;
5338   const Register kdx = tmp2;
5339   const Register xstart = tmp3;
5340 
5341   const Register y_idx = tmp4;
5342   const Register carry = tmp5;
5343   const Register product  = Z_R0_scratch;
5344   const Register x_xstart = Z_R8;
5345 
5346   // First Loop.
5347   //
5348   //   final static long LONG_MASK = 0xffffffffL;
5349   //   int xstart = xlen - 1;
5350   //   int ystart = ylen - 1;
5351   //   long carry = 0;
5352   //   for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
5353   //     long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
5354   //     z[kdx] = (int)product;
5355   //     carry = product >>> 32;
5356   //   }
5357   //   z[xstart] = (int)carry;
5358   //
5359 
5360   lgr_if_needed(idx, ylen);  // idx = ylen
5361   z_llgf(kdx, zlen);         // C2 does not respect int to long conversion for stub calls, thus load zero-extended.
5362   clear_reg(carry);          // carry = 0
5363 
5364   Label L_done;
5365 
5366   lgr_if_needed(xstart, xlen);
5367   z_aghi(xstart, -1);
5368   z_brl(L_done);
5369 
5370   multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
5371 
5372   NearLabel L_second_loop;
5373   compare64_and_branch(kdx, RegisterOrConstant((intptr_t) 0), bcondEqual, L_second_loop);
5374 
5375   NearLabel L_carry;
5376   z_aghi(kdx, -1);
5377   z_brz(L_carry);
5378 
5379   // Store lower 32 bits of carry.
5380   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5381   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5382   rshift(carry, 32);
5383   z_aghi(kdx, -1);
5384 
5385 
5386   bind(L_carry);
5387 
5388   // Store upper 32 bits of carry.
5389   z_sllg(Z_R1_scratch, kdx, LogBytesPerInt);
5390   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5391 
5392   // Second and third (nested) loops.
5393   //
5394   // for (int i = xstart-1; i >= 0; i--) { // Second loop
5395   //   carry = 0;
5396   //   for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
5397   //     long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
5398   //                    (z[k] & LONG_MASK) + carry;
5399   //     z[k] = (int)product;
5400   //     carry = product >>> 32;
5401   //   }
5402   //   z[i] = (int)carry;
5403   // }
5404   //
5405   // i = xstart, jdx = tmp1, kdx = tmp2, carry = tmp5, x[i] = x_xstart
5406 
5407   const Register jdx = tmp1;
5408 
5409   bind(L_second_loop);
5410 
5411   clear_reg(carry);           // carry = 0;
5412   lgr_if_needed(jdx, ylen);   // j = ystart+1
5413 
5414   z_aghi(xstart, -1);         // i = xstart-1;
5415   z_brl(L_done);
5416 
5417   // Use free slots in the current stackframe instead of push/pop.
5418   Address zsave(Z_SP, _z_abi(carg_1));
5419   reg2mem_opt(z, zsave);
5420 
5421 
5422   Label L_last_x;
5423 
5424   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5425   load_address(z, Address(z, Z_R1_scratch, 4)); // z = z + k - j
5426   z_aghi(xstart, -1);                           // i = xstart-1;
5427   z_brl(L_last_x);
5428 
5429   z_sllg(Z_R1_scratch, xstart, LogBytesPerInt);
5430   mem2reg_opt(x_xstart, Address(x, Z_R1_scratch, 0));
5431 
5432 
5433   Label L_third_loop_prologue;
5434 
5435   bind(L_third_loop_prologue);
5436 
5437   Address xsave(Z_SP, _z_abi(carg_2));
5438   Address xlensave(Z_SP, _z_abi(carg_3));
5439   Address ylensave(Z_SP, _z_abi(carg_4));
5440 
5441   reg2mem_opt(x, xsave);
5442   reg2mem_opt(xstart, xlensave);
5443   reg2mem_opt(ylen, ylensave);
5444 
5445 
5446   multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
5447 
5448   mem2reg_opt(z, zsave);
5449   mem2reg_opt(x, xsave);
5450   mem2reg_opt(xlen, xlensave);   // This is the decrement of the loop counter!
5451   mem2reg_opt(ylen, ylensave);
5452 
5453   add2reg(tmp3, 1, xlen);
5454   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5455   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5456   z_aghi(tmp3, -1);
5457   z_brl(L_done);
5458 
5459   rshift(carry, 32);
5460   z_sllg(Z_R1_scratch, tmp3, LogBytesPerInt);
5461   reg2mem_opt(carry, Address(z, Z_R1_scratch, 0), false);
5462   z_bru(L_second_loop);
5463 
5464   // The following, infrequently executed code is moved out of the loops.
5465   bind(L_last_x);
5466 
5467   clear_reg(x_xstart);
5468   mem2reg_opt(x_xstart, Address(x, (intptr_t) 0), false);
5469   z_bru(L_third_loop_prologue);
5470 
5471   bind(L_done);
5472 
5473   z_lmg(Z_R7, Z_R13, _z_abi(gpr7), Z_SP);
5474 }
5475 
5476 #ifndef PRODUCT
5477 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false).
5478 void MacroAssembler::asm_assert(bool check_equal, const char *msg, int id) {
5479   Label ok;
5480   if (check_equal) {
5481     z_bre(ok);
5482   } else {
5483     z_brne(ok);
5484   }
5485   stop(msg, id);
5486   bind(ok);
5487 }
5488 
5489 // Assert if CC indicates "low".
5490 void MacroAssembler::asm_assert_low(const char *msg, int id) {
5491   Label ok;
5492   z_brnl(ok);
5493   stop(msg, id);
5494   bind(ok);
5495 }
5496 
5497 // Assert if CC indicates "high".
5498 void MacroAssembler::asm_assert_high(const char *msg, int id) {
5499   Label ok;
5500   z_brnh(ok);
5501   stop(msg, id);
5502   bind(ok);
5503 }
5504 
5505 // Assert if CC indicates "not equal" (check_equal==true) or "equal" (check_equal==false)
5506 // generate non-relocatable code.
5507 void MacroAssembler::asm_assert_static(bool check_equal, const char *msg, int id) {
5508   Label ok;
5509   if (check_equal) { z_bre(ok); }
5510   else             { z_brne(ok); }
5511   stop_static(msg, id);
5512   bind(ok);
5513 }
5514 
5515 void MacroAssembler::asm_assert_mems_zero(bool check_equal, bool allow_relocation, int size, int64_t mem_offset,
5516                                           Register mem_base, const char* msg, int id) {
5517   switch (size) {
5518     case 4:
5519       load_and_test_int(Z_R0, Address(mem_base, mem_offset));
5520       break;
5521     case 8:
5522       load_and_test_long(Z_R0,  Address(mem_base, mem_offset));
5523       break;
5524     default:
5525       ShouldNotReachHere();
5526   }
5527   if (allow_relocation) { asm_assert(check_equal, msg, id); }
5528   else                  { asm_assert_static(check_equal, msg, id); }
5529 }
5530 
5531 // Check the condition
5532 //   expected_size == FP - SP
5533 // after transformation:
5534 //   expected_size - FP + SP == 0
5535 // Destroys Register expected_size if no tmp register is passed.
5536 void MacroAssembler::asm_assert_frame_size(Register expected_size, Register tmp, const char* msg, int id) {
5537   if (tmp == noreg) {
5538     tmp = expected_size;
5539   } else {
5540     if (tmp != expected_size) {
5541       z_lgr(tmp, expected_size);
5542     }
5543   }
5544   z_algr(tmp, Z_SP);
5545   z_slg(tmp, 0, Z_R0, Z_SP);
5546   asm_assert_eq(msg, id);
5547 }
5548 #endif // !PRODUCT
5549 
5550 void MacroAssembler::verify_thread() {
5551   if (VerifyThread) {
5552     unimplemented("", 117);
5553   }
5554 }
5555 
5556 // Save and restore functions: Exclude Z_R0.
5557 void MacroAssembler::save_volatile_regs(Register dst, int offset, bool include_fp, bool include_flags) {
5558   z_stmg(Z_R1, Z_R5, offset, dst); offset += 5 * BytesPerWord;
5559   if (include_fp) {
5560     z_std(Z_F0, Address(dst, offset)); offset += BytesPerWord;
5561     z_std(Z_F1, Address(dst, offset)); offset += BytesPerWord;
5562     z_std(Z_F2, Address(dst, offset)); offset += BytesPerWord;
5563     z_std(Z_F3, Address(dst, offset)); offset += BytesPerWord;
5564     z_std(Z_F4, Address(dst, offset)); offset += BytesPerWord;
5565     z_std(Z_F5, Address(dst, offset)); offset += BytesPerWord;
5566     z_std(Z_F6, Address(dst, offset)); offset += BytesPerWord;
5567     z_std(Z_F7, Address(dst, offset)); offset += BytesPerWord;
5568   }
5569   if (include_flags) {
5570     Label done;
5571     z_mvi(Address(dst, offset), 2); // encoding: equal
5572     z_bre(done);
5573     z_mvi(Address(dst, offset), 4); // encoding: higher
5574     z_brh(done);
5575     z_mvi(Address(dst, offset), 1); // encoding: lower
5576     bind(done);
5577   }
5578 }
5579 void MacroAssembler::restore_volatile_regs(Register src, int offset, bool include_fp, bool include_flags) {
5580   z_lmg(Z_R1, Z_R5, offset, src); offset += 5 * BytesPerWord;
5581   if (include_fp) {
5582     z_ld(Z_F0, Address(src, offset)); offset += BytesPerWord;
5583     z_ld(Z_F1, Address(src, offset)); offset += BytesPerWord;
5584     z_ld(Z_F2, Address(src, offset)); offset += BytesPerWord;
5585     z_ld(Z_F3, Address(src, offset)); offset += BytesPerWord;
5586     z_ld(Z_F4, Address(src, offset)); offset += BytesPerWord;
5587     z_ld(Z_F5, Address(src, offset)); offset += BytesPerWord;
5588     z_ld(Z_F6, Address(src, offset)); offset += BytesPerWord;
5589     z_ld(Z_F7, Address(src, offset)); offset += BytesPerWord;
5590   }
5591   if (include_flags) {
5592     z_cli(Address(src, offset), 2); // see encoding above
5593   }
5594 }
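
// How the condition code survives the round trip (illustrative sketch only):
// the save side stores one of {2, 4, 1} depending on CC, and the restore side
// compares that byte against 2 with z_cli, which re-creates an equivalent CC:
//
//   saved = (CC == equal) ? 2 : (CC == high) ? 4 : 1;  // save_volatile_regs
//   compare(saved, 2);  // restore_volatile_regs: CC becomes equal/high/low again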
5595 
5596 // Plausibility check for oops.
5597 void MacroAssembler::verify_oop(Register oop, const char* msg) {
5598   if (!VerifyOops) return;
5599 
5600   BLOCK_COMMENT("verify_oop {");
5601   unsigned int nbytes_save = (5 + 8 + 1) * BytesPerWord;
5602   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5603 
5604   save_return_pc();
5605 
5606   // Push frame, but preserve flags
5607   z_lgr(Z_R0, Z_SP);
5608   z_lay(Z_SP, -((int64_t)nbytes_save + frame::z_abi_160_size), Z_SP);
5609   z_stg(Z_R0, _z_abi(callers_sp), Z_SP);
5610 
5611   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5612 
5613   lgr_if_needed(Z_ARG2, oop);
5614   load_const_optimized(Z_ARG1, (address)msg);
5615   load_const_optimized(Z_R1, entry_addr);
5616   z_lg(Z_R1, 0, Z_R1);
5617   call_c(Z_R1);
5618 
5619   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, true);
5620   pop_frame();
5621   restore_return_pc();
5622 
5623   BLOCK_COMMENT("} verify_oop ");
5624 }
5625 
5626 void MacroAssembler::verify_oop_addr(Address addr, const char* msg) {
5627   if (!VerifyOops) return;
5628 
5629   BLOCK_COMMENT("verify_oop {");
5630   unsigned int nbytes_save = (5 + 8) * BytesPerWord;
5631   address entry_addr = StubRoutines::verify_oop_subroutine_entry_address();
5632 
5633   save_return_pc();
5634   unsigned int frame_size = push_frame_abi160(nbytes_save); // kills Z_R0
5635   save_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5636 
5637   z_lg(Z_ARG2, addr.plus_disp(frame_size));
5638   load_const_optimized(Z_ARG1, (address)msg);
5639   load_const_optimized(Z_R1, entry_addr);
5640   z_lg(Z_R1, 0, Z_R1);
5641   call_c(Z_R1);
5642 
5643   restore_volatile_regs(Z_SP, frame::z_abi_160_size, true, false);
5644   pop_frame();
5645   restore_return_pc();
5646 
5647   BLOCK_COMMENT("} verify_oop ");
5648 }
5649 
5650 const char* MacroAssembler::stop_types[] = {
5651   "stop",
5652   "untested",
5653   "unimplemented",
5654   "shouldnotreachhere"
5655 };
5656 
5657 static void stop_on_request(const char* tp, const char* msg) {
5658   tty->print("Z assembly code requires stop: (%s) %s\n", tp, msg);
5659   guarantee(false, "Z assembly code requires stop: %s", msg);
5660 }
5661 
5662 void MacroAssembler::stop(int type, const char* msg, int id) {
5663   BLOCK_COMMENT(err_msg("stop: %s {", msg));
5664 
5665   // Setup arguments.
5666   load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5667   load_const(Z_ARG2, (void*) msg);
5668   get_PC(Z_R14);     // Following code pushes a frame without entering a new function. Use current pc as return address.
5669   save_return_pc();  // Saves return pc Z_R14.
5670   push_frame_abi160(0);
5671   call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5672   // The plain disassembler does not recognize illtrap. It instead displays
5673   // a 32-bit value. Issuing two illtraps ensures that the disassembler finds
5674   // the proper beginning of the next instruction.
5675   z_illtrap(); // Illegal instruction.
5676   z_illtrap(); // Illegal instruction.
5677 
5678   BLOCK_COMMENT(" } stop");
5679 }
5680 
5681 // Special version of stop() for code size reduction.
5682 // Reuses the previously generated call sequence, if any.
5683 // Generates the call sequence on its own, if necessary.
5684 // Note: This code will work only in non-relocatable code!
5685 //       The relative address of the data elements (arg1, arg2) must not change.
5686 //       The reentry point must not move relative to its users. This prerequisite
5687 //       holds for "hand-written" code, as long as all chain calls are in the same code blob.
5688 //       Generated code must not undergo any transformation, e.g. ShortenBranches, to be safe.
5689 address MacroAssembler::stop_chain(address reentry, int type, const char* msg, int id, bool allow_relocation) {
5690   BLOCK_COMMENT(err_msg("stop_chain(%s,%s): %s {", reentry==NULL?"init":"cont", allow_relocation?"reloc ":"static", msg));
5691 
5692   // Setup arguments.
5693   if (allow_relocation) {
5694     // Relocatable version (for comparison purposes). Remove after some time.
5695     load_const(Z_ARG1, (void*) stop_types[type%stop_end]);
5696     load_const(Z_ARG2, (void*) msg);
5697   } else {
5698     load_absolute_address(Z_ARG1, (address)stop_types[type%stop_end]);
5699     load_absolute_address(Z_ARG2, (address)msg);
5700   }
5701   if ((reentry != NULL) && RelAddr::is_in_range_of_RelAddr16(reentry, pc())) {
5702     BLOCK_COMMENT("branch to reentry point:");
5703     z_brc(bcondAlways, reentry);
5704   } else {
5705     BLOCK_COMMENT("reentry point:");
5706     reentry = pc();      // Re-entry point for subsequent stop calls.
5707     save_return_pc();    // Saves return pc Z_R14.
5708     push_frame_abi160(0);
5709     if (allow_relocation) {
5710       reentry = NULL;    // Prevent reentry if code relocation is allowed.
5711       call_VM_leaf(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5712     } else {
5713       call_VM_leaf_static(CAST_FROM_FN_PTR(address, stop_on_request), Z_ARG1, Z_ARG2);
5714     }
5715     z_illtrap(); // Illegal instruction as emergency stop, should the above call return.
5716   }
5717   BLOCK_COMMENT(" } stop_chain");
5718 
5719   return reentry;
5720 }
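
// Typical chaining pattern (illustrative sketch only; it assumes the stop type
// constants, e.g. stop_stop, declared alongside stop_types[] in the header):
//
//   address reentry = NULL;
//   reentry = stop_chain(reentry, stop_stop, "first check failed",  0, false);
//   reentry = stop_chain(reentry, stop_stop, "second check failed", 0, false);
//   // The second call emits only a short branch to the first call's sequence.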
5721 
5722 // Special version of stop() for code size reduction.
5723 // Assumes constant relative addresses for data and runtime call.
5724 void MacroAssembler::stop_static(int type, const char* msg, int id) {
5725   stop_chain(NULL, type, msg, id, false);
5726 }
5727 
5728 void MacroAssembler::stop_subroutine() {
5729   unimplemented("stop_subroutine", 710);
5730 }
5731 
5732 // Prints msg to stdout from within generated code.
5733 void MacroAssembler::warn(const char* msg) {
5734   RegisterSaver::save_live_registers(this, RegisterSaver::all_registers, Z_R14);
5735   load_absolute_address(Z_R1, (address) warning);
5736   load_absolute_address(Z_ARG1, (address) msg);
5737   (void) call(Z_R1);
5738   RegisterSaver::restore_live_registers(this, RegisterSaver::all_registers);
5739 }
5740 
5741 #ifndef PRODUCT
5742 
5743 // Write pattern 0x0101010101010101 in region [low-before, high+after].
5744 void MacroAssembler::zap_from_to(Register low, Register high, Register val, Register addr, int before, int after) {
5745   if (!ZapEmptyStackFields) return;
5746   BLOCK_COMMENT("zap memory region {");
5747   load_const_optimized(val, 0x0101010101010101);
5748   int size = before + after;
5749   if (low == high && size < 5 && size > 0) {
5750     int offset = -before*BytesPerWord;
5751     for (int i = 0; i < size; ++i) {
5752       z_stg(val, Address(low, offset));
5753       offset += BytesPerWord;
5754     }
5755   } else {
5756     add2reg(addr, -before*BytesPerWord, low);
5757     if (after) {
5758 #ifdef ASSERT
5759       jlong check = after * BytesPerWord;
5760       assert(Immediate::is_simm32(check) && Immediate::is_simm32(-check), "value not encodable !");
5761 #endif
5762       add2reg(high, after * BytesPerWord);
5763     }
5764     NearLabel loop;
5765     bind(loop);
5766     z_stg(val, Address(addr));
5767     add2reg(addr, 8);
5768     compare64_and_branch(addr, high, bcondNotHigh, loop);
5769     if (after) {
5770       add2reg(high, -after * BytesPerWord);
5771     }
5772   }
5773   BLOCK_COMMENT("} zap memory region");
5774 }
5775 #endif // !PRODUCT
5776 
5777 SkipIfEqual::SkipIfEqual(MacroAssembler* masm, const bool* flag_addr, bool value, Register _rscratch) {
5778   _masm = masm;
5779   _masm->load_absolute_address(_rscratch, (address)flag_addr);
5780   _masm->load_and_test_int(_rscratch, Address(_rscratch));
5781   if (value) {
5782     _masm->z_brne(_label); // Skip if true, i.e. != 0.
5783   } else {
5784     _masm->z_bre(_label);  // Skip if false, i.e. == 0.
5785   }
5786 }
5787 
5788 SkipIfEqual::~SkipIfEqual() {
5789   _masm->bind(_label);
5790 }
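
// Typical use of the scoped helper above (illustrative sketch only; the flag is
// just an example of a bool the generated code tests at runtime):
//
//   { SkipIfEqual skip(masm, &DTraceMethodProbes, false, Z_R1_scratch);
//     // code emitted here is executed only if DTraceMethodProbes is true
//   } // destructor binds the skip label here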
5791