/*
 * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)

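// G1 uses a two-part write barrier. The pre-barrier (SATB barrier) logs the
// old value of a reference field into a thread-local SATB queue while
// concurrent marking is active, preserving the snapshot-at-the-beginning
// invariant. The post-barrier dirties the card covering the store address
// and enqueues it in the thread-local dirty card queue whenever a store
// creates a reference that crosses heap regions, keeping remembered sets
// up to date.
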
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    // Is marking active?
    Label filtered;
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    Register Rtmp1 = Z_R0_scratch;
    const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
    }
    __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
    }

    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);

    __ bind(filtered);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, bool do_return) {
  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
  if (!do_return) {
    assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
    __ call_VM_leaf(entry_point, addr, count);
    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
  } else {
    // Tail call: call the C function and return directly to the stub caller.
    __ lgr_if_needed(Z_ARG1, addr);
    __ lgr_if_needed(Z_ARG2, count);
    __ load_const(Z_R1, entry_point);
    __ z_br(Z_R1); // Branch without linking; the callee will return to the stub caller.
  }
}

void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         NULL  /* obj */,
                         dst   /* pre_val */,
                         noreg /* preserve */,
                         tmp1, tmp2 /* tmp */,
                         true  /* pre_val_needed */);
  }
  __ bind(done);
}

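// Generate the SATB pre-barrier: if concurrent marking is active, the value
// that is about to be overwritten (pre_val) must be recorded so that the
// marking snapshot stays complete. The fast path pushes pre_val into the
// thread-local SATB buffer; if the buffer is full (index == 0), the slow
// path calls into the runtime.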
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 const Address*  obj,
                                                 Register        Rpre_val,      // Ideally, this is a non-volatile register.
                                                 Register        Rval,          // Will be preserved.
                                                 Register        Rtmp1,         // If Rpre_val is volatile, either Rtmp1
                                                 Register        Rtmp2,         // or Rtmp2 has to be non-volatile.
                                                 bool            pre_val_needed // Save Rpre_val across runtime call, caller uses it.
                                                 ) {

  bool not_null  = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == NULL;

  const Register Robj = obj ? obj->base() : noreg,
                 Roff = obj ? obj->index() : noreg;
  const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
  const int index_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> may be Z_R0.
  assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0.
  assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0.

  Label callRuntime, filtered;

  BLOCK_COMMENT("g1_write_barrier_pre {");

  // Is marking active?
  // Note: the value is loaded for test purposes only; it is not used afterwards.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
  }
  __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

  assert(Rpre_val != noreg, "must have a real register");

  // If an object is given, we need to load the previous value into Rpre_val.
  if (obj) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ z_llgf(Rpre_val, *obj);
    } else {
      __ z_lg(Rpre_val, *obj);
    }
  }

  // Is the previous value NULL?
  // If so, we don't need to record it and we're done.
  // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
  //       The register contents are preserved across the runtime call if the caller requests it.
  if (preloaded && not_null) {
#ifdef ASSERT
    __ z_ltgr(Rpre_val, Rpre_val);
    __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rpre_val, Rpre_val);
    __ z_bre(filtered); // The previous value is NULL, so we don't need to record it.
  }

  // Decode the oop now. We know it's not NULL.
  if (Robj != noreg && UseCompressedOops) {
    __ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
  }

  // OK, it's not filtered, so we'll need to call enqueue.

  // We can store the original value in the thread's buffer
  // only if index > 0. Otherwise, the runtime has to handle it.
  // (The index field is typed as size_t.)
  Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  assert_different_registers(Rbuffer, Rindex, Rpre_val);

  __ z_lg(Rbuffer, buffer_offset, Z_thread);

  __ load_and_test_long(Rindex, Address(Z_thread, index_offset));
  __ z_bre(callRuntime); // If index == 0, go to runtime.

  __ add2reg(Rindex, -wordSize); // Decrement index.
  __ z_stg(Rindex, index_offset, Z_thread);

  // Record the previous value.
  __ z_stg(Rpre_val, 0, Rbuffer, Rindex);
  __ z_bru(filtered);  // We are done.

  Rbuffer = noreg;  // end of life
  Rindex  = noreg;  // end of life

  __ bind(callRuntime);

  // Save some registers (inputs and result) over the runtime call
  // by spilling them into the top frame.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }

  // Save Rpre_val (result) over the runtime call.
  Register Rpre_save = Rpre_val;
  if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
    guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
    Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
  }
  __ lgr_if_needed(Rpre_save, Rpre_val);

  // Push a frame to protect the top frame with the return pc and spilled register values.
  __ save_return_pc();
  __ push_frame_abi160(0); // Will use Z_R0 as tmp.

  // Rpre_val may be destroyed by push_frame().
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);

  __ pop_frame();
  __ restore_return_pc();

  // Restore spilled values.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }
  if (pre_val_needed && Rpre_val->is_volatile()) {
    __ lgr_if_needed(Rpre_val, Rpre_save);
  }

  __ bind(filtered);
  BLOCK_COMMENT("} g1_write_barrier_pre");
}

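// Generate the card-marking post-barrier. Stores that stay within one heap
// region, stores of NULL, and stores to already-young or already-dirty
// cards are filtered out. Otherwise the card is dirtied and its address is
// pushed into the thread-local dirty card queue; a full queue (index == 0)
// is handed to the runtime.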
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register Rstore_addr, Register Rnew_val,
                                                  Register Rtmp1, Register Rtmp2, Register Rtmp3) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.

  Label callRuntime, filtered;

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());
  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");

  BLOCK_COMMENT("g1_write_barrier_post {");

  // Does the store cross heap regions?
  // It does if the two addresses specify different grain addresses.
  if (VM_Version::has_DistinctOpnds()) {
    __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
  } else {
    __ z_lgr(Rtmp1, Rstore_addr);
    __ z_xgr(Rtmp1, Rnew_val);
  }
  __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  __ z_bre(filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ z_ltgr(Rnew_val, Rnew_val);
    __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rnew_val, Rnew_val);
    __ z_bre(filtered);
  }

  Rnew_val = noreg; // end of lifetime

  // Storing region crossing non-NULL, is card already dirty?
  assert(sizeof(*ct->card_table()->byte_map_base()) == sizeof(jbyte), "adjust this code");
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
  // Make sure not to use Z_R0 for any of these registers.
  Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
  Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;

  // Calculate the address of the card.
  __ load_const_optimized(Rbase, (address)ct->card_table()->byte_map_base());      // Card table base.
  __ z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);         // Index into card table.
  __ z_algr(Rcard_addr, Rbase);                                      // Explicit calculation needed for cli.
  Rbase = noreg; // end of lifetime

  // Filter young.
  assert((unsigned int)G1CardTable::g1_young_card_val() <= 255, "otherwise check this code");
  __ z_cli(0, Rcard_addr, G1CardTable::g1_young_card_val());
  __ z_bre(filtered);

  // Check the card value. If dirty, we're done.
  // This also avoids false sharing of the (already dirty) card.
  __ z_sync(); // Required to support concurrent cleaning.
  assert((unsigned int)G1CardTable::dirty_card_val() <= 255, "otherwise check this code");
  __ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar.
  __ z_bre(filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val());

  Register Rcard_addr_x = Rcard_addr;
  Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
  Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
  const int qidx_off    = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int qbuf_off    = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
  if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
    Rcard_addr_x = Z_R0_scratch;  // Register shortage. We have to use Z_R0.
  }
  __ lgr_if_needed(Rcard_addr_x, Rcard_addr);

  __ load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
  __ z_bre(callRuntime); // If index == 0, jump to runtime.

  __ z_lg(Rqueue_buf, qbuf_off, Z_thread);

  __ add2reg(Rqueue_index, -wordSize); // Decrement index.
  __ z_stg(Rqueue_index, qidx_off, Z_thread);

  __ z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
  __ z_bru(filtered);

  __ bind(callRuntime);

  // TODO: do we need a frame? Introduced to be on the safe side.
  bool needs_frame = true;
  __ lgr_if_needed(Rcard_addr, Rcard_addr_x); // Copy back asap. push_frame will destroy Z_R0_scratch!

  // A VM call needs a frame to access (write) registers.
  if (needs_frame) {
    __ save_return_pc();
    __ push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
  }

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);

  if (needs_frame) {
    __ pop_frame();
    __ restore_return_pc();
  }

  __ bind(filtered);

  BLOCK_COMMENT("} g1_write_barrier_post");
}

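// Store an oop and apply both barrier halves. For array stores and stores
// through unknown (anonymous) references the card marking must be precise,
// i.e. performed on the card covering the exact slot address; therefore the
// effective address is materialized in the base register before the
// post-barrier runs.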
void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators, &dst, tmp3, val, tmp1, tmp2, false);

  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

  // No need for a post-barrier if storing NULL.
  if (val != noreg) {
    const Register base = dst.base(),
                   idx  = dst.index();
    const intptr_t disp = dst.disp();
    if (precise && (disp != 0 || idx != noreg)) {
      __ add2reg_with_index(base, disp, idx, base);
    }
    g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
  }
}

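// Resolve a jobject to the oop it refers to. JNI weak global handles are
// tagged in the low address bits (JNIHandles::weak_tag_mask). Reading
// through a weak handle is a phantom-strength reference load, so the
// resolved value must be fed to the SATB pre-barrier to keep it alive
// for concurrent marking.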
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);
  __ z_bre(Ldone);          // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);
  __ z_lg(value, 0, value); // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
  __ z_braz(Lnot_weak);
  __ verify_oop(value);
  DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF;
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value);
  __ bind(Ldone);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

#undef __

#define __ sasm->

static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

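// C1 slow-path stub for the SATB pre-barrier. Expects the previous value
// in Z_R1_scratch and enqueues it in the thread-local SATB buffer,
// refilling the buffer via the runtime when it is full.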
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_pre_barrier_slow_id", false);

  Register pre_val = Z_R1_scratch;
  Register tmp  = Z_R6; // Must be non-volatile because it is used to save pre_val.
  Register tmp2 = Z_R7;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
  }
  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t, so z_ltg is appropriate here.
  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

  // index == 0?
  __ z_brz(refill);

  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
  __ add2reg(tmp, -oopSize);

  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <pre_val>
  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

  __ bind(marking_not_active);
  // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(tmp, pre_val); // Save pre_val.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(pre_val, tmp); // Restore pre_val.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}

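// C1 slow-path stub for the card-marking post-barrier. Expects the address
// of the updated slot in Z_R1_scratch, dirties the corresponding card
// unless it is young or already dirty, and enqueues the card address in
// the thread-local dirty card queue, refilling via the runtime when the
// queue is full.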
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop  = Z_R1_scratch;
  Register addr_card = Z_R1_scratch;
  Register r1        = Z_R6; // Must be saved/restored.
  Register r2        = Z_R7; // Must be saved/restored.
  Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  jbyte* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate the address of the card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: restore
  // used registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := dirty_card_val
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. DirtyCardQueue::_index is
  // a size_t, so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0?
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize);

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card; idx must be non-volatile.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, DirtyCardQueueSet::handle_zero_index_for_thread),
                                   Z_thread);
  __ z_lgr(addr_card, idx);
  restore_volatile_registers(sasm); // Restore addr_card.
  __ z_bru(restart);
}

#undef __

#endif // COMPILER1