/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "registerSaver_s390.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1BarrierSetAssembler.hpp"
#include "gc/g1/g1BarrierSetRuntime.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1ThreadLocalData.hpp"
#include "gc/g1/heapRegion.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/g1/c1/g1BarrierSetC1.hpp"
#endif

#define __ masm->

#define BLOCK_COMMENT(str) if (PrintAssembly) __ block_comment(str)

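// G1 write barriers: the SATB pre-barrier logs the value being overwritten so
// concurrent marking stays snapshot-consistent; the post-barrier dirties the
// card of the stored-to location for later refinement. The arraycopy helpers
// below delegate the per-element work to G1BarrierSetRuntime entries.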
void G1BarrierSetAssembler::gen_write_ref_array_pre_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                            Register addr, Register count) {
  bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;

  // With G1, don't generate the call if we statically know that the target is uninitialized.
  if (!dest_uninitialized) {
    // Is marking active?
    Label filtered;
    assert_different_registers(addr, Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
    Register Rtmp1 = Z_R0_scratch;
    const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
    if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
      __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
    } else {
      guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
      __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
    }
    __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.

    if (UseCompressedOops) {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
    } else {
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
    }

    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);

    __ bind(filtered);
  }
}

void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                             Register addr, Register count, bool do_return) {
  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
  if (!do_return) {
    assert_different_registers(addr, Z_R0_scratch);  // would be destroyed by push_frame()
    assert_different_registers(count, Z_R0_scratch); // would be destroyed by push_frame()
    RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
    __ call_VM_leaf(entry_point, addr, count);
    RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
  } else {
    // Tail call: call the C entry point and return to the stub caller.
    __ lgr_if_needed(Z_ARG1, addr);
    __ lgr_if_needed(Z_ARG2, count);
    __ load_const(Z_R1, entry_point);
    __ z_br(Z_R1); // Branch without linking, callee will return to stub caller.
  }
}

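// Reference loads: for loads with ON_WEAK_OOP_REF/ON_PHANTOM_OOP_REF decorators
// (e.g. Reference.get()), the loaded referent must be logged via the SATB
// pre-barrier so that concurrent marking treats it as live.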
void G1BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                    const Address& src, Register dst, Register tmp1, Register tmp2, Label *L_handle_null) {
  bool on_oop = type == T_OBJECT || type == T_ARRAY;
  bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0;
  bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0;
  bool on_reference = on_weak || on_phantom;
  Label done;
  if (on_oop && on_reference && L_handle_null == NULL) { L_handle_null = &done; }
  ModRefBarrierSetAssembler::load_at(masm, decorators, type, src, dst, tmp1, tmp2, L_handle_null);
  if (on_oop && on_reference) {
    // Generate the G1 pre-barrier code to log the value of
    // the referent field in an SATB buffer.
    g1_write_barrier_pre(masm, decorators | IS_NOT_NULL,
                         NULL /* obj */,
                         dst  /* pre_val */,
                         noreg /* preserve */,
                         tmp1, tmp2 /* tmp */,
                         true /* pre_val_needed */);
  }
  __ bind(done);
}

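// SATB pre-barrier: logs the value that is about to be overwritten. Fast path:
// skip if marking is inactive or the previous value is NULL; otherwise try to
// store the previous value into the thread-local SATB buffer and only call the
// runtime when that buffer is full (index == 0).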
void G1BarrierSetAssembler::g1_write_barrier_pre(MacroAssembler* masm, DecoratorSet decorators,
                                                 const Address* obj,
                                                 Register Rpre_val,      // Ideally, this is a non-volatile register.
                                                 Register Rval,          // Will be preserved.
                                                 Register Rtmp1,         // If Rpre_val is volatile, either Rtmp1
                                                 Register Rtmp2,         // or Rtmp2 has to be non-volatile.
                                                 bool pre_val_needed     // Save Rpre_val across runtime call, caller uses it.
                                                 ) {

  bool not_null  = (decorators & IS_NOT_NULL) != 0,
       preloaded = obj == NULL;

  const Register Robj = obj ? obj->base() : noreg,
                 Roff = obj ? obj->index() : noreg;
  const int active_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  const int buffer_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());
  const int index_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  assert_different_registers(Rtmp1, Rtmp2, Z_R0_scratch); // None of the Rtmp<i> must be Z_R0!!
  assert_different_registers(Robj, Z_R0_scratch);         // Used for addressing. Furthermore, push_frame destroys Z_R0!!
  assert_different_registers(Rval, Z_R0_scratch);         // push_frame destroys Z_R0!!

  Label callRuntime, filtered;

  BLOCK_COMMENT("g1_write_barrier_pre {");

  // Is marking active?
  // Note: value is loaded for test purposes only. No further use here.
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(Rtmp1, Address(Z_thread, active_offset));
  } else {
    guarantee(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(Rtmp1, Address(Z_thread, active_offset));
  }
  __ z_bre(filtered); // Activity indicator is zero, so there is no marking going on currently.

  assert(Rpre_val != noreg, "must have a real register");

  // If an object is given, we need to load the previous value into Rpre_val.
  if (obj) {
    // Load the previous value...
    if (UseCompressedOops) {
      __ z_llgf(Rpre_val, *obj);
    } else {
      __ z_lg(Rpre_val, *obj);
    }
  }

  // Is the previous value NULL?
  // If so, we don't need to record it and we're done.
  // Note: pre_val is loaded, decompressed and stored (directly or via runtime call).
  //       Register contents are preserved across the runtime call if the caller requests it.
  if (preloaded && not_null) {
#ifdef ASSERT
    __ z_ltgr(Rpre_val, Rpre_val);
    __ asm_assert_ne("null oop not allowed (G1 pre)", 0x321); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rpre_val, Rpre_val);
    __ z_bre(filtered); // The previous value is NULL, so we don't need to record it.
  }

  // Decode the oop now. We know it's not NULL.
  if (Robj != noreg && UseCompressedOops) {
    __ oop_decoder(Rpre_val, Rpre_val, /*maybeNULL=*/false);
  }

  // OK, it's not filtered, so we'll need to call enqueue.

  // We can store the original value in the thread's buffer
  // only if index > 0. Otherwise, we need the runtime to handle it.
  // (The index field is typed as size_t.)
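  // Note: the index is a byte offset that counts down; the buffer is filled
  // from the end towards offset 0, so index == 0 means the buffer is full
  // and must be handed to the runtime for processing.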
  Register Rbuffer = Rtmp1, Rindex = Rtmp2;
  assert_different_registers(Rbuffer, Rindex, Rpre_val);

  __ z_lg(Rbuffer, buffer_offset, Z_thread);

  __ load_and_test_long(Rindex, Address(Z_thread, index_offset));
  __ z_bre(callRuntime); // If index == 0, goto runtime.

  __ add2reg(Rindex, -wordSize); // Decrement index.
  __ z_stg(Rindex, index_offset, Z_thread);

  // Record the previous value.
  __ z_stg(Rpre_val, 0, Rbuffer, Rindex);
  __ z_bru(filtered); // We are done.

  Rbuffer = noreg; // end of life
  Rindex  = noreg; // end of life

  __ bind(callRuntime);

  // Save some registers (inputs and result) over runtime call
  // by spilling them into the top frame.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_stg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_stg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_stg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }

  // Save Rpre_val (result) over runtime call.
  Register Rpre_save = Rpre_val;
  if ((Rpre_val == Z_R0_scratch) || (pre_val_needed && Rpre_val->is_volatile())) {
    guarantee(!Rtmp1->is_volatile() || !Rtmp2->is_volatile(), "oops!");
    Rpre_save = !Rtmp1->is_volatile() ? Rtmp1 : Rtmp2;
  }
  __ lgr_if_needed(Rpre_save, Rpre_val);

  // Push frame to protect top frame with return pc and spilled register values.
  __ save_return_pc();
  __ push_frame_abi160(0); // Will use Z_R0 as tmp.

  // Rpre_val may be destroyed by push_frame().
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);

  __ pop_frame();
  __ restore_return_pc();

  // Restore spilled values.
  if (Robj != noreg && Robj->is_volatile()) {
    __ z_lg(Robj, Robj->encoding()*BytesPerWord, Z_SP);
  }
  if (Roff != noreg && Roff->is_volatile()) {
    __ z_lg(Roff, Roff->encoding()*BytesPerWord, Z_SP);
  }
  if (Rval != noreg && Rval->is_volatile()) {
    __ z_lg(Rval, Rval->encoding()*BytesPerWord, Z_SP);
  }
  if (pre_val_needed && Rpre_val->is_volatile()) {
    __ lgr_if_needed(Rpre_val, Rpre_save);
  }

  __ bind(filtered);
  BLOCK_COMMENT("} g1_write_barrier_pre");
}

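// G1 post-barrier: after an oop store, the card covering the store address
// must be dirtied and enqueued for refinement, unless the store can be
// filtered: same-region store, NULL new value, young card, or already-dirty card.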
void G1BarrierSetAssembler::g1_write_barrier_post(MacroAssembler* masm, DecoratorSet decorators, Register Rstore_addr, Register Rnew_val,
                                                  Register Rtmp1, Register Rtmp2, Register Rtmp3) {
  bool not_null = (decorators & IS_NOT_NULL) != 0;

  assert_different_registers(Rstore_addr, Rnew_val, Rtmp1, Rtmp2); // Most probably, Rnew_val == Rtmp3.

  Label callRuntime, filtered;

  CardTableBarrierSet* ct = barrier_set_cast<CardTableBarrierSet>(BarrierSet::barrier_set());

  BLOCK_COMMENT("g1_write_barrier_post {");

  // Does store cross heap regions?
  // It does if the two addresses specify different grain addresses.
  if (VM_Version::has_DistinctOpnds()) {
    __ z_xgrk(Rtmp1, Rstore_addr, Rnew_val);
  } else {
    __ z_lgr(Rtmp1, Rstore_addr);
    __ z_xgr(Rtmp1, Rnew_val);
  }
  __ z_srag(Rtmp1, Rtmp1, HeapRegion::LogOfHRGrainBytes);
  __ z_bre(filtered);

  // Crosses regions, storing NULL?
  if (not_null) {
#ifdef ASSERT
    __ z_ltgr(Rnew_val, Rnew_val);
    __ asm_assert_ne("null oop not allowed (G1 post)", 0x322); // Checked by caller.
#endif
  } else {
    __ z_ltgr(Rnew_val, Rnew_val);
    __ z_bre(filtered);
  }

  Rnew_val = noreg; // end of lifetime

  // Storing region crossing non-NULL, is card already dirty?
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3);
  // Make sure not to use Z_R0 for any of these registers.
  Register Rcard_addr = (Rtmp1 != Z_R0_scratch) ? Rtmp1 : Rtmp3;
  Register Rbase      = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp3;

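  // Worked example (assuming the usual 512-byte cards, CardTable::card_shift == 9):
  // a store to heap address A dirties the card byte at byte_map_base + (A >> 9).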
  // Calculate the address of the card.
  __ load_const_optimized(Rbase, (address)ct->card_table()->byte_map_base()); // Card table base.
  __ z_srlg(Rcard_addr, Rstore_addr, CardTable::card_shift);                  // Index into card table.
  __ z_algr(Rcard_addr, Rbase);                                               // Explicit calculation needed for cli.
  Rbase = noreg; // end of lifetime

  // Filter young.
  __ z_cli(0, Rcard_addr, G1CardTable::g1_young_card_val());
  __ z_bre(filtered);

  // Check the card value. If dirty, we're done.
  // This also avoids false sharing of the (already dirty) card.
  __ z_sync(); // Required to support concurrent cleaning.
  __ z_cli(0, Rcard_addr, G1CardTable::dirty_card_val()); // Reload after membar.
  __ z_bre(filtered);

  // Storing a region crossing, non-NULL oop, card is clean.
  // Dirty card and log.
  __ z_mvi(0, Rcard_addr, G1CardTable::dirty_card_val());

  Register Rcard_addr_x = Rcard_addr;
  Register Rqueue_index = (Rtmp2 != Z_R0_scratch) ? Rtmp2 : Rtmp1;
  Register Rqueue_buf   = (Rtmp3 != Z_R0_scratch) ? Rtmp3 : Rtmp1;
  const int qidx_off = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
  const int qbuf_off = in_bytes(G1ThreadLocalData::dirty_card_queue_buffer_offset());
  if ((Rcard_addr == Rqueue_buf) || (Rcard_addr == Rqueue_index)) {
    Rcard_addr_x = Z_R0_scratch; // Register shortage. We have to use Z_R0.
  }
  __ lgr_if_needed(Rcard_addr_x, Rcard_addr);

  __ load_and_test_long(Rqueue_index, Address(Z_thread, qidx_off));
  __ z_bre(callRuntime); // If index == 0, branch to runtime.

  __ z_lg(Rqueue_buf, qbuf_off, Z_thread);

  __ add2reg(Rqueue_index, -wordSize); // Decrement index.
  __ z_stg(Rqueue_index, qidx_off, Z_thread);

  __ z_stg(Rcard_addr_x, 0, Rqueue_index, Rqueue_buf); // Store card.
  __ z_bru(filtered);

  __ bind(callRuntime);

  // TODO: do we need a frame? Introduced to be on the safe side.
  bool needs_frame = true;
  __ lgr_if_needed(Rcard_addr, Rcard_addr_x); // Copy back asap. push_frame() will destroy Z_R0_scratch!

  // The VM call needs a frame: the callee may write to the register save area
  // of the caller's frame (ABI).
  if (needs_frame) {
    __ save_return_pc();
    __ push_frame_abi160(0); // Will use Z_R0 as tmp on old CPUs.
  }

  // Save the live input values.
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);

  if (needs_frame) {
    __ pop_frame();
    __ restore_return_pc();
  }

  __ bind(filtered);

  BLOCK_COMMENT("} g1_write_barrier_post");
}

void G1BarrierSetAssembler::oop_store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                         const Address& dst, Register val, Register tmp1, Register tmp2, Register tmp3) {
  bool is_array = (decorators & IS_ARRAY) != 0;
  bool on_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0;
  bool precise = is_array || on_anonymous;
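  // Precise vs. imprecise card marking: for array elements and unknown-strength
  // oop refs the card of the exact slot address must be dirtied, so the element
  // address is computed below; for plain fields the card of the object base
  // suffices.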
  // Load and record the previous value.
  g1_write_barrier_pre(masm, decorators, &dst, tmp3, val, tmp1, tmp2, false);

  BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2, tmp3);

  // No need for post barrier if storing NULL.
  if (val != noreg) {
    const Register base = dst.base(),
                   idx  = dst.index();
    const intptr_t disp = dst.disp();
    if (precise && (disp != 0 || idx != noreg)) {
      __ add2reg_with_index(base, disp, idx, base);
    }
    g1_write_barrier_post(masm, decorators, base, val, tmp1, tmp2, tmp3);
  }
}

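// resolve_jobject: JNI handles carry a tag in the low bits. For a jweak
// (weak_tag_mask set), the referent may be concurrently collected, so the
// resolved value is logged through the pre-barrier as a keep-alive.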
void G1BarrierSetAssembler::resolve_jobject(MacroAssembler* masm, Register value, Register tmp1, Register tmp2) {
  NearLabel Ldone, Lnot_weak;
  __ z_ltgr(tmp1, value);
  __ z_bre(Ldone); // Use NULL result as-is.

  __ z_nill(value, ~JNIHandles::weak_tag_mask);
  __ z_lg(value, 0, value); // Resolve (untagged) jobject.

  __ z_tmll(tmp1, JNIHandles::weak_tag_mask); // Test for jweak tag.
  __ z_braz(Lnot_weak);
  __ verify_oop(value);
  DecoratorSet decorators = IN_NATIVE | ON_PHANTOM_OOP_REF;
  g1_write_barrier_pre(masm, decorators, (const Address*)NULL, value, noreg, tmp1, tmp2, true);
  __ bind(Lnot_weak);
  __ verify_oop(value);
  __ bind(Ldone);
}

#ifdef COMPILER1

#undef __
#define __ ce->masm()->

void G1BarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, G1PreBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }

  __ z_ltgr(Z_R1_scratch, pre_val_reg); // Pass oop in Z_R1_scratch to Runtime1::g1_pre_barrier_slow_id.
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  ce->emit_call_c(bs->pre_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

void G1BarrierSetAssembler::gen_post_barrier_stub(LIR_Assembler* ce, G1PostBarrierStub* stub) {
  G1BarrierSetC1* bs = (G1BarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());
  ce->check_reserved_argument_area(16); // RT stub needs 2 spill slots.
  assert(stub->addr()->is_register(), "Precondition.");
  assert(stub->new_val()->is_register(), "Precondition.");
  Register new_val_reg = stub->new_val()->as_register();
  __ z_ltgr(new_val_reg, new_val_reg);
  __ branch_optimized(Assembler::bcondZero, *stub->continuation());
  __ z_lgr(Z_R1_scratch, stub->addr()->as_pointer_register());
  ce->emit_call_c(bs->post_barrier_c1_runtime_code_blob()->code_begin());
  __ branch_optimized(Assembler::bcondAlways, *stub->continuation());
}

#undef __

#define __ sasm->

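// The refill slow paths below call into C code, which may clobber all
// volatile (caller-saved) registers; these helpers save/restore them
// around such calls.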
static OopMap* save_volatile_registers(StubAssembler* sasm, Register return_pc = Z_R14) {
  __ block_comment("save_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  int frame_size_in_slots = RegisterSaver::live_reg_frame_size(reg_set) / VMRegImpl::stack_slot_size;
  sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word);
  return RegisterSaver::save_live_registers(sasm, reg_set, return_pc);
}

static void restore_volatile_registers(StubAssembler* sasm) {
  __ block_comment("restore_volatile_registers");
  RegisterSaver::RegisterSet reg_set = RegisterSaver::all_volatile_registers;
  RegisterSaver::restore_live_registers(sasm, reg_set);
}

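// C1 slow-path stub for the SATB pre-barrier. The previous value arrives in
// Z_R1_scratch; it is enqueued into the thread-local SATB buffer, refilling
// the buffer via the runtime when it is full.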
void G1BarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_pre_barrier_slow_id", false);

  Register pre_val = Z_R1_scratch;
  Register tmp     = Z_R6; // Must be non-volatile because it is used to save pre_val.
  Register tmp2    = Z_R7;

  Label refill, restart, marking_not_active;
  int satb_q_active_byte_offset = in_bytes(G1ThreadLocalData::satb_mark_queue_active_offset());
  int satb_q_index_byte_offset  = in_bytes(G1ThreadLocalData::satb_mark_queue_index_offset());
  int satb_q_buf_byte_offset    = in_bytes(G1ThreadLocalData::satb_mark_queue_buffer_offset());

  // Save tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_stg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_stg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  // Is marking still active?
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ load_and_test_int(tmp, Address(Z_thread, satb_q_active_byte_offset));
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ load_and_test_byte(tmp, Address(Z_thread, satb_q_active_byte_offset));
  }
  __ z_bre(marking_not_active); // Activity indicator is zero, so there is no marking going on currently.

  __ bind(restart);
  // Load the index into the SATB buffer. SATBMarkQueue::_index is a
  // size_t, so a 64-bit load and test (z_ltg) is appropriate.
  __ z_ltg(tmp, satb_q_index_byte_offset, Z_R0, Z_thread);

  // index == 0?
  __ z_brz(refill);

  __ z_lg(tmp2, satb_q_buf_byte_offset, Z_thread);
  __ add2reg(tmp, -oopSize);

  __ z_stg(pre_val, 0, tmp, tmp2); // [_buf + index] := <pre_val>
  __ z_stg(tmp, satb_q_index_byte_offset, Z_thread);

  __ bind(marking_not_active);
  // Restore tmp registers (see assertion in G1PreBarrierStub::emit_code()).
  __ z_lg(tmp,  0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(tmp2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(tmp, pre_val); // Save pre_val (tmp is non-volatile).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(pre_val, tmp); // Restore pre_val.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}

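// C1 slow-path stub for the post-barrier. The updated slot address arrives in
// Z_R1_scratch; the corresponding card is dirtied (unless young or already
// dirty) and its address is enqueued into the thread's dirty card queue.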
void G1BarrierSetAssembler::generate_c1_post_barrier_runtime_stub(StubAssembler* sasm) {
  // Z_R1_scratch: oop address, address of updated memory slot

  BarrierSet* bs = BarrierSet::barrier_set();
  __ set_info("g1_post_barrier_slow_id", false);

  Register addr_oop  = Z_R1_scratch;
  Register addr_card = Z_R1_scratch;
  Register r1        = Z_R6; // Must be saved/restored.
  Register r2        = Z_R7; // Must be saved/restored.
  Register cardtable = r1;   // Must be non-volatile, because it is used to save addr_card.
  CardTableBarrierSet* ctbs = barrier_set_cast<CardTableBarrierSet>(bs);
  CardTable* ct = ctbs->card_table();
  CardTable::CardValue* byte_map_base = ct->byte_map_base();

  // Save registers used below (see assertion in G1PostBarrierStub::emit_code()).
  __ z_stg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  Label not_already_dirty, restart, refill, young_card;

  // Calculate the address of the card corresponding to the updated oop slot.
  AddressLiteral rs(byte_map_base);
  __ z_srlg(addr_card, addr_oop, CardTable::card_shift);
  addr_oop = noreg; // dead now
  __ load_const_optimized(cardtable, rs); // cardtable := <card table base>
  __ z_agr(addr_card, cardtable); // addr_card := addr_oop>>card_shift + cardtable

  __ z_cli(0, addr_card, (int)G1CardTable::g1_young_card_val());
  __ z_bre(young_card);

  __ z_sync(); // Required to support concurrent cleaning.

  __ z_cli(0, addr_card, (int)CardTable::dirty_card_val());
  __ z_brne(not_already_dirty);

  __ bind(young_card);
  // We didn't take the branch, so we're already dirty: restore
  // used registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  // Not dirty.
  __ bind(not_already_dirty);

  // First, dirty it: [addr_card] := 0
  __ z_mvi(0, addr_card, CardTable::dirty_card_val());

  Register idx = cardtable; // Must be non-volatile, because it is used to save addr_card.
  Register buf = r2;
  cardtable = noreg; // now dead

  // Save registers used below (see assertion in G1PostBarrierStub::emit_code()).
  __ z_stg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);

  ByteSize dirty_card_q_index_byte_offset = G1ThreadLocalData::dirty_card_queue_index_offset();
  ByteSize dirty_card_q_buf_byte_offset   = G1ThreadLocalData::dirty_card_queue_buffer_offset();

  __ bind(restart);

  // Get the index into the update buffer. G1DirtyCardQueue::_index is
  // a size_t, so z_ltg is appropriate here.
  __ z_ltg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));

  // index == 0?
  __ z_brz(refill);

  __ z_lg(buf, Address(Z_thread, dirty_card_q_buf_byte_offset));
  __ add2reg(idx, -oopSize);

  __ z_stg(addr_card, 0, idx, buf); // [_buf + index] := <address_of_card>
  __ z_stg(idx, Address(Z_thread, dirty_card_q_index_byte_offset));
  // Restore killed registers and return.
  __ z_lg(r1, 0*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_lg(r2, 1*BytesPerWord + FrameMap::first_available_sp_in_frame, Z_SP);
  __ z_br(Z_R14);

  __ bind(refill);
  save_volatile_registers(sasm);
  __ z_lgr(idx, addr_card); // Save addr_card (idx is non-volatile).
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1DirtyCardQueueSet::handle_zero_index_for_thread),
                  Z_thread);
  __ z_lgr(addr_card, idx); // Restore addr_card.
  restore_volatile_registers(sasm);
  __ z_bru(restart);
}

#undef __

#endif // COMPILER1
