/*
 * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahForwarding.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahRuntime.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/thread.hpp"
#ifdef COMPILER1
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "gc/shenandoah/c1/shenandoahBarrierSetC1.hpp"
#endif

#define __ masm->

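// Entry point of the shared load-reference-barrier stub, generated once in
// barrier_stubs_init() at the bottom of this file.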
address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL;

void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop,
                                                       Register src, Register dst, Register count, RegSet saved_regs) {
  if (is_oop) {
    bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0;
    if ((ShenandoahSATBBarrier && !dest_uninitialized) || ShenandoahStoreValEnqueueBarrier || ShenandoahLoadRefBarrier) {

      Label done;

      // Avoid calling runtime if count == 0
      __ cbz(count, done);

      // Is GC active?
      Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
      __ ldrb(rscratch1, gc_state);
      if (ShenandoahSATBBarrier && dest_uninitialized) {
        __ tbz(rscratch1, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);
      } else {
        __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::MARKING);
        __ tst(rscratch1, rscratch2);
        __ br(Assembler::EQ, done);
      }

      __ push(saved_regs, sp);
      if (UseCompressedOops) {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_narrow_oop_entry), src, dst, count);
      } else {
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::arraycopy_barrier_oop_entry), src, dst, count);
      }
      __ pop(saved_regs, sp);
      __ bind(done);
    }
  }
}

void ShenandoahBarrierSetAssembler::shenandoah_write_barrier_pre(MacroAssembler* masm,
                                                                 Register obj,
                                                                 Register pre_val,
                                                                 Register thread,
                                                                 Register tmp,
                                                                 bool tosca_live,
                                                                 bool expand_call) {
  if (ShenandoahSATBBarrier) {
    satb_write_barrier_pre(masm, obj, pre_val, thread, tmp, tosca_live, expand_call);
  }
}

void ShenandoahBarrierSetAssembler::satb_write_barrier_pre(MacroAssembler* masm,
                                                           Register obj,
                                                           Register pre_val,
                                                           Register thread,
                                                           Register tmp,
                                                           bool tosca_live,
                                                           bool expand_call) {
  // If expand_call is true then we expand the call_VM_leaf macro
  // directly to skip generating the check by
  // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.

  assert(thread == rthread, "must be");

  Label done;
  Label runtime;

  assert_different_registers(obj, pre_val, tmp, rscratch1);
  assert(pre_val != noreg && tmp != noreg, "expecting a register");

  Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()));
  Address index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  // Is marking active?
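  // (The width of the SATB active flag differs between builds, hence the
  // two load widths below.)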
  if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) {
    __ ldrw(tmp, in_progress);
  } else {
    assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption");
    __ ldrb(tmp, in_progress);
  }
  __ cbzw(tmp, done);

  // Do we need to load the previous value?
  if (obj != noreg) {
    __ load_heap_oop(pre_val, Address(obj, 0), noreg, noreg, AS_RAW);
  }

  // Is the previous value null?
  __ cbz(pre_val, done);

  // Can we store the original value in the thread's buffer?
  // Is index == 0?
  // (The index field is typed as size_t.)
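  // The fast path below, sketched in C (an illustration only; names follow
  // the thread-local SATB queue fields):
  //   if (index == 0) goto runtime;      // queue buffer is full
  //   index -= wordSize;                 // the buffer fills from the top down
  //   *(buffer + index) = pre_val;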

  __ ldr(tmp, index);                      // tmp := *index_adr
  __ cbz(tmp, runtime);                    // tmp == 0?
                                           // If yes, goto runtime

  __ sub(tmp, tmp, wordSize);              // tmp := tmp - wordSize
  __ str(tmp, index);                      // *index_adr := tmp
  __ ldr(rscratch1, buffer);
  __ add(tmp, tmp, rscratch1);             // tmp := tmp + *buffer_adr

  // Record the previous value
  __ str(pre_val, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  // save the live input values
  RegSet saved = RegSet::of(pre_val);
  if (tosca_live) saved += RegSet::of(r0);
  if (obj != noreg) saved += RegSet::of(obj);

  __ push(saved, sp);

  // Calling the runtime using the regular call_VM_leaf mechanism generates
  // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
  // that checks that the *(rfp+frame::interpreter_frame_last_sp) == NULL.
  //
  // If we are generating the pre-barrier without a frame (e.g. in the
  // intrinsified Reference.get() routine) then rfp might be pointing to
  // the caller frame and so this check will most likely fail at runtime.
  //
  // Expanding the call directly bypasses the generation of the check.
  // So when we do not have a full interpreter frame on the stack
  // expand_call should be passed true.

  if (expand_call) {
    assert(pre_val != c_rarg1, "smashed arg");
    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  } else {
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  }

  __ pop(saved, sp);

  __ bind(done);
}

void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  Label is_null;
  __ cbz(dst, is_null);
  resolve_forward_pointer_not_null(masm, dst, tmp);
  __ bind(is_null);
}

// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2, except those explicitly
// passed in.
void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst, Register tmp) {
  assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled");
  // The code below loads the mark word, checks whether the lowest two bits
  // are set, and if so, clears them and copies the result to dst.
  // Otherwise it leaves dst alone.
  // Implementing this is surprisingly awkward. I do it here by:
  // - Inverting the mark word
  // - Testing whether the lowest two bits are == 0
  // - If so, setting the lowest two bits
  // - Inverting the result back, and copying it to dst
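  //
  // Worked example (illustrative values): a forwarded object's mark word is
  // fwdptr | 0b11, say 0x0000000012345f00 | 0b11:
  //   eon  tmp, mark, zr   => tmp = ~mark; the lowest two bits become 00
  //   ands zr, tmp, 0b11   => zero, so fall through (NE means "not forwarded")
  //   orr  tmp, tmp, 0b11  => set the lowest two bits (in the inverted domain)
  //   eon  dst, tmp, zr    => dst = ~tmp = mark & ~0b11 = 0x0000000012345f00,
  //                           i.e. the forwardee pointer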

  bool borrow_reg = (tmp == noreg);
  if (borrow_reg) {
    // No free registers available. Make one useful.
    tmp = rscratch1;
    if (tmp == dst) {
      tmp = rscratch2;
    }
    __ push(RegSet::of(tmp), sp);
  }

  assert_different_registers(tmp, dst);

  Label done;
  __ ldr(tmp, Address(dst, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp, tmp, zr);
  __ ands(zr, tmp, markWord::lock_mask_in_place);
  __ br(Assembler::NE, done);
  __ orr(tmp, tmp, markWord::marked_value);
  __ eon(dst, tmp, zr);
  __ bind(done);

  if (borrow_reg) {
    __ pop(RegSet::of(tmp), sp);
  }
}

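// Applies the load-reference barrier to a known non-null oop in dst via the
// shared stub: load address in r1, oop in r0, result returned in r0 (see the
// register contract at generate_shenandoah_lrb below).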
void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Address load_addr) {
  assert(ShenandoahLoadRefBarrier, "Should be enabled");
  assert(dst != rscratch2, "need rscratch2");
  assert_different_registers(load_addr.base(), load_addr.index(), rscratch1, rscratch2);

  Label done;
  __ enter();
  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability
  __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);

  // Use r1 for the load address
  Register result_dst = dst;
  if (dst == r1) {
    __ mov(rscratch1, dst);
    dst = rscratch1;
  }

  // Save r0 and r1, unless one of them is the output register
  RegSet to_save = RegSet::of(r0, r1) - result_dst;
  __ push(to_save, sp);
  __ lea(r1, load_addr);
  __ mov(r0, dst);

  __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb())));

  __ mov(result_dst, r0);
  __ pop(to_save, sp);

  __ bind(done);
  __ leave();
}

void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler* masm, Register dst, Address load_addr) {
  if (!ShenandoahLoadRefBarrier) {
    return;
  }

  assert(dst != rscratch2, "need rscratch2");

  Label is_null;
  Label done;

  __ block_comment("load_reference_barrier_native { ");

  __ cbz(dst, is_null);

  __ enter();

  Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(rscratch2, gc_state);

  // Check for heap stability (no forwarded objects)
  __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done);

  __ mov(rscratch2, dst);
  __ push_call_clobbered_registers();
  __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
  __ lea(r1, load_addr);
  __ mov(r0, rscratch2);
  __ blr(lr);
  __ mov(rscratch2, r0);
  __ pop_call_clobbered_registers();
  __ mov(dst, rscratch2);

  __ bind(done);
  __ leave();
  __ bind(is_null);
  __ block_comment("} load_reference_barrier_native");
}

void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
  if (ShenandoahStoreValEnqueueBarrier) {
    // Save possibly live regs.
    RegSet live_regs = RegSet::range(r0, r4) - dst;
    __ push(live_regs, sp);
    __ strd(v0, __ pre(sp, 2 * -wordSize));

    satb_write_barrier_pre(masm, noreg, dst, rthread, tmp, true, false);

    // Restore possibly live regs.
    __ ldrd(v0, __ post(sp, 2 * wordSize));
    __ pop(live_regs, sp);
  }
}

void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Address load_addr) {
  if (ShenandoahLoadRefBarrier) {
    Label is_null;
    __ cbz(dst, is_null);
    load_reference_barrier_not_null(masm, dst, load_addr);
    __ bind(is_null);
  }
}

//
// Arguments:
//
// Inputs:
//   src:        oop location to load from, might be clobbered
//
// Output:
//   dst:        oop loaded from src location
//
// Kill:
//   rscratch1 (scratch reg)
//
// Alias:
//   dst: rscratch1 (might use rscratch1 as temporary output register to avoid clobbering src)
//
void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                            Register dst, Address src, Register tmp1, Register tmp_thread) {
  // 1: non-reference load, no additional barrier is needed
  if (!is_reference_type(type)) {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
    return;
  }

  // 2: load a reference from the src location and apply the LRB if needed
  if (ShenandoahBarrierSet::need_load_reference_barrier(decorators, type)) {
    Register result_dst = dst;

    // Preserve the src location for the LRB
    if (dst == src.base() || dst == src.index()) {
      dst = rscratch1;
    }
    assert_different_registers(dst, src.base(), src.index());

    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);

    if (ShenandoahBarrierSet::use_load_reference_barrier_native(decorators, type)) {
      load_reference_barrier_native(masm, dst, src);
    } else {
      load_reference_barrier(masm, dst, src);
    }

    if (dst != result_dst) {
      __ mov(result_dst, dst);
      dst = result_dst;
    }
  } else {
    BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread);
  }

  // 3: apply the keep-alive barrier if needed (e.g. loads through
  // Reference.get() must keep the referent alive for the concurrent marker)
  if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
    __ enter();
    __ push_call_clobbered_registers();
    satb_write_barrier_pre(masm /* masm */,
                           noreg /* obj */,
                           dst /* pre_val */,
                           rthread /* thread */,
                           tmp1 /* tmp */,
                           true /* tosca_live */,
                           true /* expand_call */);
    __ pop_call_clobbered_registers();
    __ leave();
  }
}

void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                             Address dst, Register val, Register tmp1, Register tmp2) {
  bool on_oop = is_reference_type(type);
  if (!on_oop) {
    BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2);
    return;
  }

  // Flatten the object address if needed: the pre-barrier and store below
  // expect a plain base register in r3.
  if (dst.index() == noreg && dst.offset() == 0) {
    if (dst.base() != r3) {
      __ mov(r3, dst.base());
    }
  } else {
    __ lea(r3, dst);
  }

  shenandoah_write_barrier_pre(masm,
                               r3 /* obj */,
                               tmp2 /* pre_val */,
                               rthread /* thread */,
                               tmp1  /* tmp */,
                               val != noreg /* tosca_live */,
                               false /* expand_call */);

  if (val == noreg) {
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), noreg, noreg, noreg);
  } else {
    storeval_barrier(masm, val, tmp1);
    // Keep an uncompressed copy of the new value, mirroring the G1-style
    // barrier structure (region cross check); Shenandoah emits no post
    // barrier here, so the copy is otherwise unused.
    Register new_val = val;
    if (UseCompressedOops) {
      new_val = rscratch2;
      __ mov(new_val, val);
    }
    BarrierSetAssembler::store_at(masm, decorators, type, Address(r3, 0), val, noreg, noreg);
  }
}

void ShenandoahBarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                                  Register obj, Register tmp, Label& slowpath) {
  Label done;
  // Resolve jobject
  BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, obj, tmp, slowpath);

  // Check for null.
  __ cbz(obj, done);

  assert(obj != rscratch2, "need rscratch2");
  Address gc_state(jni_env, ShenandoahThreadLocalData::gc_state_offset() - JavaThread::jni_environment_offset());
  __ lea(rscratch2, gc_state);
  __ ldrb(rscratch2, Address(rscratch2));

  // Check for heap in evacuation phase
  __ tbnz(rscratch2, ShenandoahHeap::EVACUATION_BITPOS, slowpath);

  __ bind(done);
}

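// CAS on a heap oop: 'result' receives either the witness value found at
// 'addr' (is_cae, compare-and-exchange) or a success flag (compare-and-swap).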
void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
                                                bool acquire, bool release, bool weak, bool is_cae,
                                                Register result) {
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
  bool is_narrow = UseCompressedOops;
  Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;

  assert_different_registers(addr, expected, new_val, tmp1, tmp2);

  Label retry, done, fail;

  // CAS, using LL/SC pair.
  __ bind(retry);
  __ load_exclusive(tmp1, addr, size, acquire);
  if (is_narrow) {
    __ cmpw(tmp1, expected);
  } else {
    __ cmp(tmp1, expected);
  }
  __ br(Assembler::NE, fail);
  __ store_exclusive(tmp2, new_val, addr, size, release);
  if (weak) {
    __ cmpw(tmp2, 0u); // If the store fails, return NE to our caller
  } else {
    __ cbnzw(tmp2, retry);
  }
  __ b(done);

  __ bind(fail);
  // Check if rb(expected) == rb(tmp1).
  // Shuffle registers so that we have the memory value ready for the next expected.
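  // Under concurrent evacuation the same object may be reachable through two
  // addresses (its from-space and to-space copies), so a bitwise mismatch is
  // not necessarily a semantic mismatch. Resolve both values through their
  // forward pointers and retry the CAS if they denote the same object.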
  __ mov(tmp2, expected);
  __ mov(expected, tmp1);
  if (is_narrow) {
    __ decode_heap_oop(tmp1, tmp1);
    __ decode_heap_oop(tmp2, tmp2);
  }
  resolve_forward_pointer(masm, tmp1);
  resolve_forward_pointer(masm, tmp2);
  __ cmp(tmp1, tmp2);
  // Retry with expected now being the value we just loaded from addr.
  __ br(Assembler::EQ, retry);
  if (is_cae && is_narrow) {
    // For cmp-and-exchange and narrow oops, we need to restore
    // the compressed old-value. We moved it to 'expected' a few lines up.
    __ mov(tmp1, expected);
  }
  __ bind(done);

  if (is_cae) {
    __ mov(result, tmp1);
  } else {
    __ cset(result, Assembler::EQ);
  }
}

#undef __

#ifdef COMPILER1

#define __ ce->masm()->

void ShenandoahBarrierSetAssembler::gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(*stub->entry());

  assert(stub->pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = stub->pre_val()->as_register();

  if (stub->do_load()) {
    ce->mem2reg(stub->addr(), stub->pre_val(), T_OBJECT, stub->patch_code(), stub->info(), false /*wide*/, false /*unaligned*/);
  }
  __ cbz(pre_val_reg, *stub->continuation());
  ce->store_parameter(stub->pre_val()->as_register(), 0);
  __ far_call(RuntimeAddress(bs->pre_barrier_c1_runtime_code_blob()->code_begin()));
  __ b(*stub->continuation());
}

void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) {
  ShenandoahBarrierSetC1* bs = (ShenandoahBarrierSetC1*)BarrierSet::barrier_set()->barrier_set_c1();
  __ bind(*stub->entry());

  Register obj = stub->obj()->as_register();
  Register res = stub->result()->as_register();
  Register addr = stub->addr()->as_pointer_register();
  Register tmp1 = stub->tmp1()->as_register();
  Register tmp2 = stub->tmp2()->as_register();

  assert(res == r0, "result must arrive in r0");

  if (res != obj) {
    __ mov(res, obj);
  }

  // Check for null.
  __ cbz(res, *stub->continuation());

  // Check for object in cset.
  __ mov(tmp2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(tmp1, res, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(tmp2, Address(tmp2, tmp1));
  __ cbz(tmp2, *stub->continuation());

  // Check if the object is already forwarded (same mark-word trick as in
  // resolve_forward_pointer_not_null above).
  Label slow_path;
  __ ldr(tmp1, Address(res, oopDesc::mark_offset_in_bytes()));
  __ eon(tmp1, tmp1, zr);
  __ ands(zr, tmp1, markWord::lock_mask_in_place);
  __ br(Assembler::NE, slow_path);

  // Decode the forwarded object.
  __ orr(tmp1, tmp1, markWord::marked_value);
  __ eon(res, tmp1, zr);
  __ b(*stub->continuation());

  __ bind(slow_path);
  ce->store_parameter(res, 0);
  ce->store_parameter(addr, 1);
  if (stub->is_native()) {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_native_rt_code_blob()->code_begin()));
  } else {
    __ far_call(RuntimeAddress(bs->load_reference_barrier_rt_code_blob()->code_begin()));
  }

  __ b(*stub->continuation());
}

#undef __

#define __ sasm->

void ShenandoahBarrierSetAssembler::generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm) {
  __ prologue("shenandoah_pre_barrier", false);

  // arg0 : previous value of memory

  BarrierSet* bs = BarrierSet::barrier_set();

  const Register pre_val = r0;
  const Register thread = rthread;
  const Register tmp = rscratch1;

  Address queue_index(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_index_offset()));
  Address buffer(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_buffer_offset()));

  Label done;
  Label runtime;

  // Is marking still active?
  Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset()));
  __ ldrb(tmp, gc_state);
  __ mov(rscratch2, ShenandoahHeap::MARKING);
  __ tst(tmp, rscratch2);
  __ br(Assembler::EQ, done);

  // Can we store the original value in the thread's buffer?
  __ ldr(tmp, queue_index);
  __ cbz(tmp, runtime);

  __ sub(tmp, tmp, wordSize);
  __ str(tmp, queue_index);
  __ ldr(rscratch2, buffer);
  __ add(tmp, tmp, rscratch2);
  __ load_parameter(0, rscratch2);
  __ str(rscratch2, Address(tmp, 0));
  __ b(done);

  __ bind(runtime);
  __ push_call_clobbered_registers();
  __ load_parameter(0, pre_val);
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_field_pre_entry), pre_val, thread);
  __ pop_call_clobbered_registers();
  __ bind(done);

  __ epilogue();
}

void ShenandoahBarrierSetAssembler::generate_c1_load_reference_barrier_runtime_stub(StubAssembler* sasm, bool is_native) {
  __ prologue("shenandoah_load_reference_barrier", false);
  // arg0 : object to be resolved

  __ push_call_clobbered_registers();
  __ load_parameter(0, r0);
  __ load_parameter(1, r1);
  if (is_native) {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_native));
  } else if (UseCompressedOops) {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
  } else {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ epilogue();
}

#undef __

#endif // COMPILER1

address ShenandoahBarrierSetAssembler::shenandoah_lrb() {
  assert(_shenandoah_lrb != NULL, "need load reference barrier stub");
  return _shenandoah_lrb;
}

#define __ cgen->assembler()->

// Shenandoah load reference barrier.
//
// Input:
//   r0: OOP to evacuate.  Not null.
//   r1: load address
//
// Output:
//   r0: Pointer to evacuated OOP.
//
// Trashes rscratch1, rscratch2.  Preserves everything else.
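//
// Fast path, sketched in C (an illustration only; the cset map holds one
// byte per heap region, indexed by address >> region_size_bytes_shift):
//   if ((in_cset_fast_test[(uintptr_t)r0 >> region_size_bytes_shift] & 1) == 0)
//     return r0;        // not in the collection set: nothing to do
//   // otherwise take the slow path into the runtime barrier below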
address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) {

  __ align(6);
  StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb");
  address start = __ pc();

  Label slow_path;
  __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr());
  __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint());
  __ ldrb(rscratch2, Address(rscratch2, rscratch1));
  __ tbnz(rscratch2, 0, slow_path);
  __ ret(lr);

  __ bind(slow_path);
  __ enter(); // required for proper stackwalking of RuntimeStub frame

  __ push_call_clobbered_registers();

  if (UseCompressedOops) {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_narrow));
  } else {
    __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier));
  }
  __ blr(lr);
  __ mov(rscratch1, r0);
  __ pop_call_clobbered_registers();
  __ mov(r0, rscratch1);

  __ leave(); // required for proper stackwalking of RuntimeStub frame
  __ ret(lr);

  return start;
}

#undef __

void ShenandoahBarrierSetAssembler::barrier_stubs_init() {
  if (ShenandoahLoadRefBarrier) {
    int stub_code_size = 2048;
    ResourceMark rm;
    BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size);
    CodeBuffer buf(bb);
    StubCodeGenerator cgen(&buf);
    _shenandoah_lrb = generate_shenandoah_lrb(&cgen);
  }
}