/*
 * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/universe.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"

#define __ masm->

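// Plain (barrier-free) load of a value of the given BasicType from src into dst.
// For T_OBJECT/T_ARRAY heap loads, compressed oops are decoded; for T_LONG,
// T_FLOAT and T_DOUBLE the result goes to the tos registers (dst is noreg).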
void BarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                  Register dst, Address src, Register tmp1, Register tmp_thread) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
#ifdef _LP64
      if (UseCompressedOops) {
        __ movl(dst, src);
        if (is_not_null) {
          __ decode_heap_oop_not_null(dst);
        } else {
          __ decode_heap_oop(dst);
        }
      } else
#endif
      {
        __ movptr(dst, src);
      }
    } else {
      assert(in_native, "why else?");
      __ movptr(dst, src);
    }
    break;
  }
  case T_BOOLEAN: __ load_unsigned_byte(dst, src);  break;
  case T_BYTE:    __ load_signed_byte(dst, src);    break;
  case T_CHAR:    __ load_unsigned_short(dst, src); break;
  case T_SHORT:   __ load_signed_short(dst, src);   break;
  case T_INT:     __ movl  (dst, src);              break;
  case T_ADDRESS: __ movptr(dst, src);              break;
  case T_FLOAT:
    assert(dst == noreg, "only to ftos");
    __ load_float(src);
    break;
  case T_DOUBLE:
    assert(dst == noreg, "only to dtos");
    __ load_double(src);
    break;
  case T_LONG:
    assert(dst == noreg, "only to ltos");
#ifdef _LP64
    __ movq(rax, src);
#else
    if (atomic) {
      __ fild_d(src);               // Must load atomically
      __ subptr(rsp,2*wordSize);    // Make space for store
      __ fistp_d(Address(rsp,0));
      __ pop(rax);
      __ pop(rdx);
    } else {
      __ movl(rax, src);
      __ movl(rdx, src.plus_disp(wordSize));
    }
#endif
    break;
  default: Unimplemented();
  }
}

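// Plain (barrier-free) store of a value of the given BasicType into dst.
// For T_OBJECT/T_ARRAY heap stores, compressed oops are encoded in place in val;
// for T_LONG, T_FLOAT and T_DOUBLE the value comes from tos (val is noreg).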
void BarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type,
                                   Address dst, Register val, Register tmp1, Register tmp2) {
  bool in_heap = (decorators & IN_HEAP) != 0;
  bool in_native = (decorators & IN_NATIVE) != 0;
  bool is_not_null = (decorators & IS_NOT_NULL) != 0;
  bool atomic = (decorators & MO_RELAXED) != 0;

  switch (type) {
  case T_OBJECT:
  case T_ARRAY: {
    if (in_heap) {
      if (val == noreg) {
        assert(!is_not_null, "inconsistent access");
#ifdef _LP64
        if (UseCompressedOops) {
          __ movl(dst, (int32_t)NULL_WORD);
        } else {
          __ movslq(dst, (int32_t)NULL_WORD);
        }
#else
        __ movl(dst, (int32_t)NULL_WORD);
#endif
      } else {
#ifdef _LP64
        if (UseCompressedOops) {
          assert(!dst.uses(val), "not enough registers");
          if (is_not_null) {
            __ encode_heap_oop_not_null(val);
          } else {
            __ encode_heap_oop(val);
          }
          __ movl(dst, val);
        } else
#endif
        {
          __ movptr(dst, val);
        }
      }
    } else {
      assert(in_native, "why else?");
      assert(val != noreg, "not supported");
      __ movptr(dst, val);
    }
    break;
  }
  case T_BOOLEAN:
    __ andl(val, 0x1);  // boolean is true if LSB is 1
    __ movb(dst, val);
    break;
  case T_BYTE:
    __ movb(dst, val);
    break;
  case T_SHORT:
    __ movw(dst, val);
    break;
  case T_CHAR:
    __ movw(dst, val);
    break;
  case T_INT:
    __ movl(dst, val);
    break;
  case T_LONG:
    assert(val == noreg, "only tos");
#ifdef _LP64
    __ movq(dst, rax);
#else
    if (atomic) {
      __ push(rdx);
      __ push(rax);                 // Must update atomically with FIST
      __ fild_d(Address(rsp,0));    // So load into FPU register
      __ fistp_d(dst);              // and put into memory atomically
      __ addptr(rsp, 2*wordSize);
    } else {
      __ movptr(dst, rax);
      __ movptr(dst.plus_disp(wordSize), rdx);
    }
#endif
    break;
  case T_FLOAT:
    assert(val == noreg, "only tos");
    __ store_float(dst);
    break;
  case T_DOUBLE:
    assert(val == noreg, "only tos");
    __ store_double(dst);
    break;
  case T_ADDRESS:
    __ movptr(dst, val);
    break;
  default: Unimplemented();
  }
}

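// Resolve a jobject from native code without a runtime call: clear the jweak
// tag bit and load the oop through the handle. jni_env, tmp and slowpath are
// unused in this base implementation.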
void BarrierSetAssembler::try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
                                                        Register obj, Register tmp, Label& slowpath) {
  __ clear_jweak_tag(obj);
  __ movptr(obj, Address(obj, 0));
}

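// Bump-pointer allocation of con_size_in_bytes (or var_size_in_bytes) bytes in
// the thread-local allocation buffer. Sets obj to the start of the new object
// and jumps to slow_case if the TLAB does not have enough space.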
void BarrierSetAssembler::tlab_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Register t2,
                                        Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

  __ verify_tlab();

  __ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    __ lea(end, Address(obj, con_size_in_bytes));
  } else {
    __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  __ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  __ jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  __ movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  if (var_size_in_bytes == end) {
    __ subptr(var_size_in_bytes, obj);
  }
  __ verify_tlab();
}

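// Inline allocation directly in eden: a CAS-retry loop on the shared heap top,
// jumping to slow_case when inline contiguous allocation is unsupported, the
// end-address computation wraps around, or the allocation would exceed the heap end.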
// Defines obj, preserves var_size_in_bytes
void BarrierSetAssembler::eden_allocate(MacroAssembler* masm,
                                        Register thread, Register obj,
                                        Register var_size_in_bytes,
                                        int con_size_in_bytes,
                                        Register t1,
                                        Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (!Universe::heap()->supports_inline_contig_alloc()) {
    __ jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    __ bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    __ movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      __ lea(end, Address(obj, con_size_in_bytes));
    } else {
      __ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    __ cmpptr(end, obj);
    __ jcc(Assembler::below, slow_case);
    __ cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    __ jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    __ locked_cmpxchgptr(end, heap_top);
    __ jcc(Assembler::notEqual, retry);
    incr_allocated_bytes(masm, thread, var_size_in_bytes, con_size_in_bytes, thread->is_valid() ? noreg : t1);
  }
}

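// Bump the owning thread's allocated_bytes counter by the allocation size
// (a 64-bit add; performed as add/adc of the low and high words on 32-bit).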
void BarrierSetAssembler::incr_allocated_bytes(MacroAssembler* masm, Register thread,
                                               Register var_size_in_bytes,
                                               int con_size_in_bytes,
                                               Register t1) {
  if (!thread->is_valid()) {
#ifdef _LP64
    thread = r15_thread;
#else
    assert(t1->is_valid(), "need temp reg");
    thread = t1;
    __ get_thread(thread);
#endif
  }

#ifdef _LP64
  if (var_size_in_bytes->is_valid()) {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
#else
  if (var_size_in_bytes->is_valid()) {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
  } else {
    __ addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
  }
  __ adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
#endif
}

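// nmethod entry barrier: compare the thread-local (LP64) or global (32-bit)
// disarmed value against zero and call the method_entry_barrier stub when the
// barrier is armed (value non-zero).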
#ifdef _LP64
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }
  Label continuation;
  Register thread = r15_thread;
  Address disarmed_addr(thread, in_bytes(bs_nm->thread_disarmed_offset()));
  __ align(8);
  __ cmpl(disarmed_addr, 0);
  __ jcc(Assembler::equal, continuation);
  __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
  __ bind(continuation);
}
#else
void BarrierSetAssembler::nmethod_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs_nm == NULL) {
    return;
  }

  Label continuation;

  Register tmp = rdi;
  __ push(tmp);
  __ movptr(tmp, (intptr_t)bs_nm->disarmed_value_address());
  Address disarmed_addr(tmp, 0);
  __ align(4);
  __ cmpl(disarmed_addr, 0);
  __ pop(tmp);
  __ jcc(Assembler::equal, continuation);
  __ call(RuntimeAddress(StubRoutines::x86::method_entry_barrier()));
  __ bind(continuation);
}
#endif

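// c2i entry barrier: check that the callee Method* passed in rbx does not
// belong to a class loader that is concurrently unloading. The method is live
// if its holder's ClassLoaderData is strong (keep_alive > 0) or its holder
// oop is still reachable; otherwise jump to the handle_wrong_method stub.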
void BarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
  __ jcc(Assembler::equal, bad_call);

#ifdef _LP64
  Register tmp1 = rscratch1;
  Register tmp2 = rscratch2;
#else
  Register tmp1 = rax;
  Register tmp2 = rcx;
  __ push(tmp1);
  __ push(tmp2);
#endif // _LP64

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(tmp1, rbx);

  // Is it a strong CLD?
  __ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
  __ jcc(Assembler::greater, method_live);

  // Is it a weak but alive CLD?
  __ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
  __ resolve_weak_handle(tmp1, tmp2);
  __ cmpptr(tmp1, 0);
  __ jcc(Assembler::notEqual, method_live);

#ifndef _LP64
  __ pop(tmp2);
  __ pop(tmp1);
#endif

  __ bind(bad_call);
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));
  __ bind(method_live);

#ifndef _LP64
  __ pop(tmp2);
  __ pop(tmp1);
#endif
}