/*
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/javaClasses.hpp"
#include "nativeInst_x86.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
#include "vmreg_x86.inline.hpp"


#define __ ce->masm()->

#ifndef _LP64
float ConversionStub::float_zero = 0.0;
double ConversionStub::double_zero = 0.0;

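// The fast-path f2i/d2i conversion (SSE cvttss2si/cvttsd2si, or x87 code
// when SSE is not used) produces the "integer indefinite" value 0x80000000
// for NaN and out-of-range inputs. This stub is entered in that case and
// fixes the result up to match Java semantics: NaN -> 0, positive overflow
// -> max_jint, negative overflow -> min_jint (which the result register
// already contains).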
void ConversionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(bytecode() == Bytecodes::_f2i || bytecode() == Bytecodes::_d2i, "other conversions do not require stub");


  if (input()->is_single_xmm()) {
    __ comiss(input()->as_xmm_float_reg(),
              ExternalAddress((address)&float_zero));
  } else if (input()->is_double_xmm()) {
    __ comisd(input()->as_xmm_double_reg(),
              ExternalAddress((address)&double_zero));
  } else {
    __ push(rax);
    __ ftst();
    __ fnstsw_ax();
    __ sahf();
    __ pop(rax);
  }

  Label NaN, do_return;
  __ jccb(Assembler::parity, NaN);
  __ jccb(Assembler::below, do_return);
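  // input is < 0 -> result already contains min_jint (0x80000000), nothing to do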

  // input is > 0 -> return maxInt
  // result register already contains 0x80000000, so subtracting 1 gives 0x7fffffff
  __ decrement(result()->as_register());
  __ jmpb(do_return);

  // input is NaN -> return 0
  __ bind(NaN);
  __ xorptr(result()->as_register(), result()->as_register());

  __ bind(do_return);
  __ jmp(_continuation);
}
#endif // !_LP64

void C1SafepointPollStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
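  // pc() - offset() is the start of the code buffer, so adding
  // safepoint_offset() yields the absolute address of the poll
  // instruction; it is saved as the thread's exception pc so the
  // runtime can locate the safepoint that was hit.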
  InternalAddress safepoint_pc(ce->masm()->pc() - ce->masm()->offset() + safepoint_offset());
#ifdef _LP64
  __ lea(rscratch1, safepoint_pc);
  __ movptr(Address(r15_thread, JavaThread::saved_exception_pc_offset()), rscratch1);
#else
  const Register tmp1 = rcx;
  const Register tmp2 = rdx;
  __ push(tmp1);
  __ push(tmp2);

  __ lea(tmp1, safepoint_pc);
  __ get_thread(tmp2);
  __ movptr(Address(tmp2, JavaThread::saved_exception_pc_offset()), tmp1);

  __ pop(tmp2);
  __ pop(tmp1);
#endif /* _LP64 */
  assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
         "polling page return stub not created yet");

  address stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  __ jump(RuntimeAddress(stub));
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
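  // pass the method and bci to the runtime on the stack (all registers
  // must be preserved); the runtime uses them to drive the recompilation
  // decision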
  Metadata *m = _method->as_constant_ptr()->as_metadata();
  ce->store_parameter(m, 1);
  ce->store_parameter(_bci, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(RuntimeAddress(a));
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  // pass the array index on the stack because all registers must be preserved
  if (_index->is_cpu_register()) {
    ce->store_parameter(_index->as_register(), 0);
  } else {
    ce->store_parameter(_index->as_jint(), 0);
  }
  Runtime1::StubID stub_id;
  if (_throw_index_out_of_bounds_exception) {
    stub_id = Runtime1::throw_index_exception_id;
  } else {
    stub_id = Runtime1::throw_range_check_failed_id;
    ce->store_parameter(_array->as_pointer_register(), 1);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::throw_div0_exception_id)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id ||
         stub_id == Runtime1::fast_new_instance_id ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  __ movptr(rdx, _klass_reg->as_register());
  __ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewTypeArrayStub

NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_type_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _result = result;
  _length = length;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
  assert(_length->as_register() == rbx, "length must be in rbx");
  assert(_klass_reg->as_register() == rdx, "klass_reg must be in rdx");
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::new_object_array_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  assert(_result->as_register() == rax, "result must be in rax");
  __ jmp(_continuation);
}


// Implementation of MonitorAccessStubs

MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
: MonitorAccessStub(obj_reg, lock_reg)
{
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");
  __ bind(_entry);
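  // pass the object and the lock address on the stack because all
  // registers must be preserved for the slow path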
  ce->store_parameter(_obj_reg->as_register(), 1);
  ce->store_parameter(_lock_reg->as_register(), 0);
  Runtime1::StubID enter_id;
  if (ce->compilation()->has_fpu_code()) {
    enter_id = Runtime1::monitorenter_id;
  } else {
    enter_id = Runtime1::monitorenter_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(enter_id)));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ jmp(_continuation);
}


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    // lock_reg was destroyed by fast unlocking attempt => recompute it
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  ce->store_parameter(_lock_reg->as_register(), 0);
  // note: non-blocking leaf routine => no call info needed
  Runtime1::StubID exit_id;
  if (ce->compilation()->has_fpu_code()) {
    exit_id = Runtime1::monitorexit_id;
  } else {
    exit_id = Runtime1::monitorexit_nofpu_id;
  }
  __ call(RuntimeAddress(Runtime1::entry_for(exit_id)));
  __ jmp(_continuation);
}


// Implementation of patching:
// - Copy the code at given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler* masm) {
  // We're patching a 5-7 byte instruction on intel and we need to
  // make sure that no other processor sees a partial piece of the
  // instruction.  It appears mostly impossible on Intel to simply
  // invalidate other processors' caches, and since they may do
  // aggressive prefetch it's very hard to make a guess about what
  // code might be in the icache.  Force the instruction to be double
  // word aligned so that it doesn't span a cache line.
  masm->align(align_up((int)NativeGeneralJump::instruction_size, wordSize));
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");

  Label call_patch;

  // static field accesses have special semantics while the class
  // initializer is being run so we emit a test which can be used to
  // check that this code is being executed by the initializing
  // thread.
  address being_initialized_entry = __ pc();
  if (CommentedAssembly) {
    __ block_comment(" patch template");
  }
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    Metadata* o = NULL;
    __ mov_metadata(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id) {
    // produce a copy of the load mirror instruction for use by the being
    // initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    jobject o = NULL;
    __ movoop(_obj, o);
#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
      *ptr = 0x90; // make the site look like a nop
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }
    assert(_obj != noreg, "must be a valid register");
    Register tmp = rax;
    Register tmp2 = rbx;
    __ push(tmp);
    __ push(tmp2);
    // Load without verification to keep code size small. We need it because
    // being_initialized_entry_offset has to fit in a byte. Also, we know it's not null.
    __ movptr(tmp2, Address(_obj, java_lang_Class::klass_offset()));
    __ get_thread(tmp);
    __ cmpptr(tmp, Address(tmp2, InstanceKlass::init_thread_offset()));
    __ pop(tmp2);
    __ pop(tmp);
    __ jcc(Assembler::notEqual, call_patch);

    // access_field patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ jmp(_patch_site_continuation);

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }
  if (CommentedAssembly) {
    __ block_comment("patch data encoded as movl");
  }
  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but for readability of
  // the disassembly we make the data look like a movl reg, imm32,
  // which requires 5 bytes
  int sizeof_patch_record = 5;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;

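  // Patch record layout (a fake "movl eax, imm32"): the 0xB8 opcode, one
  // unused byte, then the three data bytes: the offset of the
  // being-initialized entry, the number of bytes to skip, and the number
  // of bytes to copy.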
  __ emit_int8((unsigned char)0xB8);
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
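  // overwrite the original instruction(s) at _pc_start with an
  // unconditional jump to this stub, so execution diverts here until
  // the site has been patched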
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(RuntimeAddress(target));
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  int jmp_off = __ offset();
  __ jmp(_patch_site_entry);
  // Add enough nops so deoptimization can overwrite the jmp above with a call
  // and not destroy the world. We cannot use fat nops here, since the concurrent
  // code rewrite may transiently create the illegal instruction sequence.
  for (int j = __ offset() ; j < jmp_off + 5 ; j++ ) {
    __ nop();
  }
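  // The patch site in the main code carries an oop/metadata relocation
  // for the constant load that will eventually live there; since the
  // site currently holds the jump to this stub, change that relocation
  // to none so the stale embedded constant is not visited.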
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    RelocIterator iter(cs, (address)_pc_start, (address)(_pc_start + 1));
    relocInfo::change_reloc_info_for_address(&iter, (address) _pc_start, reloc_type, relocInfo::none);
  }
}


void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
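  // pass the trap request (encoding the deoptimization reason and action)
  // to the runtime on the stack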
  ce->store_parameter(_trap_request, 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id)));
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(RuntimeAddress(a));
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}


void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  assert(__ rsp_offset() == 0, "frame size should be fixed");

  __ bind(_entry);
  // pass the object on the stack because all registers must be preserved
  if (_obj->is_cpu_register()) {
    ce->store_parameter(_obj->as_register(), 0);
  }
  __ call(RuntimeAddress(Runtime1::entry_for(_stub)));
  ce->add_call_info_here(_info);
  debug_only(__ should_not_reach_here());
}


void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  // Figure out where the args should go
  // This should really convert the IntrinsicID to the Method* and signature
  // but I don't know how to do that.
  //
  VMRegPair args[5];
  BasicType signature[5] = { T_OBJECT, T_INT, T_OBJECT, T_INT, T_INT};
  SharedRuntime::java_calling_convention(signature, args, 5);
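  // args[i] now describes where the Java calling convention places the
  // i-th argument: either in a register or in a stack slot relative to rsp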

  // push parameters
  // (src, src_pos, dst, dst_pos, length)
  Register r[5];
  r[0] = src()->as_register();
  r[1] = src_pos()->as_register();
  r[2] = dst()->as_register();
  r[3] = dst_pos()->as_register();
  r[4] = length()->as_register();

  // store the arguments that the calling convention assigns to stack
  // slots; register arguments are already in the right registers
  for (int i = 0; i < 5 ; i++ ) {
    VMReg r_1 = args[i].first();
    if (r_1->is_stack()) {
      int st_off = r_1->reg2stack() * wordSize;
      __ movptr (Address(rsp, st_off), r[i]);
    } else {
      assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
    }
  }

  ce->align_call(lir_static_call);

  ce->emit_static_call_stub();
  if (ce->compilation()->bailed_out()) {
    return; // CodeCache is full
  }
  AddressLiteral resolve(SharedRuntime::get_resolve_static_call_stub(),
                         relocInfo::static_call_type);
  __ call(resolve);
  ce->add_call_info_here(info());

#ifndef PRODUCT
  __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif

  __ jmp(_continuation);
}

#undef __