/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_InstructionPrinter.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciInstance.hpp"
#include "gc/shared/barrierSet.hpp"
#include "runtime/os.hpp"
void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_code, Register obj, CodeEmitInfo* info) {
  // We must have enough patching space so that the call can be inserted.
  // We cannot use fat nops here, since the concurrent code rewrite may
  // transiently create an illegal instruction sequence.
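  // Pad with individual nops: each one is a complete instruction, so a thread
  // racing with the patcher never decodes a partially written instruction.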
  while ((intx) _masm->pc() - (intx) patch->pc_start() < NativeGeneralJump::instruction_size) {
    _masm->nop();
  }
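  // The padding above leaves room for a NativeGeneralJump to be planted at
  // pc_start() while the target is unresolved (inferred from the size check;
  // see PatchingStub for the actual patching protocol).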
  patch->install(_masm, patch_code, obj, info);
  append_code_stub(patch);

#ifdef ASSERT
  Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
  if (patch->id() == PatchingStub::access_field_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_putfield:
      case Bytecodes::_getfield:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_klass_id) {
    switch (code) {
      case Bytecodes::_new:
      case Bytecodes::_anewarray:
      case Bytecodes::_multianewarray:
      case Bytecodes::_instanceof:
      case Bytecodes::_checkcast:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_mirror_id) {
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        break;
      default:
        ShouldNotReachHere();
    }
  } else if (patch->id() == PatchingStub::load_appendix_id) {
    Bytecodes::Code bc_raw = info->scope()->method()->raw_code_at_bci(info->stack()->bci());
    assert(Bytecodes::has_optional_appendix(bc_raw), "unexpected appendix resolution");
  } else {
    ShouldNotReachHere();
  }
#endif
}

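// Choose the patching stub kind for an unresolved load: bytecodes that may
// carry a call-site appendix (JSR 292) need load_appendix_id, all others
// patch in a class mirror.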
PatchingStub::PatchID LIR_Assembler::patching_id(CodeEmitInfo* info) {
  IRScope* scope = info->scope();
  Bytecodes::Code bc_raw = scope->method()->raw_code_at_bci(info->stack()->bci());
  if (Bytecodes::has_optional_appendix(bc_raw)) {
    return PatchingStub::load_appendix_id;
  }
  return PatchingStub::load_mirror_id;
}

//---------------------------------------------------------------


LIR_Assembler::LIR_Assembler(Compilation* c):
   _masm(c->masm())
 , _bs(BarrierSet::barrier_set())
 , _compilation(c)
 , _frame_map(c->frame_map())
 , _current_block(NULL)
 , _pending_non_safepoint(NULL)
 , _pending_non_safepoint_offset(0)
{
  _slow_case_stubs = new CodeStubList();
}


LIR_Assembler::~LIR_Assembler() {
  // The unwind handler label may be unbound if this destructor is invoked because of a bail-out.
  // Reset it here to avoid an assertion.
  _unwind_handler_entry.reset();
}


void LIR_Assembler::check_codespace() {
  CodeSection* cs = _masm->code_section();
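  // Bail out while there is still some headroom (1K on 32-bit, 2K on 64-bit),
  // presumably so the op currently being emitted cannot run off the buffer.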
  if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
    BAILOUT("CodeBuffer overflow");
  }
}


void LIR_Assembler::append_code_stub(CodeStub* stub) {
  _slow_case_stubs->append(stub);
}

void LIR_Assembler::emit_stubs(CodeStubList* stub_list) {
  for (int m = 0; m < stub_list->length(); m++) {
    CodeStub* s = stub_list->at(m);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      stringStream st;
      s->print_name(&st);
      st.print(" slow case");
      _masm->block_comment(st.as_string());
    }
#endif
    s->emit_code(this);
#ifdef ASSERT
    s->assert_no_unbound_labels();
#endif
  }
}


void LIR_Assembler::emit_slow_case_stubs() {
  emit_stubs(_slow_case_stubs);
}


bool LIR_Assembler::needs_icache(ciMethod* method) const {
  return !method->is_static();
}

bool LIR_Assembler::needs_clinit_barrier_on_entry(ciMethod* method) const {
  return VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier();
}

int LIR_Assembler::code_offset() const {
  return _masm->offset();
}


address LIR_Assembler::pc() const {
  return _masm->pc();
}

// To bang the stack of this compiled method we use the stack size
// that the interpreter would need in case of a deoptimization. This
// removes the need to bang the stack in the deoptimization blob, which
// in turn simplifies stack overflow handling.
int LIR_Assembler::bang_size_in_bytes() const {
  return MAX2(initial_frame_size_in_bytes() + os::extra_bang_size_in_bytes(), _compilation->interpreter_frame_size());
}

void LIR_Assembler::emit_exception_entries(ExceptionInfoList* info_list) {
  for (int i = 0; i < info_list->length(); i++) {
    XHandlers* handlers = info_list->at(i)->exception_handlers();

    for (int j = 0; j < handlers->length(); j++) {
      XHandler* handler = handlers->handler_at(j);
      assert(handler->lir_op_id() != -1, "handler not processed by LinearScan");
      assert(handler->entry_code() == NULL ||
             handler->entry_code()->instructions_list()->last()->code() == lir_branch ||
             handler->entry_code()->instructions_list()->last()->code() == lir_delay_slot, "last operation must be branch");

      if (handler->entry_pco() == -1) {
        // entry code not emitted yet
        if (handler->entry_code() != NULL && handler->entry_code()->instructions_list()->length() > 1) {
          handler->set_entry_pco(code_offset());
          if (CommentedAssembly) {
            _masm->block_comment("Exception adapter block");
          }
          emit_lir_list(handler->entry_code());
        } else {
          handler->set_entry_pco(handler->entry_block()->exception_handler_pco());
        }

        assert(handler->entry_pco() != -1, "must be set now");
      }
    }
  }
}

void LIR_Assembler::emit_code(BlockList* hir) {
  if (PrintLIR) {
    print_LIR(hir);
  }

  int n = hir->length();
  for (int i = 0; i < n; i++) {
    emit_block(hir->at(i));
    CHECK_BAILOUT();
  }

  flush_debug_info(code_offset());

  DEBUG_ONLY(check_no_unbound_labels());
}

void LIR_Assembler::emit_block(BlockBegin* block) {
  if (block->is_set(BlockBegin::backward_branch_target_flag)) {
    align_backward_branch_target();
  }

  // if this block is the start of an exception handler, record the
  // PC offset of the first instruction for later construction of
  // the ExceptionHandlerTable
  if (block->is_set(BlockBegin::exception_entry_flag)) {
    block->set_exception_handler_pco(code_offset());
  }

#ifndef PRODUCT
  if (PrintLIRWithAssembly) {
    // don't print Phis
    InstructionPrinter ip(false);
    block->print(ip);
  }
#endif /* PRODUCT */

  assert(block->lir() != NULL, "must have LIR");
  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));

#ifndef PRODUCT
  if (CommentedAssembly) {
    stringStream st;
    st.print_cr(" block B%d [%d, %d]", block->block_id(), block->bci(), block->end()->printable_bci());
    _masm->block_comment(st.as_string());
  }
#endif

  emit_lir_list(block->lir());

  X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}


void LIR_Assembler::emit_lir_list(LIR_List* list) {
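  // Let the platform-specific back end combine or simplify adjacent LIR ops
  // before any code is emitted.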
  peephole(list);

  int n = list->length();
  for (int i = 0; i < n; i++) {
    LIR_Op* op = list->at(i);

    check_codespace();
    CHECK_BAILOUT();

#ifndef PRODUCT
    if (CommentedAssembly) {
      // Don't print every op since that's too verbose. Print
      // branches since they include block and stub names. Also print
      // patching moves since they generate funny-looking code.
      if (op->code() == lir_branch ||
          (op->code() == lir_move && op->as_Op1()->patch_code() != lir_patch_none) ||
          (op->code() == lir_leal && op->as_Op1()->patch_code() != lir_patch_none)) {
        stringStream st;
        op->print_on(&st);
        _masm->block_comment(st.as_string());
      }
    }
    if (PrintLIRWithAssembly) {
      // print out the LIR operation followed by the resulting assembly
      list->at(i)->print(); tty->cr();
    }
#endif /* PRODUCT */

    op->emit_code(this);

    if (compilation()->debug_info_recorder()->recording_non_safepoints()) {
      process_debug_info(op);
    }

#ifndef PRODUCT
    if (PrintLIRWithAssembly) {
      _masm->code()->decode();
    }
#endif /* PRODUCT */
  }
}

#ifdef ASSERT
void LIR_Assembler::check_no_unbound_labels() {
  CHECK_BAILOUT();

  for (int i = 0; i < _branch_target_blocks.length() - 1; i++) {
    if (!_branch_target_blocks.at(i)->label()->is_bound()) {
      tty->print_cr("label of block B%d is not bound", _branch_target_blocks.at(i)->block_id());
      assert(false, "unbound label");
    }
  }
}
#endif

//----------------------------------debug info--------------------------------


void LIR_Assembler::add_debug_info_for_branch(CodeEmitInfo* info) {
  int pc_offset = code_offset();
  flush_debug_info(pc_offset);
  info->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (info->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, info->exception_handlers());
  }
}


void LIR_Assembler::add_call_info(int pc_offset, CodeEmitInfo* cinfo) {
  flush_debug_info(pc_offset);
  cinfo->record_debug_info(compilation()->debug_info_recorder(), pc_offset);
  if (cinfo->exception_handlers() != NULL) {
    compilation()->add_exception_handlers_for_pco(pc_offset, cinfo->exception_handlers());
  }
}

static ValueStack* debug_info(Instruction* ins) {
  StateSplit* ss = ins->as_StateSplit();
  if (ss != NULL) return ss->state();
  return ins->state_before();
}

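// Coalesce non-safepoint debug info: remember the most recent instruction
// whose state has not been recorded yet, and flush it only when a later
// instruction with a different state shows up.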
void LIR_Assembler::process_debug_info(LIR_Op* op) {
  Instruction* src = op->source();
  if (src == NULL) return;
  int pc_offset = code_offset();
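  // Still the same source instruction: just extend the PC range it covers.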
  if (_pending_non_safepoint == src) {
    _pending_non_safepoint_offset = pc_offset;
    return;
  }
  ValueStack* vstack = debug_info(src);
  if (vstack == NULL) return;
  if (_pending_non_safepoint != NULL) {
    // Got some old debug info. Get rid of it.
    if (debug_info(_pending_non_safepoint) == vstack) {
      _pending_non_safepoint_offset = pc_offset;
      return;
    }
    if (_pending_non_safepoint_offset < pc_offset) {
      record_non_safepoint_debug_info();
    }
    _pending_non_safepoint = NULL;
  }
  // Remember the debug info.
  if (pc_offset > compilation()->debug_info_recorder()->last_pc_offset()) {
    _pending_non_safepoint = src;
    _pending_non_safepoint_offset = pc_offset;
  }
}

// Index caller states in s, where 0 is the oldest, 1 its callee, etc.
// Returns NULL if n is too large.
// Also returns, via bci_result, the caller_bci for the next-younger state.
static ValueStack* nth_oldest(ValueStack* s, int n, int& bci_result) {
  ValueStack* t = s;
  for (int i = 0; i < n; i++) {
    if (t == NULL) break;
    t = t->caller_state();
  }
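  // t is now n frames older than s. Walk both in lockstep until t reaches the
  // oldest state; s then stops n frames short of it, i.e. at the n-th oldest
  // state, while bci_result tracks the corresponding caller bci.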
  if (t == NULL) return NULL;
  for (;;) {
    ValueStack* tc = t->caller_state();
    if (tc == NULL) return s;
    t = tc;
    bci_result = tc->bci();
    s = s->caller_state();
  }
}

void LIR_Assembler::record_non_safepoint_debug_info() {
  int pc_offset = _pending_non_safepoint_offset;
  ValueStack* vstack = debug_info(_pending_non_safepoint);
  int bci = vstack->bci();

  DebugInformationRecorder* debug_info = compilation()->debug_info_recorder();
  assert(debug_info->recording_non_safepoints(), "sanity");

  debug_info->add_non_safepoint(pc_offset);

  // Visit scopes from oldest to youngest.
  for (int n = 0; ; n++) {
    int s_bci = bci;
    ValueStack* s = nth_oldest(vstack, n, s_bci);
    if (s == NULL) break;
    IRScope* scope = s->scope();
    // Always pass false for reexecute since these ScopeDescs are never used for deopt.
    methodHandle null_mh;
    debug_info->describe_scope(pc_offset, null_mh, scope->method(), s->bci(), false/*reexecute*/);
  }

  debug_info->end_non_safepoint(pc_offset);
}


ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
  return add_debug_info_for_null_check(code_offset(), cinfo);
}

ImplicitNullCheckStub* LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
  ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
  append_code_stub(stub);
  return stub;
}

void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
  add_debug_info_for_div0(code_offset(), info);
}

void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
  DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
  append_code_stub(stub);
}

void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
  rt_call(op->result_opr(), op->addr(), op->arguments(), op->tmp(), op->info());
}


void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
  verify_oop_map(op->info());

  // must align call sites, otherwise they can't be updated atomically
  align_call(op->code());

  // emit the static call stub out of line
  emit_static_call_stub();
  CHECK_BAILOUT();

  switch (op->code()) {
    case lir_static_call:
    case lir_dynamic_call:
      call(op, relocInfo::static_call_type);
      break;
    case lir_optvirtual_call:
      call(op, relocInfo::opt_virtual_call_type);
      break;
    case lir_icvirtual_call:
      ic_call(op);
      break;
    case lir_virtual_call:
      vtable_call(op);
      break;
    default:
      fatal("unexpected op code: %s", op->name());
      break;
  }

  // JSR 292
  // Record if this method has MethodHandle invokes.
  if (op->is_method_handle_invoke()) {
    compilation()->set_has_method_handle_invokes(true);
  }

#if defined(X86) && defined(TIERED)
  // C2 leaves the FPU stack dirty; clean it.
  if (UseSSE < 2) {
    for (int i = 1; i <= 7; i++) {
      ffree(i);
    }
    if (!op->result_opr()->is_float_kind()) {
      ffree(0);
    }
  }
#endif // X86 && TIERED
}

void LIR_Assembler::emit_opLabel(LIR_OpLabel* op) {
  _masm->bind(*(op->label()));
}


void LIR_Assembler::emit_op1(LIR_Op1* op) {
  switch (op->code()) {
    case lir_move:
      if (op->move_kind() == lir_move_volatile) {
        assert(op->patch_code() == lir_patch_none, "can't patch volatiles");
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

    case lir_roundfp: {
      LIR_OpRoundFP* round_op = op->as_OpRoundFP();
      roundfp_op(round_op->in_opr(), round_op->tmp(), round_op->result_opr(), round_op->pop_fpu_stack());
      break;
    }

    case lir_return:
      return_op(op->in_opr());
      break;

    case lir_safepoint:
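      // If debug info was already recorded at this offset, emit a nop so the
      // safepoint poll gets a PC offset of its own.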
      if (compilation()->debug_info_recorder()->last_pc_offset() == code_offset()) {
        _masm->nop();
      }
      safepoint_poll(op->in_opr(), op->info());
      break;

    case lir_fxch:
      fxch(op->in_opr()->as_jint());
      break;

    case lir_fld:
      fld(op->in_opr()->as_jint());
      break;

    case lir_ffree:
      ffree(op->in_opr()->as_jint());
      break;

    case lir_branch:
      break;

    case lir_push:
      push(op->in_opr());
      break;

    case lir_pop:
      pop(op->in_opr());
      break;

    case lir_leal:
      leal(op->in_opr(), op->result_opr(), op->patch_code(), op->info());
      break;

    case lir_null_check: {
      ImplicitNullCheckStub* stub = add_debug_info_for_null_check_here(op->info());

      if (op->in_opr()->is_single_cpu()) {
        _masm->null_check(op->in_opr()->as_register(), stub->entry());
      } else {
        Unimplemented();
      }
      break;
    }

    case lir_monaddr:
      monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
      break;

#ifdef SPARC
    case lir_pack64:
      pack64(op->in_opr(), op->result_opr());
      break;

    case lir_unpack64:
      unpack64(op->in_opr(), op->result_opr());
      break;
#endif

    case lir_unwind:
      unwind_op(op->in_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}

void LIR_Assembler::emit_op0(LIR_Op0* op) {
  switch (op->code()) {
    case lir_word_align: {
      _masm->align(BytesPerWord);
      break;
    }

    case lir_nop:
      assert(op->info() == NULL, "not supported");
      _masm->nop();
      break;

    case lir_label:
      Unimplemented();
      break;

    case lir_build_frame:
      build_frame();
      break;

    case lir_std_entry:
      // init offsets
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      _masm->align(CodeEntryAlignment);
      if (needs_icache(compilation()->method())) {
        check_icache();
      }
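      // Verified entry point: callers that have already checked the receiver's
      // class enter here, past the inline-cache check above.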
      offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
      _masm->verified_entry();
      if (needs_clinit_barrier_on_entry(compilation()->method())) {
        clinit_barrier(compilation()->method());
      }
      build_frame();
      offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
      break;

    case lir_osr_entry:
      offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
      osr_entry();
      break;

    case lir_24bit_FPU:
      set_24bit_FPU();
      break;

    case lir_reset_FPU:
      reset_FPU();
      break;

    case lir_breakpoint:
      breakpoint();
      break;

    case lir_fpop_raw:
      fpop();
      break;

    case lir_membar:
      membar();
      break;

    case lir_membar_acquire:
      membar_acquire();
      break;

    case lir_membar_release:
      membar_release();
      break;

    case lir_membar_loadload:
      membar_loadload();
      break;

    case lir_membar_storestore:
      membar_storestore();
      break;

    case lir_membar_loadstore:
      membar_loadstore();
      break;

    case lir_membar_storeload:
      membar_storeload();
      break;

    case lir_get_thread:
      get_thread(op->result_opr());
      break;

    case lir_on_spin_wait:
      on_spin_wait();
      break;

    default:
      ShouldNotReachHere();
      break;
  }
}

void LIR_Assembler::emit_op2(LIR_Op2* op) {
  switch (op->code()) {
    case lir_cmp:
      if (op->info() != NULL) {
        assert(op->in_opr1()->is_address() || op->in_opr2()->is_address(),
               "shouldn't be CodeEmitInfo for non-address operands");
        add_debug_info_for_null_check_here(op->info()); // exception possible
      }
      comp_op(op->condition(), op->in_opr1(), op->in_opr2(), op);
      break;

    case lir_cmp_l2i:
    case lir_cmp_fd2i:
    case lir_ucmp_fd2i:
      comp_fl2i(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_cmove:
      cmove(op->condition(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->type());
      break;

    case lir_shl:
    case lir_shr:
    case lir_ushr:
      if (op->in_opr2()->is_constant()) {
        shift_op(op->code(), op->in_opr1(), op->in_opr2()->as_constant_ptr()->as_jint(), op->result_opr());
      } else {
        shift_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      }
      break;

    case lir_add:
    case lir_sub:
    case lir_mul:
    case lir_mul_strictfp:
    case lir_div:
    case lir_div_strictfp:
    case lir_rem:
      assert(op->fpu_pop_count() < 2, "");
      arith_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr(),
        op->info(),
        op->fpu_pop_count() == 1);
      break;

    case lir_abs:
    case lir_sqrt:
    case lir_tan:
    case lir_log10:
      intrinsic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op);
      break;

    case lir_neg:
      negate(op->in_opr1(), op->result_opr(), op->in_opr2());
      break;

    case lir_logic_and:
    case lir_logic_or:
    case lir_logic_xor:
      logic_op(
        op->code(),
        op->in_opr1(),
        op->in_opr2(),
        op->result_opr());
      break;

    case lir_throw:
      throw_op(op->in_opr1(), op->in_opr2(), op->info());
      break;

    case lir_xadd:
    case lir_xchg:
      atomic_op(op->code(), op->in_opr1(), op->in_opr2(), op->result_opr(), op->tmp1_opr());
      break;

    default:
      Unimplemented();
      break;
  }
}


void LIR_Assembler::build_frame() {
  _masm->build_frame(initial_frame_size_in_bytes(), bang_size_in_bytes());
}


void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack) {
  assert((src->is_single_fpu() && dest->is_single_stack()) ||
         (src->is_double_fpu() && dest->is_double_stack()),
         "round_fp: rounds register -> stack location");

  reg2stack(src, dest, src->type(), pop_fpu_stack);
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
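  // Dispatch on the (source, destination) operand kinds. Patching and
  // CodeEmitInfo are only allowed for the combinations asserted below.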
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2reg(src, dest);
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_stack()) {
    assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
    if (dest->is_register()) {
      stack2reg(src, dest, type);
    } else if (dest->is_stack()) {
      stack2stack(src, dest, type);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_constant()) {
    if (dest->is_register()) {
      const2reg(src, dest, patch_code, info); // patching is possible
    } else if (dest->is_stack()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();
  }
}

void LIR_Assembler::verify_oop_map(CodeEmitInfo* info) {
#ifndef PRODUCT
  if (VerifyOops) {
    OopMapStream s(info->oop_map());
    while (!s.is_done()) {
      OopMapValue v = s.current();
      if (v.is_oop()) {
        VMReg r = v.reg();
        if (!r->is_stack()) {
          stringStream st;
          st.print("bad oop %s at %d", r->as_Register()->name(), _masm->offset());
#ifdef SPARC
          _masm->_verify_oop(r->as_Register(), os::strdup(st.as_string(), mtCompiler), __FILE__, __LINE__);
#else
          _masm->verify_oop(r->as_Register());
#endif
        } else {
          _masm->verify_stack_oop(r->reg2stack() * VMRegImpl::stack_slot_size);
        }
      }
      check_codespace();
      CHECK_BAILOUT();

      s.next();
    }
  }
#endif
}