1 /*
2  * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4  *
5  * This code is free software; you can redistribute it and/or modify it
6  * under the terms of the GNU General Public License version 2 only, as
7  * published by the Free Software Foundation.
8  *
9  * This code is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * version 2 for more details (a copy is included in the LICENSE file that
13  * accompanied this code).
14  *
15  * You should have received a copy of the GNU General Public License version
16  * 2 along with this work; if not, write to the Free Software Foundation,
17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18  *
19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20  * or visit www.oracle.com if you need additional information or have any
21  * questions.
22  *
23  */
24 
25 #include "precompiled.hpp"
26 #include "asm/macroAssembler.inline.hpp"
27 #include "code/codeCache.hpp"
28 #include "memory/resourceArea.hpp"
29 #include "nativeInst_sparc.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/handles.hpp"
32 #include "runtime/sharedRuntime.hpp"
33 #include "runtime/stubRoutines.hpp"
34 #include "utilities/ostream.hpp"
35 #ifdef COMPILER1
36 #include "c1/c1_Runtime1.hpp"
37 #endif
38 
set_data64_sethi(address instaddr,intptr_t x)39 void NativeInstruction::set_data64_sethi(address instaddr, intptr_t x) {
40   ResourceMark rm;
41   CodeBuffer buf(instaddr, 10 * BytesPerInstWord );
42   MacroAssembler* _masm = new MacroAssembler(&buf);
43   Register destreg;
44 
45   destreg = inv_rd(*(unsigned int *)instaddr);
46   // Generate a the new sequence
47   _masm->patchable_sethi(x, destreg);
48   ICache::invalidate_range(instaddr, 7 * BytesPerInstWord);
49 }
50 
verify_data64_sethi(address instaddr,intptr_t x)51 void NativeInstruction::verify_data64_sethi(address instaddr, intptr_t x) {
52   ResourceMark rm;
53   unsigned char buffer[10 * BytesPerInstWord];
54   CodeBuffer buf(buffer, 10 * BytesPerInstWord);
55   MacroAssembler masm(&buf);
56 
57   Register destreg = inv_rd(*(unsigned int *)instaddr);
58   // Generate the proper sequence into a temporary buffer and compare
59   // it with the original sequence.
60   masm.patchable_sethi(x, destreg);
61   int len = buffer - masm.pc();
62   for (int i = 0; i < len; i++) {
63     guarantee(instaddr[i] == buffer[i], "instructions must match");
64   }
65 }
66 
verify()67 void NativeInstruction::verify() {
68   // make sure code pattern is actually an instruction address
69   address addr = addr_at(0);
70   if (addr == 0 || ((intptr_t)addr & 3) != 0) {
71     fatal("not an instruction address");
72   }
73 }
74 
print()75 void NativeInstruction::print() {
76   tty->print_cr(INTPTR_FORMAT ": 0x%x", p2i(addr_at(0)), long_at(0));
77 }
78 
set_long_at(int offset,int i)79 void NativeInstruction::set_long_at(int offset, int i) {
80   address addr = addr_at(offset);
81   *(int*)addr = i;
82   ICache::invalidate_word(addr);
83 }
84 
set_jlong_at(int offset,jlong i)85 void NativeInstruction::set_jlong_at(int offset, jlong i) {
86   address addr = addr_at(offset);
87   *(jlong*)addr = i;
88   // Don't need to invalidate 2 words here, because
89   // the flush instruction operates on doublewords.
90   ICache::invalidate_word(addr);
91 }
92 
set_addr_at(int offset,address x)93 void NativeInstruction::set_addr_at(int offset, address x) {
94   address addr = addr_at(offset);
95   assert( ((intptr_t)addr & (wordSize-1)) == 0, "set_addr_at bad address alignment");
96   *(uintptr_t*)addr = (uintptr_t)x;
97   // Don't need to invalidate 2 words here in the 64-bit case,
98   // because the flush instruction operates on doublewords.
99   ICache::invalidate_word(addr);
100   // The Intel code has this assertion for NativeCall::set_destination,
101   // NativeMovConstReg::set_data, NativeMovRegMem::set_offset,
102   // NativeJump::set_jump_destination, and NativePushImm32::set_data
103   //assert (Patching_lock->owned_by_self(), "must hold lock to patch instruction")
104 }
105 
is_zero_test(Register & reg)106 bool NativeInstruction::is_zero_test(Register &reg) {
107   int x = long_at(0);
108   Assembler::op3s temp = (Assembler::op3s) (Assembler::sub_op3 | Assembler::cc_bit_op3);
109   if (is_op3(x, temp, Assembler::arith_op) &&
110       inv_immed(x) && inv_rd(x) == G0) {
111       if (inv_rs1(x) == G0) {
112         reg = inv_rs2(x);
113         return true;
114       } else if (inv_rs2(x) == G0) {
115         reg = inv_rs1(x);
116         return true;
117       }
118   }
119   return false;
120 }
121 
is_load_store_with_small_offset(Register reg)122 bool NativeInstruction::is_load_store_with_small_offset(Register reg) {
123   int x = long_at(0);
124   if (is_op(x, Assembler::ldst_op) &&
125       inv_rs1(x) == reg && inv_immed(x)) {
126     return true;
127   }
128   return false;
129 }
130 
verify()131 void NativeCall::verify() {
132   NativeInstruction::verify();
133   // make sure code pattern is actually a call instruction
134   int x = long_at(0);
135   if (!is_op(x, Assembler::call_op)) {
136     fatal("not a call: 0x%x @ " INTPTR_FORMAT, x, p2i(instruction_address()));
137   }
138 }
139 
print()140 void NativeCall::print() {
141   tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
142 }
143 
144 
// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeCall* n_call =  nativeCall_at (instr_addr); // checking that it is a call
   assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
   // The two replacement instruction words from the patch buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The second word is the one another CPU could transiently execute
   // together with the old call; check it is harmless before and after.
   int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   n_call->set_long_at(1*BytesPerInstWord, i1);
   n_call->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    call patching_stub; nop                   (NativeCall)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    call patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
184 
185 // Similar to replace_mt_safe, but just changes the destination.  The
186 // important thing is that free-running threads are able to execute this
187 // call instruction at all times.  Thus, the displacement field must be
188 // instruction-word-aligned.  This is always true on SPARC.
189 //
190 // Used in the runtime linkage of calls; see class CompiledIC.
void NativeCall::set_destination_mt_safe(address dest) {
  // Caller must hold the Patching_lock or be at a safepoint; otherwise a
  // concurrent thread could observe a half-patched call displacement.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  // set_destination uses set_long_at which does the ICache::invalidate
  set_destination(dest);
}
197 
198 // Code for unit testing implementation of NativeCall class
// Debug-only self-test: emit a call, then retarget it through a range of
// displacements and check that destination() round-trips each one.
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  // Sample displacements, including boundary values of the signed range.
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  // Emit a self-call followed by its delay-slot nop.
  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  // Repeatedly retarget the call and verify the destination round-trips.
  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
235 // End code for unit testing implementation of NativeCall class
236 
237 //-------------------------------------------------------------------
238 
// Intentionally a no-op: a far call materializes its destination in the
// instruction stream, so there is no displacement to rewrite.  The #if 0
// block documents what an in-place retarget would look like.
void NativeFarCall::set_destination(address dest) {
  // Address materialized in the instruction stream, so nothing to do.
  return;
#if 0 // What we'd do if we really did want to change the destination
  if (destination() == dest) {
    return;
  }
  ResourceMark rm;
  CodeBuffer buf(addr_at(0), instruction_size + 1);
  MacroAssembler* _masm = new MacroAssembler(&buf);
  // Generate the new sequence
  AddressLiteral(dest);
  _masm->jumpl_to(dest, O7, O7);
  ICache::invalidate_range(addr_at(0), instruction_size );
#endif
}
255 
verify()256 void NativeFarCall::verify() {
257   // make sure code pattern is actually a jumpl_to instruction
258   assert((int)instruction_size == (int)NativeJump::instruction_size, "same as jump_to");
259   assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
260   nativeJump_at(addr_at(0))->verify();
261 }
262 
is_call_at(address instr)263 bool NativeFarCall::is_call_at(address instr) {
264   return nativeInstruction_at(instr)->is_sethi();
265 }
266 
print()267 void NativeFarCall::print() {
268   tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
269 }
270 
destination_is_compiled_verified_entry_point()271 bool NativeFarCall::destination_is_compiled_verified_entry_point() {
272   nmethod* callee = CodeCache::find_nmethod(destination());
273   if (callee == NULL) {
274     return false;
275   } else {
276     return destination() == callee->verified_entry_point();
277   }
278 }
279 
280 // MT-safe patching of a far call.
// MT-safe patching of a far call: not supported — far calls are never
// patched in place on SPARC, so reaching here is a bug.
void NativeFarCall::replace_mt_safe(address instr_addr, address code_buffer) {
  Unimplemented();
}
284 
285 // Code for unit testing implementation of NativeFarCall class
// Debug-only self-test placeholder; never implemented for far calls.
void NativeFarCall::test() {
  Unimplemented();
}
289 // End code for unit testing implementation of NativeFarCall class
290 
291 //-------------------------------------------------------------------
292 
293 
void NativeMovConstReg::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_metadata" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);  // read but not checked below

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
  // NOTE(review): this fires only when BOTH the sethi opcode check fails
  // AND rd != G0.  A strict pattern check would be
  // !(is_op2(i0, sethi_op2) && rd != G0), i.e. '||' after De Morgan.
  // Left as-is to match the long-standing behavior — confirm upstream intent.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
}
307 
308 
print()309 void NativeMovConstReg::print() {
310   tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
311 }
312 
313 
data() const314 intptr_t NativeMovConstReg::data() const {
315   return data64(addr_at(sethi_offset), long_at(add_offset));
316 }
317 
318 
// Patch the 64-bit constant materialized by this sethi/add sequence, and
// keep the corresponding oop/metadata relocation cell (if any) in the
// enclosing nmethod in sync with the new value.
void NativeMovConstReg::set_data(intptr_t x) {
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk only the relocations that cover this instruction.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // All oop relocations here must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
352 
353 
354 // Code for unit testing implementation of NativeMovConstReg class
// Debug-only self-test: emit two sethi/add pairs, then drive set_data()
// through a range of constants and check data() round-trips each one.
void NativeMovConstReg::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstReg* nm;
  uint idx;
  // Sample constants, including sign-boundary and simm13-boundary values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstReg_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstReg_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif
}
394 // End code for unit testing implementation of NativeMovConstReg class
395 
396 //-------------------------------------------------------------------
397 
void NativeMovConstReg32::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "set_metadata" synthetic instruction
  // see MacroAssembler::set_oop()
  int i0 = long_at(sethi_offset);
  int i1 = long_at(add_offset);  // read but not checked below

  // verify the pattern "sethi %hi22(imm), reg ;  add reg, %lo10(imm), reg"
  Register rd = inv_rd(i0);
  // NOTE(review): same operator-precedence oddity as NativeMovConstReg::verify —
  // this fires only when the opcode check fails AND rd != G0; a strict check
  // would be !(is_op2(...) && rd != G0).  Left as-is; confirm upstream intent.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a set_metadata");
  }
}
411 
412 
print()413 void NativeMovConstReg32::print() {
414   tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
415 }
416 
417 
data() const418 intptr_t NativeMovConstReg32::data() const {
419   return data32(long_at(sethi_offset), long_at(add_offset));
420 }
421 
422 
// Patch the 32-bit constant materialized by this sethi/add pair, and keep
// the corresponding oop/metadata relocation cell (if any) in the
// enclosing nmethod in sync with the new value.
void NativeMovConstReg32::set_data(intptr_t x) {
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk only the relocations that cover this instruction.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // All oop relocations here must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
456 
457 //-------------------------------------------------------------------
458 
void NativeMovConstRegPatching::verify() {
  NativeInstruction::verify();
  // Make sure code pattern is sethi/nop/add.
  int i0 = long_at(sethi_offset);
  int i1 = long_at(nop_offset);
  int i2 = long_at(add_offset);
  assert((int)nop_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");

  // Verify the pattern "sethi %hi22(imm), reg; nop; add reg, %lo10(imm), reg"
  // The casual reader should note that on Sparc a nop is a special case of sethi
  // in which the destination register is %g0.
  Register rd0 = inv_rd(i0);
  Register rd1 = inv_rd(i1);
  // Unlike the NativeMovConstReg variants, this check correctly negates
  // the whole conjunction: every sub-condition of the pattern must hold.
  if (!(is_op2(i0, Assembler::sethi_op2) && rd0 != G0 &&
        is_op2(i1, Assembler::sethi_op2) && rd1 == G0 &&        // nop is a special case of sethi
        is_op3(i2, Assembler::add_op3, Assembler::arith_op) &&
        inv_immed(i2) && (unsigned)get_simm13(i2) < (1 << 10) &&
        rd0 == inv_rs1(i2) && rd0 == inv_rd(i2))) {
    fatal("not a set_metadata");
  }
}
480 
481 
print()482 void NativeMovConstRegPatching::print() {
483   tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
484 }
485 
486 
data() const487 int NativeMovConstRegPatching::data() const {
488   return data64(addr_at(sethi_offset), long_at(add_offset));
489 }
490 
491 
// Patch the constant materialized by this sethi/nop/add sequence, and keep
// the corresponding oop/metadata relocation cell (if any) in the
// enclosing nmethod in sync with the new value.
void NativeMovConstRegPatching::set_data(int x) {
  set_data64_sethi(addr_at(sethi_offset), x);
  set_long_at(add_offset, set_data32_simm13(long_at(add_offset), x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Walk only the relocations that cover this instruction.
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          // All oop relocations here must refer to the same cell.
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
525 
526 
527 // Code for unit testing implementation of NativeMovConstRegPatching class
// Debug-only self-test: emit two sethi/nop/add sequences, then drive
// set_data() through a range of constants and check data() round-trips.
void NativeMovConstRegPatching::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovConstRegPatching* nm;
  uint idx;
  // Sample constants, including sign-boundary and simm13-boundary values.
  int offsets[] = {
    0x0,
    0x7fffffff,
    0x80000000,
    0xffffffff,
    0x20,
    4096,
    4097,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xaaaabbbb, relocInfo::external_word_type);
  a->sethi(al1, I3);
  a->nop();
  a->add(I3, al1.low10(), I3);
  AddressLiteral al2(0xccccdddd, relocInfo::external_word_type);
  a->sethi(al2, O2);
  a->nop();
  a->add(O2, al2.low10(), O2);

  nm = nativeMovConstRegPatching_at( cb.insts_begin() );
  nm->print();

  nm = nativeMovConstRegPatching_at( nm->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nm->set_data( offsets[idx] );
    assert(nm->data() == offsets[idx], "check unit test");
  }
  nm->print();

  VM_Version::revert();
#endif // ASSERT
}
569 // End code for unit testing implementation of NativeMovConstRegPatching class
570 
571 
572 //-------------------------------------------------------------------
573 
574 
void NativeMovRegMem::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a "ld" or "st" of some sort.
  int i0 = long_at(0);
  int op3 = inv_op3(i0);

  assert((int)add_offset == NativeMovConstReg::add_offset, "sethi size ok");

  // First try the immediate form: a single ld*/st* with a simm13 offset.
  // The op3 bitmask test selects integer vs floating-point ld/st groups.
  if (!(is_op(i0, Assembler::ldst_op) &&
        inv_immed(i0) &&
        0 != (op3 < op3_ldst_int_limit
         ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
         : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))))
  {
    // Otherwise expect the register-offset form at ldst_offset.
    int i1 = long_at(ldst_offset);
    Register rd = inv_rd(i0);

    op3 = inv_op3(i1);
    // NOTE(review): '!is_op(i1, ...) && ...' looks inverted — the fatal
    // should presumably fire when i1 is NOT a valid ld/st, i.e.
    // !(is_op(i1, ...) && ...).  Left as-is to preserve the long-standing
    // behavior; confirm upstream intent before tightening.
    if (!is_op(i1, Assembler::ldst_op) && rd == inv_rs2(i1) &&
         0 != (op3 < op3_ldst_int_limit
              ? (1 <<  op3                      ) & (op3_mask_ld  | op3_mask_st)
               : (1 << (op3 - op3_ldst_int_limit)) & (op3_mask_ldf | op3_mask_stf))) {
      fatal("not a ld* or st* op");
    }
  }
}
601 
602 
print()603 void NativeMovRegMem::print() {
604   if (is_immediate()) {
605     // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
606     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
607   } else {
608     tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
609   }
610 }
611 
612 
613 // Code for unit testing implementation of NativeMovRegMem class
// Debug-only self-test: emit every ld*/st* form in both immediate and
// register-offset addressing (counting each with idx), then walk the
// emitted instructions and exercise set_offset()/add_offset_in_bytes(),
// checking that offset() round-trips for each sample value.
void NativeMovRegMem::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 1000, 1000);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeMovRegMem* nm;
  uint idx = 0;
  uint idx1;
  // Sample offsets, including sign boundaries and simm13 boundaries.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al1(0xffffffff, relocInfo::external_word_type);
  AddressLiteral al2(0xaaaabbbb, relocInfo::external_word_type);
  // Loads: each immediate-form emission is followed by a sethi/add pair
  // and the matching register-offset form.
  a->ldsw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsw( G5, I3, G4 ); idx++;
  a->ldsb( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsb( G5, I3, G4 ); idx++;
  a->ldsh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldsh( G5, I3, G4 ); idx++;
  a->lduw( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduw( G5, I3, G4 ); idx++;
  a->ldub( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldub( G5, I3, G4 ); idx++;
  a->lduh( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->lduh( G5, I3, G4 ); idx++;
  a->ldx( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldx( G5, I3, G4 ); idx++;
  a->ldd( G5, al1.low10(), G4 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldd( G5, I3, G4 ); idx++;
  a->ldf( FloatRegisterImpl::D, O2, -1, F14 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->ldf( FloatRegisterImpl::S, O0, I3, F15 ); idx++;

  // Stores: same pattern as the loads above.
  a->stw( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stw( G5, G4, I3 ); idx++;
  a->stb( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stb( G5, G4, I3 ); idx++;
  a->sth( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->sth( G5, G4, I3 ); idx++;
  a->stx( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stx( G5, G4, I3 ); idx++;
  a->std( G5, G4, al1.low10() ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->std( G5, G4, I3 ); idx++;
  a->stf( FloatRegisterImpl::S, F18, O2, -1 ); idx++;
  a->sethi(al2, I3); a->add(I3, al2.low10(), I3);
  a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++;

  nm = nativeMovRegMem_at( cb.insts_begin() );
  nm->print();
  nm->set_offset( low10(0) );
  nm->print();
  nm->add_offset_in_bytes( low10(0xbb) * wordSize );
  nm->print();

  // Visit the remaining idx-1 emitted ld/st instructions.
  while (--idx) {
    nm = nativeMovRegMem_at( nm->next_instruction_address() );
    nm->print();
    for (idx1 = 0; idx1 < ARRAY_SIZE(offsets); idx1++) {
      nm->set_offset( nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1] );
      assert(nm->offset() == (nm->is_immediate() ? low10(offsets[idx1]) : offsets[idx1]),
             "check unit test");
      nm->print();
    }
    nm->add_offset_in_bytes( low10(0xbb) * wordSize );
    nm->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
707 
708 // End code for unit testing implementation of NativeMovRegMem class
709 
710 
711 //--------------------------------------------------------------------------------
712 
713 
void NativeJump::verify() {
  NativeInstruction::verify();
  int i0 = long_at(sethi_offset);
  int i1 = long_at(jmpl_offset);  // read but not checked below
  assert((int)jmpl_offset == (int)NativeMovConstReg::add_offset, "sethi size ok");
  // verify the pattern "sethi %hi22(imm), treg ;  jmpl treg, %lo10(imm), lreg"
  Register rd = inv_rd(i0);
  // In LP64, the jump instruction location varies for non relocatable
  // jumps, for example it could be sethi, xor, jmp instead of the
  // 7 instructions for sethi.  So let's check sethi only.
  // NOTE(review): same '&&' vs '||' oddity as NativeMovConstReg::verify —
  // this fires only when the sethi check fails AND rd != G0.  Left as-is;
  // confirm upstream intent.
  if (!is_op2(i0, Assembler::sethi_op2) && rd != G0 ) {
    fatal("not a jump_to instruction");
  }
}
728 
729 
print()730 void NativeJump::print() {
731   tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
732 }
733 
734 
735 // Code for unit testing implementation of NativeJump class
// Debug-only self-test: emit two sethi/jmpl sequences, then drive
// set_jump_destination() through a range of displacements and check
// jump_destination() round-trips each one.
void NativeJump::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeJump* nj;
  uint idx;
  // Sample displacements, including sign and simm13 boundaries.
  int offsets[] = {
    0x0,
    0xffffffff,
    0x7fffffff,
    0x80000000,
    4096,
    4097,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  AddressLiteral al(0x7fffbbbb, relocInfo::external_word_type);
  // First jump discards the return address (G0), second keeps it (L3).
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), G0, RelocationHolder::none);
  a->delayed()->nop();
  a->sethi(al, I3);
  a->jmpl(I3, al.low10(), L3, RelocationHolder::none);
  a->delayed()->nop();

  nj = nativeJump_at( cb.insts_begin() );
  nj->print();

  nj = nativeJump_at( nj->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nj->set_jump_destination( nj->instruction_address() + offsets[idx] );
    assert(nj->jump_destination() == (nj->instruction_address() + offsets[idx]), "check unit test");
    nj->print();
  }

  VM_Version::revert();
#endif // ASSERT
}
777 // End code for unit testing implementation of NativeJump class
778 
779 
// Inserting a fresh jump at an arbitrary code position is not supported
// on SPARC; reaching here is a bug.
void NativeJump::insert(address code_pos, address entry) {
  Unimplemented();
}
783 
784 // MT safe inserting of a jump over an unknown instruction sequence (used by nmethod::makeZombie)
785 // The problem: jump_to <dest> is a 3-word instruction (including its delay slot).
786 // Atomic write can be only with 1 word.
void NativeJump::patch_verified_entry(address entry, address verified_entry, address dest) {
  // Here's one way to do it:  Pre-allocate a three-word jump sequence somewhere
  // in the header of the nmethod, within a short branch's span of the patch point.
  // Set up the jump sequence using NativeJump::insert, and then use an annulled
  // unconditional branch at the target site (an atomic 1-word update).
  // Limitations:  You can only patch nmethods, with any given nmethod patched at
  // most once, and the patch must be in the nmethod's header.
  // It's messy, but you can ask the CodeCache for the nmethod containing the
  // target address.

  // %%%%% For now, do something MT-stupid:
  // Overwrite the single word at verified_entry with a load from address 0,
  // which faults when executed; per the note below, the signal handler is
  // expected to recognize this exact "ld" — confirm against the handler code.
  ResourceMark rm;
  int code_size = 1 * BytesPerInstWord;
  CodeBuffer cb(verified_entry, code_size + 1);
  MacroAssembler* a = new MacroAssembler(&cb);
  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
  ICache::invalidate_range(verified_entry, code_size);
}
805 
806 
insert(address code_pos)807 void NativeIllegalInstruction::insert(address code_pos) {
808   NativeIllegalInstruction* nii = (NativeIllegalInstruction*) nativeInstruction_at(code_pos);
809   nii->set_long_at(0, illegal_instruction());
810 }
811 
// Cached encoding of the canonical illegal (trap) instruction; 0 means
// "not yet generated".
static int illegal_instruction_bits = 0;

// Return the instruction word used as an "illegal instruction" marker,
// assembling it once on first use and caching the bits.
// NOTE(review): the lazy init is unsynchronized; racing threads would each
// assemble and store the same bits, which looks benign — confirm.
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
829 
// Cached encoding of the inline-cache-miss trap instruction; 0 means
// "not yet generated".
static int ic_miss_trap_bits = 0;

// True if this instruction is the conditional trap emitted for an inline
// cache miss.  The reference encoding is assembled once and cached.
// NOTE(review): same unsynchronized lazy init as illegal_instruction();
// racing threads store identical bits — looks benign, confirm.
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
847 
848 
is_illegal()849 bool NativeInstruction::is_illegal() {
850   if (illegal_instruction_bits == 0) {
851     return false;
852   }
853   return long_at(0) == illegal_instruction_bits;
854 }
855 
856 
verify()857 void NativeGeneralJump::verify() {
858   assert(((NativeInstruction *)this)->is_jump() ||
859          ((NativeInstruction *)this)->is_cond_jump(), "not a general jump instruction");
860 }
861 
862 
// Write a single branch-always ("ba", not annulled) instruction at
// code_pos targeting entry, encoded with a 22-bit word displacement.
void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
  Assembler::Condition condition = Assembler::always;
  int x = Assembler::op2(Assembler::br_op2) | Assembler::annul(false) |
    Assembler::cond(condition) | Assembler::wdisp((intptr_t)entry, (intptr_t)code_pos, 22);
  NativeGeneralJump* ni = (NativeGeneralJump*) nativeInstruction_at(code_pos);
  // set_long_at performs the I-cache invalidate.
  ni->set_long_at(0, x);
}
870 
871 
// MT-safe patching of a jmp instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
   assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
   assert (instr_addr != NULL, "illegal address for code patching");
   NativeGeneralJump* h_jump =  nativeGeneralJump_at (instr_addr); // checking that it is a call
   assert(NativeGeneralJump::instruction_size == 8, "wrong instruction size; must be 8");
   // The two replacement instruction words from the patch buffer.
   int i0 = ((int*)code_buffer)[0];
   int i1 = ((int*)code_buffer)[1];
   // The second word is the one another CPU could transiently execute
   // together with the old jump; check it is harmless before and after.
   int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
   // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   h_jump->set_long_at(1*BytesPerInstWord, i1);
   h_jump->set_long_at(0*BytesPerInstWord, i0);
   // NOTE:  It is possible that another thread T will execute
   // only the second patched word.
   // In other words, since the original instruction is this
   //    jmp patching_stub; nop                    (NativeGeneralJump)
   // and the new sequence from the buffer is this:
   //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
   // what T will execute is this:
   //    jmp patching_stub; add %r, %lo(K), %r
   // thereby putting garbage into %r before calling the patching stub.
   // This is OK, because the patching stub ignores the value of %r.

   // Make sure the first-patched instruction, which may co-exist
   // briefly with the call, will do something harmless.
   assert(inv_op(*contention_addr) == Assembler::arith_op ||
          *contention_addr == nop_instruction(),
          "must not interfere with original call");
}
911