/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_AARCH64_NATIVEINST_AARCH64_HPP
#define CPU_AARCH64_NATIVEINST_AARCH64_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/os.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeIllegalInstruction
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeCall;

class NativeInstruction {
  friend class Relocation;
  friend bool is_NativeCallTrampolineStub_at(address);
 public:
  enum {
    instruction_size = 4
  };

  juint encoding() const {
    return uint_at(0);
  }

  bool is_blr() const { return (encoding() & 0xff9ffc1f) == 0xd61f0000; } // blr(register) or br(register)
  bool is_adr_aligned() const { return (encoding() & 0xff000000) == 0x10000000; } // adr Xn, <label>, where label is aligned to 4 bytes (address of instruction).

  inline bool is_nop();
  inline bool is_illegal();
  inline bool is_return();
  bool is_jump();
  bool is_general_jump();
  inline bool is_jump_or_nop();
  inline bool is_cond_jump();
  bool is_safepoint_poll();
  bool is_movz();
  bool is_movk();
  bool is_sigill_zombie_not_entrant();
  bool is_stop();

 protected:
  address addr_at(int offset) const { return address(this) + offset; }

  s_char sbyte_at(int offset) const { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const { return *(jint*) addr_at(offset); }
  juint uint_at(int offset) const { return *(juint*) addr_at(offset); }

  address ptr_at(int offset) const { return *(address*) addr_at(offset); }

  oop oop_at(int offset) const { return *(oop*) addr_at(offset); }

  void set_char_at(int offset, char c) { *addr_at(offset) = (u_char)c; }
  void set_int_at(int offset, jint i) { *(jint*)addr_at(offset) = i; }
  void set_uint_at(int offset, jint i) { *(juint*)addr_at(offset) = i; }
  void set_ptr_at(int offset, address ptr) { *(address*) addr_at(offset) = ptr; }
  void set_oop_at(int offset, oop o) { *(oop*) addr_at(offset) = o; }

  void wrote(int offset);

 public:

  // unit test stuff
  static void test() {} // override for testing

  inline friend NativeInstruction* nativeInstruction_at(address address);

  static bool is_adrp_at(address instr);

  static bool is_ldr_literal_at(address instr);

  bool is_ldr_literal() {
    return is_ldr_literal_at(addr_at(0));
  }

  static bool is_ldrw_to_zr(address instr);

  static bool is_call_at(address instr) {
    const uint32_t insn = (*(uint32_t*)instr);
    return (insn >> 26) == 0b100101;
  }

  bool is_call() {
    return is_call_at(addr_at(0));
  }

  static bool maybe_cpool_ref(address instr) {
    return is_adrp_at(instr) || is_ldr_literal_at(instr);
  }

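  // Matches a data memory barrier (DMB): bits 31:12 and 7:0 are the fixed
  // parts of the encoding, leaving CRm (bits 11:8), the barrier option, as
  // a wildcard; e.g. "dmb ish" encodes as 0xd5033bbf (option 0b1011).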
  bool is_Membar() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 31, 12) == 0b11010101000000110011 &&
           Instruction_aarch64::extract(insn, 7, 0) == 0b10111111;
  }

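  // Matches the "load/store register" encoding class: bits 29:27 == 0b111
  // and bits 26:25 == 0b00, with opc bit 23 clear, i.e. the plain LDR/STR
  // forms decoded by NativeLdSt rather than the sign-extending variants.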
  bool is_Imm_LdSt() {
    unsigned int insn = uint_at(0);
    return Instruction_aarch64::extract(insn, 29, 27) == 0b111 &&
           Instruction_aarch64::extract(insn, 23, 23) == 0b0 &&
           Instruction_aarch64::extract(insn, 26, 25) == 0b00;
  }
};

inline NativeInstruction* nativeInstruction_at(address address) {
  return (NativeInstruction*)address;
}

// The natural type of an AArch64 instruction is uint32_t
inline NativeInstruction* nativeInstruction_at(uint32_t *address) {
  return (NativeInstruction*)address;
}
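
// Illustrative sketch (not part of this interface): given some address `pc`
// inside generated code, an instruction can be inspected before patching:
//
//   NativeInstruction* insn = nativeInstruction_at(pc);
//   if (insn->is_call()) {
//     // safe to reinterpret pc via nativeCall_at(pc)
//   }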

class NativePltCall: public NativeInstruction {
 public:
  enum Arm_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    displacement_offset = 1,
    return_address_offset = 4
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void reset_to_plt_resolve_call();
  void set_destination_mt_safe(address dest);

  void verify() const;
};

inline NativePltCall* nativePltCall_at(address address) {
  NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativePltCall* nativePltCall_before(address addr) {
  address at = addr - NativePltCall::instruction_size;
  return nativePltCall_at(at);
}

inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call instructions (used to manipulate inline caches, primitive &
// DSO calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    displacement_offset = 0,
    return_address_offset = 4
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  int displacement() const { return (int_at(displacement_offset) << 6) >> 4; }
  address displacement_address() const { return addr_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;

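  // Re-emits a BL in place: opcode 0b100101 in bits 31:26, word-scaled
  // signed displacement in bits 25:0. For example, a destination 64 bytes
  // ahead encodes as (0b100101 << 26) | (64 >> 2) == 0x94000010.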
  void set_destination(address dest) {
    int offset = dest - instruction_address();
    unsigned int insn = 0b100101 << 26;
    assert((offset & 3) == 0, "should be");
    offset >>= 2;
    offset &= (1 << 26) - 1; // mask off insn part
    insn |= offset;
    set_int_at(displacement_offset, insn);
  }

  void verify_alignment() { ; }
  void verify();
  void print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

#if INCLUDE_AOT
  // Return true iff a call from instr to target is out of range.
  // Used for calls from JIT- to AOT-compiled code.
  static bool is_far_call(address instr, address target) {
    // On AArch64 we use trampolines which can reach anywhere in the
    // address space, so calls are never out of range.
    return false;
  }
#endif

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);

  // Similar to replace_mt_safe, but just changes the destination. The
  // important thing is that free-running threads are able to execute
  // this call instruction at all times. If the call is an immediate BL
  // instruction we can simply rely on atomicity of 32-bit writes to
  // make sure other threads will see no intermediate states.

  // We cannot rely on locks here, since the free-running threads must run at
  // full speed.
  //
  // Used in the runtime linkage of calls; see class CompiledIC.
  // (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)

  // The parameter assert_lock disables the assertion during code generation.
  void set_destination_mt_safe(address dest, bool assert_lock = true);

  address get_trampoline();
  address trampoline_jump(CodeBuffer &cbuf, address dest);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
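
// Illustrative sketch (assuming `return_pc` is a return address taken from a
// frame walk, and `new_entry` is a hypothetical new call target):
//
//   if (NativeCall::is_call_before(return_pc)) {
//     NativeCall* call = nativeCall_before(return_pc);
//     call->set_destination_mt_safe(new_entry);
//   }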

// An interface for accessing/manipulating native mov reg, imm instructions.
// (used to manipulate inlined 64-bit data calls, etc.)
class NativeMovConstReg: public NativeInstruction {
 public:
  enum Aarch64_specific_constants {
    instruction_size = 3 * 4, // movz, movk, movk. See movptr().
    instruction_offset = 0,
    displacement_offset = 0,
  };
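
  // movptr() materializes a 48-bit value with a three-instruction sequence;
  // e.g. (using x8 purely as an example register) 0x123456789abc becomes:
  //   movz x8, #0x9abc
  //   movk x8, #0x5678, lsl #16
  //   movk x8, #0x1234, lsl #32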

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const {
    if (nativeInstruction_at(instruction_address())->is_movz())
      // Assume movz, movk, movk
      return addr_at(instruction_size);
    else if (is_adrp_at(instruction_address()))
      return addr_at(2*4);
    else if (is_ldr_literal_at(instruction_address()))
      return addr_at(4);
    assert(false, "Unknown instruction in NativeMovConstReg");
    return NULL;
  }

  intptr_t data() const;
  void set_data(intptr_t x);

  void flush() {
    if (! maybe_cpool_ref(instruction_address())) {
      ICache::invalidate_range(instruction_address(), instruction_size);
    }
  }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
// macros. For example: The load_unsigned_byte instruction generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

 public:
  // helper
  int instruction_start() const { return instruction_offset; }

  address instruction_address() const { return addr_at(instruction_offset); }

  int num_bytes_to_end_of_patch() const { return instruction_offset + instruction_size; }

  int offset() const;

  void set_offset(int x);

  void add_offset_in_bytes(int add_offset) { set_offset(offset() + add_offset); }

  void verify();
  void print();

  // unit test stuff
  static void test() {}

 private:
  inline friend NativeMovRegMem* nativeMovRegMem_at(address address);
};

inline NativeMovRegMem* nativeMovRegMem_at(address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

class NativeMovRegMemPatching: public NativeMovRegMem {
 private:
  friend NativeMovRegMemPatching* nativeMovRegMemPatching_at(address address) { Unimplemented(); return 0; }
};

// An interface for accessing/manipulating native leal instruction of form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeInstruction {
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

 public:
  void verify();
  void print();

  // unit test stuff
  static void test() {}
};

//   adrp    x16, #page
//   add     x16, x16, #offset
//   ldr     x16, [x16]
class NativeLoadGot: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_length = 4 * NativeInstruction::instruction_size,
    offset_offset = 0,
  };

  address instruction_address() const { return addr_at(0); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const;
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  void verify() const;
 private:
  void report_and_fail() const;
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}

class NativeJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4
  };

  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(instruction_size); }
  address jump_destination() const;
  void set_jump_destination(address dest);

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Unit testing stuff
  static void test() {}

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

class NativeGeneralJump: public NativeJump {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4 * 4,
    instruction_offset = 0,
    data_offset = 0,
    next_instruction_offset = 4 * 4
  };

  address jump_destination() const;
  void set_jump_destination(address dest);

  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);
  static void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}

class NativeGotJump: public NativeInstruction {
 public:
  enum AArch64_specific_constants {
    instruction_size = 4 * NativeInstruction::instruction_size,
  };

  void verify() const;
  address instruction_address() const { return addr_at(0); }
  address destination() const;
  address return_address() const { return addr_at(instruction_size); }
  address got_address() const;
  address next_instruction_address() const { return addr_at(instruction_size); }
  bool is_GotJump() const;

  void set_jump_destination(address dest) {
    address* got = (address *)got_address();
    *got = dest;
  }
};

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
};

inline bool NativeInstruction::is_nop() {
  uint32_t insn = *(uint32_t*)addr_at(0);
  return insn == 0xd503201f;
}

inline bool NativeInstruction::is_jump() {
  uint32_t insn = *(uint32_t*)addr_at(0);

  if (Instruction_aarch64::extract(insn, 30, 26) == 0b00101) {
    // Unconditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 31, 25) == 0b0101010) {
    // Conditional branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011010) {
    // Compare & branch (immediate)
    return true;
  } else if (Instruction_aarch64::extract(insn, 30, 25) == 0b011011) {
    // Test & branch (immediate)
    return true;
  } else
    return false;
}

inline bool NativeInstruction::is_jump_or_nop() {
  return is_nop() || is_jump();
}

// Call trampoline stubs.
class NativeCallTrampolineStub : public NativeInstruction {
 public:

  enum AArch64_specific_constants {
    instruction_size = 4 * 4,
    instruction_offset = 0,
    data_offset = 2 * 4,
    next_instruction_offset = 4 * 4
  };

  address destination(nmethod *nm = NULL) const;
  void set_destination(address new_destination);
  ptrdiff_t destination_offset() const;
};

inline bool is_NativeCallTrampolineStub_at(address addr) {
  // Ensure that the stub is exactly
  //      ldr   xscratch1, L
  //      br    xscratch1
  // L:
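  // i.e. 0x58000048 (ldr x8, #8 -- x8 is rscratch1) followed by
  // 0xd61f0100 (br x8), with the 64-bit target stored at L.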
  uint32_t *i = (uint32_t *)addr;
  return i[0] == 0x58000048 && i[1] == 0xd61f0100;
}

inline NativeCallTrampolineStub* nativeCallTrampolineStub_at(address addr) {
  assert(is_NativeCallTrampolineStub_at(addr), "no call trampoline found");
  return (NativeCallTrampolineStub*)addr;
}

class NativeMembar : public NativeInstruction {
 public:
  unsigned int get_kind() { return Instruction_aarch64::extract(uint_at(0), 11, 8); }
  void set_kind(int order_kind) { Instruction_aarch64::patch(addr_at(0), 11, 8, order_kind); }
};

inline NativeMembar* NativeMembar_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Membar(), "no membar found");
  return (NativeMembar*)addr;
}
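
// Illustrative sketch (assuming `pc` points at a dmb instruction): the
// barrier option can be rewritten in place, e.g. to inner-shareable (ISH):
//
//   NativeMembar* m = NativeMembar_at(pc);
//   m->set_kind(0b1011);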

class NativeLdSt : public NativeInstruction {
 private:
  int32_t size() { return Instruction_aarch64::extract(uint_at(0), 31, 30); }
  // Check whether instruction is with unscaled offset.
  bool is_ldst_ur() {
    return (Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000010 ||
            Instruction_aarch64::extract(uint_at(0), 29, 21) == 0b111000000) &&
           Instruction_aarch64::extract(uint_at(0), 11, 10) == 0b00;
  }
  bool is_ldst_unsigned_offset() {
    return Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100101 ||
           Instruction_aarch64::extract(uint_at(0), 29, 22) == 0b11100100;
  }
 public:
  Register target() {
    uint32_t r = Instruction_aarch64::extract(uint_at(0), 4, 0);
    return r == 0x1f ? zr : as_Register(r);
  }
  Register base() {
    uint32_t b = Instruction_aarch64::extract(uint_at(0), 9, 5);
    return b == 0x1f ? sp : as_Register(b);
  }
  int64_t offset() {
    if (is_ldst_ur()) {
      return Instruction_aarch64::sextract(uint_at(0), 20, 12);
    } else if (is_ldst_unsigned_offset()) {
      return Instruction_aarch64::extract(uint_at(0), 21, 10) << size();
    } else {
      // others like: pre-index or post-index.
      ShouldNotReachHere();
      return 0;
    }
  }
  size_t size_in_bytes() { return 1 << size(); }
  bool is_not_pre_post_index() { return (is_ldst_ur() || is_ldst_unsigned_offset()); }
  bool is_load() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01;
  }
  bool is_store() {
    assert(Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b01 ||
           Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00, "must be ldr or str");

    return Instruction_aarch64::extract(uint_at(0), 23, 22) == 0b00;
  }
};

inline NativeLdSt* NativeLdSt_at(address addr) {
  assert(nativeInstruction_at(addr)->is_Imm_LdSt(), "no immediate load/store found");
  return (NativeLdSt*)addr;
}
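
// Illustrative sketch (assuming `pc` points at an immediate-offset load):
//
//   NativeLdSt* ld = NativeLdSt_at(pc);
//   if (ld->is_load() && ld->is_not_pre_post_index()) {
//     Register base  = ld->base();           // e.g. sp or an Xn register
//     int64_t  disp  = ld->offset();         // byte offset from the base
//     size_t   width = ld->size_in_bytes();  // access width: 1, 2, 4 or 8
//   }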
#endif // CPU_AARCH64_NATIVEINST_AARCH64_HPP