/*
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_X86_NATIVEINST_X86_HPP
#define CPU_X86_NATIVEINST_X86_HPP

#include "asm/assembler.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepointMechanism.hpp"

// We have interfaces for the following instructions:
// - NativeInstruction
// - - NativeCall
// - - NativeMovConstReg
// - - NativeMovConstRegPatching
// - - NativeMovRegMem
// - - NativeMovRegMemPatching
// - - NativeJump
// - - NativeFarJump
// - - NativeIllegalOpCode
// - - NativeGeneralJump
// - - NativeReturn
// - - NativeReturnX (return with argument)
// - - NativePushConst
// - - NativeTstRegMem

// The base class for different kinds of native instruction abstractions.
// Provides the primitive operations to manipulate code relative to this.

class NativeInstruction {
  friend class Relocation;

 public:
  enum Intel_specific_constants {
    nop_instruction_code        = 0x90,
    nop_instruction_size        =    1
  };

  bool is_nop()                        { return ubyte_at(0) == nop_instruction_code; }
  inline bool is_call();
  inline bool is_call_reg();
  inline bool is_illegal();
  inline bool is_return();
  inline bool is_jump();
  inline bool is_jump_reg();
  inline bool is_far_jump();
  inline bool is_cond_jump();
  inline bool is_safepoint_poll();
  inline bool is_mov_literal64();

 protected:
  address addr_at(int offset) const    { return address(this) + offset; }

  s_char sbyte_at(int offset) const    { return *(s_char*) addr_at(offset); }
  u_char ubyte_at(int offset) const    { return *(u_char*) addr_at(offset); }

  jint int_at(int offset) const        { return *(jint*) addr_at(offset); }

  intptr_t ptr_at(int offset) const    { return *(intptr_t*) addr_at(offset); }

  oop  oop_at (int offset) const       { return *(oop*) addr_at(offset); }


  void set_char_at(int offset, char c)        { *addr_at(offset) = (u_char)c; wrote(offset); }
  void set_int_at(int offset, jint i)         { *(jint*)addr_at(offset) = i;  wrote(offset); }
  void set_ptr_at (int offset, intptr_t ptr)  { *(intptr_t*) addr_at(offset) = ptr;  wrote(offset); }
  void set_oop_at (int offset, oop o)         { *(oop*) addr_at(offset) = o;  wrote(offset); }

  // This doesn't really do anything on Intel, but it is the place where
  // cache invalidation belongs, generically:
  void wrote(int offset);

 public:

  inline friend NativeInstruction* nativeInstruction_at(address address);
};

inline NativeInstruction* nativeInstruction_at(address address) {
  NativeInstruction* inst = (NativeInstruction*)address;
#ifdef ASSERT
  //inst->verify();
#endif
  return inst;
}
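
// Illustrative usage sketch: classifying the instruction at a code position.
// (`pc` is a hypothetical address assumed to point at a valid instruction in
// the code cache; it is not part of this header.)
//
//   NativeInstruction* inst = nativeInstruction_at(pc);
//   if (inst->is_call()) {
//     // pc starts a 5-byte "call rel32" (opcode 0xE8); see NativeCall below.
//   } else if (inst->is_jump()) {
//     // pc starts a "jmp rel32" (0xE9) or a short "jmp rel8" (0xEB).
//   }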

class NativePltCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code           = 0xE8,
    instruction_size           =    5,
    instruction_offset         =    0,
    displacement_offset        =    1,
    return_address_offset      =    5
  };
  address instruction_address() const { return addr_at(instruction_offset); }
  address next_instruction_address() const { return addr_at(return_address_offset); }
  address displacement_address() const { return addr_at(displacement_offset); }
  int displacement() const { return (jint) int_at(displacement_offset); }
  address return_address() const { return addr_at(return_address_offset); }
  address destination() const;
  address plt_entry() const;
  address plt_jump() const;
  address plt_load_got() const;
  address plt_resolve_call() const;
  address plt_c2i_stub() const;
  void set_stub_to_clean();

  void reset_to_plt_resolve_call();
  void set_destination_mt_safe(address dest);

  void verify() const;
};

inline NativePltCall* nativePltCall_at(address address) {
  NativePltCall* call = (NativePltCall*) address;
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativePltCall* nativePltCall_before(address addr) {
  address at = addr - NativePltCall::instruction_size;
  return nativePltCall_at(at);
}

class NativeCall;
inline NativeCall* nativeCall_at(address address);
// The NativeCall is an abstraction for accessing/manipulating native
// call imm32/rel32off instructions (used to manipulate inline caches,
// primitive & dll calls, etc.).

class NativeCall: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xE8,
    instruction_size            =    5,
    instruction_offset          =    0,
    displacement_offset         =    1,
    return_address_offset       =    5
  };

  enum { cache_line_size = BytesPerWord };  // conservative estimate!

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(return_address_offset); }
  int   displacement() const                { return (jint) int_at(displacement_offset); }
  address displacement_address() const      { return addr_at(displacement_offset); }
  address return_address() const            { return addr_at(return_address_offset); }
  address destination() const;
  void  set_destination(address dest)       {
#ifdef AMD64
    intptr_t disp = dest - return_address();
    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
#endif // AMD64
    set_int_at(displacement_offset, dest - return_address());
  }
  void  set_destination_mt_safe(address dest);

  void  verify_alignment() { assert((intptr_t)addr_at(displacement_offset) % BytesPerInt == 0, "must be aligned"); }
  void  verify();
  void  print();

  // Creation
  inline friend NativeCall* nativeCall_at(address address);
  inline friend NativeCall* nativeCall_before(address return_address);

  static bool is_call_at(address instr) {
    return ((*instr) & 0xFF) == NativeCall::instruction_code;
  }

  static bool is_call_before(address return_address) {
    return is_call_at(return_address - NativeCall::return_address_offset);
  }

  static bool is_call_to(address instr, address target) {
    return nativeInstruction_at(instr)->is_call() &&
           nativeCall_at(instr)->destination() == target;
  }

  // MT-safe patching of a call instruction.
  static void insert(address code_pos, address entry);

  static void replace_mt_safe(address instr_addr, address code_buffer);
};

inline NativeCall* nativeCall_at(address address) {
  NativeCall* call = (NativeCall*)(address - NativeCall::instruction_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}

inline NativeCall* nativeCall_before(address return_address) {
  NativeCall* call = (NativeCall*)(return_address - NativeCall::return_address_offset);
#ifdef ASSERT
  call->verify();
#endif
  return call;
}
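
// Illustrative sketch: redirecting an existing call site. The displacement is
// rel32 from the end of the instruction, so on AMD64 the new target must be
// reachable within +/-2GB (set_destination guarantees this). `return_pc` and
// `new_entry` are hypothetical names, not part of this header.
//
//   if (NativeCall::is_call_before(return_pc)) {
//     NativeCall* call = nativeCall_before(return_pc);
//     if (call->destination() != new_entry) {
//       call->set_destination_mt_safe(new_entry);  // patch while other threads run
//     }
//   }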

class NativeCallReg: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xFF,
    instruction_offset          =    0,
    return_address_offset_norex =    2,
    return_address_offset_rex   =    3
  };

  int next_instruction_offset() const  {
    if (ubyte_at(0) == NativeCallReg::instruction_code) {
      return return_address_offset_norex;
    } else {
      return return_address_offset_rex;
    }
  }
};

// An interface for accessing/manipulating native mov reg, imm32 instructions.
// (used to manipulate inlined 32-bit data in dll calls, etc.)
class NativeMovConstReg: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xB8,
    instruction_size            =    1 + rex_size + wordSize,
    instruction_offset          =    0,
    data_offset                 =    1 + rex_size,
    next_instruction_offset     =    instruction_size,
    register_mask               = 0x07
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  intptr_t data() const                     { return ptr_at(data_offset); }
  void  set_data(intptr_t x)                { set_ptr_at(data_offset, x); }

  void  verify();
  void  print();

  // Creation
  inline friend NativeMovConstReg* nativeMovConstReg_at(address address);
  inline friend NativeMovConstReg* nativeMovConstReg_before(address address);
};

inline NativeMovConstReg* nativeMovConstReg_at(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}

inline NativeMovConstReg* nativeMovConstReg_before(address address) {
  NativeMovConstReg* test = (NativeMovConstReg*)(address - NativeMovConstReg::instruction_size - NativeMovConstReg::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
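
// Illustrative sketch: patching the constant embedded in a "mov reg, imm"
// (imm32 on x86, imm64 with a REX prefix on AMD64). `mov_pc` and `new_value`
// are hypothetical names used only for this example.
//
//   NativeMovConstReg* mov = nativeMovConstReg_at(mov_pc);
//   intptr_t old_value = mov->data();   // read the inlined immediate
//   mov->set_data(new_value);           // rewrite it in place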

class NativeMovConstRegPatching: public NativeMovConstReg {
 private:
  friend NativeMovConstRegPatching* nativeMovConstRegPatching_at(address address) {
    NativeMovConstRegPatching* test = (NativeMovConstRegPatching*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// An interface for accessing/manipulating native moves of the form:
//      mov[b/w/l/q] [reg + offset], reg   (instruction_code_reg2mem)
//      mov[b/w/l/q] reg, [reg+offset]     (instruction_code_mem2reg)
//      mov[s/z]x[w/b/q] [reg + offset], reg
//      fld_s  [reg+offset]
//      fld_d  [reg+offset]
//      fstp_s [reg + offset]
//      fstp_d [reg + offset]
//      mov_literal64  scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte, word, or long
// macros. For example: the load_unsigned_byte macro generates
// an xor reg,reg inst prior to generating the movb instruction. This
// class must skip the xor instruction.

class NativeMovRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide_lo          = Assembler::REX,
    instruction_prefix_wide_hi          = Assembler::REX_WRXB,
    instruction_code_xor                = 0x33,
    instruction_extended_prefix         = 0x0F,
    instruction_code_mem2reg_movslq     = 0x63,
    instruction_code_mem2reg_movzxb     = 0xB6,
    instruction_code_mem2reg_movsxb     = 0xBE,
    instruction_code_mem2reg_movzxw     = 0xB7,
    instruction_code_mem2reg_movsxw     = 0xBF,
    instruction_operandsize_prefix      = 0x66,
    instruction_code_reg2mem            = 0x89,
    instruction_code_mem2reg            = 0x8b,
    instruction_code_reg2memb           = 0x88,
    instruction_code_mem2regb           = 0x8a,
    instruction_code_float_s            = 0xd9,
    instruction_code_float_d            = 0xdd,
    instruction_code_long_volatile      = 0xdf,
    instruction_code_xmm_ss_prefix      = 0xf3,
    instruction_code_xmm_sd_prefix      = 0xf2,
    instruction_code_xmm_code           = 0x0f,
    instruction_code_xmm_load           = 0x10,
    instruction_code_xmm_store          = 0x11,
    instruction_code_xmm_lpd            = 0x12,

    instruction_code_lea                = 0x8d,

    instruction_VEX_prefix_2bytes       = Assembler::VEX_2bytes,
    instruction_VEX_prefix_3bytes       = Assembler::VEX_3bytes,
    instruction_EVEX_prefix_4bytes      = Assembler::EVEX_4bytes,

    instruction_offset                  = 0,
    data_offset                         = 2,
    next_instruction_offset             = 4
  };

  // helper
  int instruction_start() const;

  address instruction_address() const {
    return addr_at(instruction_start());
  }

  int num_bytes_to_end_of_patch() const {
    return patch_offset() + sizeof(jint);
  }

  int offset() const {
    return int_at(patch_offset());
  }

  void set_offset(int x) {
    set_int_at(patch_offset(), x);
  }

  void add_offset_in_bytes(int add_offset) {
    int patch_off = patch_offset();
    set_int_at(patch_off, int_at(patch_off) + add_offset);
  }

  void verify();
  void print ();

 private:
  int patch_offset() const;
  inline friend NativeMovRegMem* nativeMovRegMem_at (address address);
};

inline NativeMovRegMem* nativeMovRegMem_at (address address) {
  NativeMovRegMem* test = (NativeMovRegMem*)(address - NativeMovRegMem::instruction_offset);
#ifdef ASSERT
  test->verify();
#endif
  return test;
}
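
// Illustrative sketch: adjusting the memory-operand displacement of a
// mov reg<->mem instruction, e.g. when a field offset changes by a known
// delta. `mov_pc` and `delta_in_bytes` are hypothetical names.
//
//   NativeMovRegMem* mov = nativeMovRegMem_at(mov_pc);
//   int old_disp = mov->offset();              // current [reg + offset] displacement
//   mov->add_offset_in_bytes(delta_in_bytes);  // patch the 32-bit displacement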


// An interface for accessing/manipulating native leal instructions of the form:
//        leal reg, [reg + offset]

class NativeLoadAddress: public NativeMovRegMem {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif // AMD64
 public:
  enum Intel_specific_constants {
    instruction_prefix_wide             = Assembler::REX_W,
    instruction_prefix_wide_extended    = Assembler::REX_WB,
    lea_instruction_code                = 0x8D,
    mov64_instruction_code              = 0xB8
  };

  void verify();
  void print ();

 private:
  friend NativeLoadAddress* nativeLoadAddress_at (address address) {
    NativeLoadAddress* test = (NativeLoadAddress*)(address - instruction_offset);
#ifdef ASSERT
    test->verify();
#endif
    return test;
  }
};

// destination is rbx or rax
// mov rbx, [rip + offset]
class NativeLoadGot: public NativeInstruction {
#ifdef AMD64
  static const bool has_rex = true;
  static const int rex_size = 1;
#else
  static const bool has_rex = false;
  static const int rex_size = 0;
#endif

  enum Intel_specific_constants {
    rex_prefix = 0x48,
    rex_b_prefix = 0x49,
    instruction_code = 0x8b,
    modrm_rbx_code = 0x1d,
    modrm_rax_code = 0x05,
    instruction_length = 6 + rex_size,
    offset_offset = 2 + rex_size
  };

  int rip_offset() const         { return int_at(offset_offset); }
  address return_address() const { return addr_at(instruction_length); }
  address got_address() const    { return return_address() + rip_offset(); }

#ifdef ASSERT
  void report_and_fail() const;
  address instruction_address() const { return addr_at(0); }
#endif

 public:
  address next_instruction_address() const { return return_address(); }
  intptr_t data() const;
  void set_data(intptr_t data) {
    intptr_t *addr = (intptr_t *) got_address();
    *addr = data;
  }

  DEBUG_ONLY( void verify() const );
};

inline NativeLoadGot* nativeLoadGot_at(address addr) {
  NativeLoadGot* load = (NativeLoadGot*) addr;
#ifdef ASSERT
  load->verify();
#endif
  return load;
}

// jump rel32off

class NativeJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xe9,
    instruction_size            =    5,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    5
  };

  address instruction_address() const       { return addr_at(instruction_offset); }
  address next_instruction_address() const  { return addr_at(next_instruction_offset); }
  address jump_destination() const          {
     address dest = (int_at(data_offset)+next_instruction_address());
     // 32-bit used to encode an unresolved jump as "jmp -1"; 64-bit cannot
     // produce that, so it used a jump to self instead. Now both 32-bit and
     // 64-bit use jump-to-self as the unresolved address, which the inline
     // cache code (and relocs) know about.

     // return -1 if jump to self
    dest = (dest == (address) this) ? (address) -1 : dest;
    return dest;
  }

  void  set_jump_destination(address dest)  {
    intptr_t val = dest - next_instruction_address();
    if (dest == (address) -1) {
      val = -5; // jump to self
    }
#ifdef AMD64
    assert((labs(val)  & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
    set_int_at(data_offset, (jint)val);
  }

  // Creation
  inline friend NativeJump* nativeJump_at(address address);

  void verify();

  // Insertion of native jump instruction
  static void insert(address code_pos, address entry);
  // MT-safe insertion of native jump at verified method entry
  static void check_verified_entry_alignment(address entry, address verified_entry);
  static void patch_verified_entry(address entry, address verified_entry, address dest);
};

inline NativeJump* nativeJump_at(address address) {
  NativeJump* jump = (NativeJump*)(address - NativeJump::instruction_offset);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}
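
// Illustrative sketch of the jump-to-self convention above: a rel32 of -5
// makes the 5-byte jump land back on its own opcode, which jump_destination()
// and set_jump_destination() both treat as "unresolved" ((address)-1).
// `jmp_pc` is a hypothetical code address.
//
//   NativeJump* jmp = nativeJump_at(jmp_pc);
//   jmp->set_jump_destination((address) -1);   // encodes rel32 = -5 (jump to self)
//   assert(jmp->jump_destination() == (address) -1, "still unresolved");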

// far jump reg
class NativeFarJump: public NativeInstruction {
 public:
  address jump_destination() const;

  // Creation
  inline friend NativeFarJump* nativeFarJump_at(address address);

  void verify();

};

inline NativeFarJump* nativeFarJump_at(address address) {
  NativeFarJump* jump = (NativeFarJump*)(address);
#ifdef ASSERT
  jump->verify();
#endif
  return jump;
}

// Handles all kinds of jumps on Intel: long/far, conditional/unconditional.
class NativeGeneralJump: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    // Constants do not apply, since the lengths and offsets depend on the
    // actual jump used.
    // Instruction codes:
    //   Unconditional jumps: 0xE9    (rel32off), 0xEB (rel8off)
    //   Conditional jumps:   0x0F8x  (rel32off), 0x7x (rel8off)
    unconditional_long_jump  = 0xe9,
    unconditional_short_jump = 0xeb,
    instruction_size = 5
  };

  address instruction_address() const       { return addr_at(0); }
  address jump_destination() const;

  // Creation
  inline friend NativeGeneralJump* nativeGeneralJump_at(address address);

  // Insertion of native general jump instruction
  static void insert_unconditional(address code_pos, address entry);
  static void replace_mt_safe(address instr_addr, address code_buffer);

  void verify();
};

inline NativeGeneralJump* nativeGeneralJump_at(address address) {
  NativeGeneralJump* jump = (NativeGeneralJump*)(address);
  debug_only(jump->verify();)
  return jump;
}
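
// Illustrative sketch: planting an unconditional jump and reading it back.
// `code_pos` and `stub_entry` are hypothetical names.
//
//   NativeGeneralJump::insert_unconditional(code_pos, stub_entry);
//   NativeGeneralJump* jump = nativeGeneralJump_at(code_pos);
//   assert(jump->jump_destination() == stub_entry, "jump planted");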

class NativeGotJump: public NativeInstruction {
  enum Intel_specific_constants {
    rex_prefix = 0x41,
    instruction_code = 0xff,
    modrm_code = 0x25,
    instruction_size = 6,
    rip_offset = 2
  };

  bool has_rex() const    { return ubyte_at(0) == rex_prefix; }
  int rex_size() const    { return has_rex() ? 1 : 0; }

  address return_address() const  { return addr_at(instruction_size + rex_size()); }
  int got_offset() const  { return (jint) int_at(rip_offset + rex_size()); }

#ifdef ASSERT
  void report_and_fail() const;
  address instruction_address() const { return addr_at(0); }
#endif

 public:
  address got_address() const { return return_address() + got_offset(); }
  address next_instruction_address() const { return return_address(); }
  bool is_GotJump() const     { return ubyte_at(rex_size()) == instruction_code; }

  address destination() const;
  void set_jump_destination(address dest)  {
    address *got_entry = (address *) got_address();
    *got_entry = dest;
  }

  DEBUG_ONLY( void verify() const; )
};

inline NativeGotJump* nativeGotJump_at(address addr) {
  NativeGotJump* jump = (NativeGotJump*)(addr);
  debug_only(jump->verify());
  return jump;
}

class NativePopReg : public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x58,
    instruction_size            =    1,
    instruction_offset          =    0,
    data_offset                 =    1,
    next_instruction_offset     =    1
  };

  // Insert a pop instruction
  static void insert(address code_pos, Register reg);
};


class NativeIllegalInstruction: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0x0B0F,    // Real byte order is: 0x0F, 0x0B
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };

  // Insert illegal opcode at specific address
  static void insert(address code_pos);
};

// return instruction that does not pop values off the stack
class NativeReturn: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC3,
    instruction_size            =    1,
    instruction_offset          =    0,
    next_instruction_offset     =    1
  };
};

// return instruction that does pop values off the stack
class NativeReturnX: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_code            = 0xC2,
    instruction_size            =    2,
    instruction_offset          =    0,
    next_instruction_offset     =    2
  };
};

// Simple test vs memory
class NativeTstRegMem: public NativeInstruction {
 public:
  enum Intel_specific_constants {
    instruction_rex_prefix_mask = 0xF0,
    instruction_rex_prefix      = Assembler::REX,
    instruction_rex_b_prefix    = Assembler::REX_B,
    instruction_code_memXregl   = 0x85,
    modrm_mask                  = 0x38, // select reg from the ModRM byte
    modrm_reg                   = 0x00  // rax
  };
};
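
// Informal note on the shape matched by is_safepoint_poll() below: the JIT's
// safepoint poll is "testl rax, [poll_page]", i.e. opcode 0x85 with the ModRM
// reg field selecting rax, optionally preceded by a REX.B prefix on AMD64.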

inline bool NativeInstruction::is_illegal()      { return (short)int_at(0) == (short)NativeIllegalInstruction::instruction_code; }
inline bool NativeInstruction::is_call()         { return ubyte_at(0) == NativeCall::instruction_code; }
inline bool NativeInstruction::is_call_reg()     { return ubyte_at(0) == NativeCallReg::instruction_code ||
                                                          (ubyte_at(1) == NativeCallReg::instruction_code &&
                                                           (ubyte_at(0) == Assembler::REX || ubyte_at(0) == Assembler::REX_B)); }
inline bool NativeInstruction::is_return()       { return ubyte_at(0) == NativeReturn::instruction_code ||
                                                          ubyte_at(0) == NativeReturnX::instruction_code; }
inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                          ubyte_at(0) == 0xEB; /* short jump */ }
inline bool NativeInstruction::is_jump_reg()     {
  int pos = 0;
  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
}
inline bool NativeInstruction::is_far_jump()     { return is_mov_literal64(); }
inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                          (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
  const bool has_rex_prefix = ubyte_at(0) == NativeTstRegMem::instruction_rex_b_prefix;
  const int test_offset = has_rex_prefix ? 1 : 0;
#else
  const int test_offset = 0;
#endif
  const bool is_test_opcode = ubyte_at(test_offset) == NativeTstRegMem::instruction_code_memXregl;
  const bool is_rax_target = (ubyte_at(test_offset + 1) & NativeTstRegMem::modrm_mask) == NativeTstRegMem::modrm_reg;
  return is_test_opcode && is_rax_target;
}

inline bool NativeInstruction::is_mov_literal64() {
#ifdef AMD64
  return ((ubyte_at(0) == Assembler::REX_W || ubyte_at(0) == Assembler::REX_WB) &&
          (ubyte_at(1) & (0xff ^ NativeMovConstReg::register_mask)) == 0xB8);
#else
  return false;
#endif // AMD64
}

#endif // CPU_X86_NATIVEINST_X86_HPP