1 /*
2 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "gc/shared/cardTableBarrierSet.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "memory/resourceArea.hpp"
32 #include "prims/methodHandles.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/objectMonitor.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #include "utilities/macros.hpp"
39
40 #ifdef PRODUCT
41 #define BLOCK_COMMENT(str) /* nothing */
42 #define STOP(error) stop(error)
43 #else
44 #define BLOCK_COMMENT(str) block_comment(str)
45 #define STOP(error) block_comment(error); stop(error)
46 #endif
47
48 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
49 // Implementation of AddressLiteral
50
// A 2-D table for managing compressed displacement(disp8) on EVEX enabled platforms.
// Indexed as tuple_table[tuple_type + mod_idx][vector_len]; each entry is the
// scale factor N for the disp8*N compressed displacement encoding (0 = not
// applicable for that tuple/vector-length combination).  The row groups mirror
// the EVEX tuple-type tables of the AVX-512 specification (referenced below as
// Table 4.5 / Table 4.6); columns are AVX_128bit, AVX_256bit, AVX_512bit.
unsigned char tuple_table[Assembler::EVEX_ETUP + 1][Assembler::AVX_512bit + 1] = {
  // -----------------Table 4.5 -------------------- //
  16, 32, 64,  // EVEX_FV(0)
  4,  4,  4,   // EVEX_FV(1) - with Evex.b
  16, 32, 64,  // EVEX_FV(2) - with Evex.w
  8,  8,  8,   // EVEX_FV(3) - with Evex.w and Evex.b
  8,  16, 32,  // EVEX_HV(0)
  4,  4,  4,   // EVEX_HV(1) - with Evex.b
  // -----------------Table 4.6 -------------------- //
  16, 32, 64,  // EVEX_FVM(0)
  1,  1,  1,   // EVEX_T1S(0)
  2,  2,  2,   // EVEX_T1S(1)
  4,  4,  4,   // EVEX_T1S(2)
  8,  8,  8,   // EVEX_T1S(3)
  4,  4,  4,   // EVEX_T1F(0)
  8,  8,  8,   // EVEX_T1F(1)
  8,  8,  8,   // EVEX_T2(0)
  0,  16, 16,  // EVEX_T2(1)
  0,  16, 16,  // EVEX_T4(0)
  0,  0,  32,  // EVEX_T4(1)
  0,  0,  32,  // EVEX_T8(0)
  8,  16, 32,  // EVEX_HVM(0)
  4,  8,  16,  // EVEX_QVM(0)
  2,  4,  8,   // EVEX_OVM(0)
  16, 16, 16,  // EVEX_M128(0)
  8,  32, 64,  // EVEX_DUP(0)
  0,  0,  0    // EVEX_NTUP
};
80
AddressLiteral(address target,relocInfo::relocType rtype)81 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
82 _is_lval = false;
83 _target = target;
84 switch (rtype) {
85 case relocInfo::oop_type:
86 case relocInfo::metadata_type:
87 // Oops are a special case. Normally they would be their own section
88 // but in cases like icBuffer they are literals in the code stream that
89 // we don't have a section for. We use none so that we get a literal address
90 // which is always patchable.
91 break;
92 case relocInfo::external_word_type:
93 _rspec = external_word_Relocation::spec(target);
94 break;
95 case relocInfo::internal_word_type:
96 _rspec = internal_word_Relocation::spec(target);
97 break;
98 case relocInfo::opt_virtual_call_type:
99 _rspec = opt_virtual_call_Relocation::spec();
100 break;
101 case relocInfo::static_call_type:
102 _rspec = static_call_Relocation::spec();
103 break;
104 case relocInfo::runtime_call_type:
105 _rspec = runtime_call_Relocation::spec();
106 break;
107 case relocInfo::poll_type:
108 case relocInfo::poll_return_type:
109 _rspec = Relocation::spec_simple(rtype);
110 break;
111 case relocInfo::none:
112 break;
113 default:
114 ShouldNotReachHere();
115 break;
116 }
117 }
118
119 // Implementation of Address
120
121 #ifdef _LP64
122
// 64-bit stub: absolute "array" addressing cannot be encoded on LP64, so this
// must never be reached at runtime.
Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}
129
130 // exceedingly dangerous constructor
Address(int disp,address loc,relocInfo::relocType rtype)131 Address::Address(int disp, address loc, relocInfo::relocType rtype) {
132 _base = noreg;
133 _index = noreg;
134 _scale = no_scale;
135 _disp = disp;
136 _xmmindex = xnoreg;
137 _isxmmindex = false;
138 switch (rtype) {
139 case relocInfo::external_word_type:
140 _rspec = external_word_Relocation::spec(loc);
141 break;
142 case relocInfo::internal_word_type:
143 _rspec = internal_word_Relocation::spec(loc);
144 break;
145 case relocInfo::runtime_call_type:
146 // HMM
147 _rspec = runtime_call_Relocation::spec();
148 break;
149 case relocInfo::poll_type:
150 case relocInfo::poll_return_type:
151 _rspec = Relocation::spec_simple(rtype);
152 break;
153 case relocInfo::none:
154 break;
155 default:
156 ShouldNotReachHere();
157 }
158 }
159 #else // LP64
160
make_array(ArrayAddress adr)161 Address Address::make_array(ArrayAddress adr) {
162 AddressLiteral base = adr.base();
163 Address index = adr.index();
164 assert(index._disp == 0, "must not have disp"); // maybe it can?
165 Address array(index._base, index._index, index._scale, (intptr_t) base.target());
166 array._rspec = base._rspec;
167 return array;
168 }
169
170 // exceedingly dangerous constructor
Address(address loc,RelocationHolder spec)171 Address::Address(address loc, RelocationHolder spec) {
172 _base = noreg;
173 _index = noreg;
174 _scale = no_scale;
175 _disp = (intptr_t) loc;
176 _rspec = spec;
177 _xmmindex = xnoreg;
178 _isxmmindex = false;
179 }
180
181 #endif // _LP64
182
183
184
185 // Convert the raw encoding form into the form expected by the constructor for
186 // Address. An index of 4 (rsp) corresponds to having no index, so convert
187 // that to noreg for the Address constructor.
make_raw(int base,int index,int scale,int disp,relocInfo::relocType disp_reloc)188 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
189 RelocationHolder rspec;
190 if (disp_reloc != relocInfo::none) {
191 rspec = Relocation::spec_simple(disp_reloc);
192 }
193 bool valid_index = index != rsp->encoding();
194 if (valid_index) {
195 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
196 madr._rspec = rspec;
197 return madr;
198 } else {
199 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
200 madr._rspec = rspec;
201 return madr;
202 }
203 }
204
205 // Implementation of Assembler
206
code_fill_byte()207 int AbstractAssembler::code_fill_byte() {
208 return (u_char)'\xF4'; // hlt
209 }
210
211 // make this go away someday
emit_data(jint data,relocInfo::relocType rtype,int format)212 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
213 if (rtype == relocInfo::none)
214 emit_int32(data);
215 else
216 emit_data(data, Relocation::spec_simple(rtype), format);
217 }
218
// Emit a 32-bit datum and record its relocation.  The relocation is anchored
// at the enclosing instruction's start (inst_mark()), not at the datum itself,
// which is why this bypasses AbstractAssembler::relocate.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_int32(data);
}
237
encode(Register r)238 static int encode(Register r) {
239 int enc = r->encoding();
240 if (enc >= 8) {
241 enc -= 8;
242 }
243 return enc;
244 }
245
emit_arith_b(int op1,int op2,Register dst,int imm8)246 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
247 assert(dst->has_byte_register(), "must have byte register");
248 assert(isByte(op1) && isByte(op2), "wrong opcode");
249 assert(isByte(imm8), "not a byte");
250 assert((op1 & 0x01) == 0, "should be 8bit operation");
251 emit_int8(op1);
252 emit_int8(op2 | encode(dst));
253 emit_int8(imm8);
254 }
255
256
emit_arith(int op1,int op2,Register dst,int32_t imm32)257 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
258 assert(isByte(op1) && isByte(op2), "wrong opcode");
259 assert((op1 & 0x01) == 1, "should be 32bit operation");
260 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
261 if (is8bit(imm32)) {
262 emit_int8(op1 | 0x02); // set sign bit
263 emit_int8(op2 | encode(dst));
264 emit_int8(imm32 & 0xFF);
265 } else {
266 emit_int8(op1);
267 emit_int8(op2 | encode(dst));
268 emit_int32(imm32);
269 }
270 }
271
// Force generation of a 4 byte immediate value even if it fits into 8bit
// (needed when the immediate must remain patchable at full width —
// TODO confirm against callers).
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int8(op1);
  emit_int8(op2 | encode(dst));
  emit_int32(imm32);
}
281
282 // immediate-to-memory forms
emit_arith_operand(int op1,Register rm,Address adr,int32_t imm32)283 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
284 assert((op1 & 0x01) == 1, "should be 32bit operation");
285 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
286 if (is8bit(imm32)) {
287 emit_int8(op1 | 0x02); // set sign bit
288 emit_operand(rm, adr, 1);
289 emit_int8(imm32 & 0xFF);
290 } else {
291 emit_int8(op1);
292 emit_operand(rm, adr, 4);
293 emit_int32(imm32);
294 }
295 }
296
297
emit_arith(int op1,int op2,Register dst,Register src)298 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
299 assert(isByte(op1) && isByte(op2), "wrong opcode");
300 emit_int8(op1);
301 emit_int8(op2 | encode(dst) << 3 | encode(src));
302 }
303
304
query_compressed_disp_byte(int disp,bool is_evex_inst,int vector_len,int cur_tuple_type,int in_size_in_bits,int cur_encoding)305 bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len,
306 int cur_tuple_type, int in_size_in_bits, int cur_encoding) {
307 int mod_idx = 0;
308 // We will test if the displacement fits the compressed format and if so
309 // apply the compression to the displacment iff the result is8bit.
310 if (VM_Version::supports_evex() && is_evex_inst) {
311 switch (cur_tuple_type) {
312 case EVEX_FV:
313 if ((cur_encoding & VEX_W) == VEX_W) {
314 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
315 } else {
316 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
317 }
318 break;
319
320 case EVEX_HV:
321 mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
322 break;
323
324 case EVEX_FVM:
325 break;
326
327 case EVEX_T1S:
328 switch (in_size_in_bits) {
329 case EVEX_8bit:
330 break;
331
332 case EVEX_16bit:
333 mod_idx = 1;
334 break;
335
336 case EVEX_32bit:
337 mod_idx = 2;
338 break;
339
340 case EVEX_64bit:
341 mod_idx = 3;
342 break;
343 }
344 break;
345
346 case EVEX_T1F:
347 case EVEX_T2:
348 case EVEX_T4:
349 mod_idx = (in_size_in_bits == EVEX_64bit) ? 1 : 0;
350 break;
351
352 case EVEX_T8:
353 break;
354
355 case EVEX_HVM:
356 break;
357
358 case EVEX_QVM:
359 break;
360
361 case EVEX_OVM:
362 break;
363
364 case EVEX_M128:
365 break;
366
367 case EVEX_DUP:
368 break;
369
370 default:
371 assert(0, "no valid evex tuple_table entry");
372 break;
373 }
374
375 if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
376 int disp_factor = tuple_table[cur_tuple_type + mod_idx][vector_len];
377 if ((disp % disp_factor) == 0) {
378 int new_disp = disp / disp_factor;
379 if ((-0x80 <= new_disp && new_disp < 0x80)) {
380 disp = new_disp;
381 }
382 } else {
383 return false;
384 }
385 }
386 }
387 return (-0x80 <= disp && disp < 0x80);
388 }
389
390
// If the current EVEX instruction admits the compressed disp8*N encoding and
// "disp" is an exact multiple of the tuple's scale factor, rewrite "disp" in
// place to the scaled-down value.  Returns true iff the (possibly rewritten)
// displacement fits in a signed byte, i.e. can be emitted as disp8.
bool Assembler::emit_compressed_disp_byte(int &disp) {
  int mod_idx = 0;
  // We will test if the displacement fits the compressed format and if so
  // apply the compression to the displacment iff the result is8bit.
  if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) {
    int evex_encoding = _attributes->get_evex_encoding();
    int tuple_type = _attributes->get_tuple_type();
    // Select the row offset within the tuple's group in tuple_table.
    switch (tuple_type) {
    case EVEX_FV:
      if ((evex_encoding & VEX_W) == VEX_W) {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
      } else {
        mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      }
      break;

    case EVEX_HV:
      mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
      break;

    case EVEX_FVM:
      break;

    case EVEX_T1S:
      switch (_attributes->get_input_size()) {
      case EVEX_8bit:
        break;

      case EVEX_16bit:
        mod_idx = 1;
        break;

      case EVEX_32bit:
        mod_idx = 2;
        break;

      case EVEX_64bit:
        mod_idx = 3;
        break;
      }
      break;

    case EVEX_T1F:
    case EVEX_T2:
    case EVEX_T4:
      mod_idx = (_attributes->get_input_size() == EVEX_64bit) ? 1 : 0;
      break;

    case EVEX_T8:
      break;

    case EVEX_HVM:
      break;

    case EVEX_QVM:
      break;

    case EVEX_OVM:
      break;

    case EVEX_M128:
      break;

    case EVEX_DUP:
      break;

    default:
      assert(0, "no valid evex tuple_table entry");
      break;
    }

    int vector_len = _attributes->get_vector_len();
    if (vector_len >= AVX_128bit && vector_len <= AVX_512bit) {
      int disp_factor = tuple_table[tuple_type + mod_idx][vector_len];
      if ((disp % disp_factor) == 0) {
        int new_disp = disp / disp_factor;
        if (is8bit(new_disp)) {
          // Compression succeeded: caller will emit new_disp as disp8.
          disp = new_disp;
        }
      } else {
        // Not a multiple of the scale factor: disp8*N cannot represent it.
        return false;
      }
    }
  }
  return is8bit(disp);
}
477
478
// Emit the ModRM byte, an optional SIB byte, and any displacement bytes for a
// memory operand [base + index*scale + disp].  "reg" supplies the reg field of
// the ModRM byte.  rip_relative_correction adjusts the RIP-relative
// displacement for immediate bytes that follow the operand in the instruction.
// Note the special cases: rbp/r13 as base cannot use the no-displacement form
// (that mod=00 encoding means disp32/RIP-relative), and rsp/r12 as base always
// requires a SIB byte.
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x04 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x44 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_int8(0x84 | regenc);
        emit_int8(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]  -- rsp/r12 as base always needs a SIB byte
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_int8(0x04 | regenc);
        emit_int8(0x24);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_int8(0x44 | regenc);
        emit_int8(0x24);
        emit_int8(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_int8(0x84 | regenc);
        emit_int8(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_int8(0x00 | regenc | baseenc);
      } else if (emit_compressed_disp_byte(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_int8(0x40 | regenc | baseenc);
        emit_int8(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_int8(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_int8(0x04 | regenc);
      emit_int8(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_int8(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_int8(0x04 | regenc);
      emit_int8(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
596
emit_operand(XMMRegister reg,Register base,Register index,Address::ScaleFactor scale,int disp,RelocationHolder const & rspec)597 void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
598 Address::ScaleFactor scale, int disp,
599 RelocationHolder const& rspec) {
600 if (UseAVX > 2) {
601 int xreg_enc = reg->encoding();
602 if (xreg_enc > 15) {
603 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
604 emit_operand((Register)new_reg, base, index, scale, disp, rspec);
605 return;
606 }
607 }
608 emit_operand((Register)reg, base, index, scale, disp, rspec);
609 }
610
emit_operand(XMMRegister reg,Register base,XMMRegister index,Address::ScaleFactor scale,int disp,RelocationHolder const & rspec)611 void Assembler::emit_operand(XMMRegister reg, Register base, XMMRegister index,
612 Address::ScaleFactor scale, int disp,
613 RelocationHolder const& rspec) {
614 if (UseAVX > 2) {
615 int xreg_enc = reg->encoding();
616 int xmmindex_enc = index->encoding();
617 XMMRegister new_reg = as_XMMRegister(xreg_enc & 0xf);
618 XMMRegister new_index = as_XMMRegister(xmmindex_enc & 0xf);
619 emit_operand((Register)new_reg, base, (Register)new_index, scale, disp, rspec);
620 } else {
621 emit_operand((Register)reg, base, (Register)index, scale, disp, rspec);
622 }
623 }
624
625
626 // Secret local extension to Assembler::WhichOperand:
627 #define end_pc_operand (_WhichOperand_limit)
628
// Decode the instruction starting at "inst" far enough to find the requested
// 32-bit operand (or, for end_pc_operand, the instruction's end).  Several
// case labels deliberately fall through to share tail-size / operand handling;
// those spots are annotated below.
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
      // intentional fallthrough: 0x3A also has a 3rd opcode byte like 0x38
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
      // intentional fallthrough: both take a trailing imm8
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
      // intentional fallthrough: both take a trailing imm8
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and processed when 0x0F processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1;  // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and exmaine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0:                    // Lock
    goto again_after_prefix;

  case 0xF3:                    // For SSE
  case 0xF2:                    // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
      // falls into default: one more byte (the opcode) is skipped below
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
1079
// Returns the address of the instruction that follows 'inst', by asking
// locate_operand() for the end-of-instruction position.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
1084
1085
1086 #ifdef ASSERT
// Debug-only sanity check: verifies that a relocation recorded at the
// current inst_mark() points exactly at the operand position that
// locate_operand() computes for this instruction, i.e. at the current pc().
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    // Nothing to check for a null relocation.
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
1108 #endif // ASSERT
1109
// Emit ModRM/SIB/disp bytes for (reg, adr).  32-bit-only form: registers
// that would need a REX prefix are disallowed.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
1116
// Emit ModRM/SIB/disp bytes for (reg, adr).
// rip_relative_correction: presumably compensates a RIP-relative disp32 for
// immediate bytes emitted after the operand — confirm against the
// (Register, int, ...) overload's implementation.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
1123
emit_operand(XMMRegister reg,Address adr)1124 void Assembler::emit_operand(XMMRegister reg, Address adr) {
1125 if (adr.isxmmindex()) {
1126 emit_operand(reg, adr._base, adr._xmmindex, adr._scale, adr._disp, adr._rspec);
1127 } else {
1128 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
1129 adr._rspec);
1130 }
1131 }
1132
1133 // MMX operations
// Emit ModRM/SIB/disp for an MMX register operand; MMX registers reuse the
// general-register emit path (no REX-needing registers allowed).
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
1138
1139 // work around gcc (3.2.1-7a) bug
// Same as emit_operand(MMXRegister, Address) with arguments swapped;
// exists only to work around the gcc bug noted above.
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
1144
1145
// Emit a two-byte x87 instruction: opcode byte b1, then b2 plus the
// FP-register-stack index i (0..7).
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_int8(b1);
  emit_int8(b2 + i);
}
1152
1153
1154 // Now the Assembler instructions (identical for 32/64 bits)
1155
// adc dword ptr [dst], imm32 — group-1 opcode 0x81 with /2 extension
// (encoded via rdx).
void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}
1161
// adc dword ptr [dst], src — opcode 0x11 /r.
void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x11);
  emit_operand(src, dst);
}
1168
// adc dst, imm32 — group-1 0x81; 0xD0 is the register-form ModRM base
// selecting the /2 (adc) extension.
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
1173
// adc dst, dword ptr [src] — opcode 0x13 /r.
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x13);
  emit_operand(dst, src);
}
1180
// adc dst, src — opcode 0x13, register-register form.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
1185
// add dword ptr [dst], imm32 — group-1 0x81 with /0 extension (rax).
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
1191
// add byte ptr [dst], imm8 — group-1 byte opcode 0x80 /0.
// The trailing '1' tells emit_operand that one immediate byte follows
// (needed for RIP-relative displacement correction).
void Assembler::addb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rax, dst, 1);
  emit_int8(imm8);
}
1199
// add word ptr [dst], imm16 — 0x66 operand-size prefix, then 0x81 /0.
// The trailing '2' tells emit_operand that two immediate bytes follow.
void Assembler::addw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}
1208
// add dword ptr [dst], src — opcode 0x01 /r.
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x01);
  emit_operand(src, dst);
}
1215
// add dst, imm32 — group-1 0x81; 0xC0 selects the /0 (add) register form.
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
1220
// add dst, dword ptr [src] — opcode 0x03 /r.
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x03);
  emit_operand(dst, src);
}
1227
// add dst, src — opcode 0x03, register-register form.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
1232
// Emit a 4-byte multi-byte NOP (used for code alignment padding).
void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_int8(0);    // 8-bits offset (1 byte)
}
1241
// Emit a 5-byte multi-byte NOP (used for code alignment padding).
void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8(0x44);  // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_int8(0x00);  // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int8(0);     // 8-bits offset (1 byte)
}
1251
// Emit a 7-byte multi-byte NOP (used for code alignment padding).
void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x80);
                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
1261
// Emit an 8-byte multi-byte NOP (used for code alignment padding).
void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_int8(0x0F);
  emit_int8(0x1F);
  emit_int8((unsigned char)0x84);
                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_int32(0);   // 32-bits offset (4 bytes)
}
1272
// addsd dst, src — scalar double-precision add (F2 0F 58), register form.
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
1281
// addsd dst, qword ptr [src] — scalar double-precision add (F2 0F 58),
// memory form.
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
1292
// addss dst, src — scalar single-precision add (F3 0F 58), register form.
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
1300
// addss dst, dword ptr [src] — scalar single-precision add (F3 0F 58),
// memory form.
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
1310
// aesdec dst, m128 — one AES decryption round (66 0F 38 DE), memory form.
void Assembler::aesdec(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_operand(dst, src);
}
1319
aesdec(XMMRegister dst,XMMRegister src)1320 void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
1321 assert(VM_Version::supports_aes(), "");
1322 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
1323 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1324 emit_int8((unsigned char)0xDE);
1325 emit_int8(0xC0 | encode);
1326 }
1327
// vaesdec dst, nds, src — EVEX-encoded vector AES decryption round over
// vector_len lanes (requires VAES support).
void Assembler::vaesdec(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)(0xC0 | encode));
}
1336
1337
// aesdeclast dst, m128 — final AES decryption round (66 0F 38 DF),
// memory form.
void Assembler::aesdeclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_operand(dst, src);
}
1346
// aesdeclast dst, src — final AES decryption round (66 0F 38 DF),
// register form.
void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}
1354
// vaesdeclast dst, nds, src — EVEX-encoded vector final AES decryption
// round over vector_len lanes (requires VAES support).
void Assembler::vaesdeclast(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_vaes(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}
1363
// aesenc dst, m128 — one AES encryption round (66 0F 38 DC), memory form.
void Assembler::aesenc(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDC);
  emit_operand(dst, src);
}
1372
aesenc(XMMRegister dst,XMMRegister src)1373 void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
1374 assert(VM_Version::supports_aes(), "");
1375 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
1376 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
1377 emit_int8((unsigned char)0xDC);
1378 emit_int8(0xC0 | encode);
1379 }
1380
// aesenclast dst, m128 — final AES encryption round (66 0F 38 DD),
// memory form.
void Assembler::aesenclast(XMMRegister dst, Address src) {
  assert(VM_Version::supports_aes(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_operand(dst, src);
}
1389
// aesenclast dst, src — final AES encryption round (66 0F 38 DD),
// register form.
void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_aes(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)(0xC0 | encode));
}
1397
// and dword ptr [dst], imm32 — group-1 0x81 with /4 extension (rsp).
// NOTE(review): unlike adcl/addl this spells out the full 0x81+imm32
// encoding instead of using emit_arith_operand, so it never emits the
// shorter sign-extended imm8 form — verify whether callers rely on the
// fixed length before changing.
void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rsp, dst, 4);
  emit_int32(imm32);
}
1405
// and dst, imm32 — group-1 0x81; 0xE0 selects the /4 (and) register form.
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}
1410
// and dst, dword ptr [src] — opcode 0x23 /r.
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x23);
  emit_operand(dst, src);
}
1417
// and dst, src — opcode 0x23, register-register form.
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
1422
// andn dst, src1, src2 — BMI1 ANDN (VEX 0F38 F2): dst = ~src1 & src2,
// register form.
void Assembler::andnl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}
1430
// andn dst, src1, dword ptr [src2] — BMI1 ANDN (VEX 0F38 F2), memory form.
void Assembler::andnl(Register dst, Register src1, Address src2) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_operand(dst, src2);
}
1439
// bsf dst, src — bit scan forward (0F BC), register form.
void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
1446
// bsr dst, src — bit scan reverse (0F BD), register form.
void Assembler::bsrl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}
1453
// bswap reg — byte-swap a 32-bit register (0F C8+rd).
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)(0xC8 | encode));
}
1459
// blsi dst, src — BMI1 extract-lowest-set-bit (VEX 0F38 F3 /3; the /3
// extension is encoded via rbx), register form.
void Assembler::blsil(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
1467
// blsi dst, dword ptr [src] — BMI1 extract-lowest-set-bit (VEX 0F38 F3 /3),
// memory form.
void Assembler::blsil(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rbx, src);
}
1476
// blsmsk dst, src — BMI1 mask-up-to-lowest-set-bit (VEX 0F38 F3 /2; the /2
// extension is encoded via rdx), register form.
void Assembler::blsmskl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
1484
// blsmsk dst, dword ptr [src] — BMI1 mask-up-to-lowest-set-bit
// (VEX 0F38 F3 /2), memory form.
void Assembler::blsmskl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rdx, src);
}
1493
// blsr dst, src — BMI1 reset-lowest-set-bit (VEX 0F38 F3 /1; the /1
// extension is encoded via rcx), register form.
void Assembler::blsrl(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
1501
// blsr dst, dword ptr [src] — BMI1 reset-lowest-set-bit (VEX 0F38 F3 /1),
// memory form.
void Assembler::blsrl(Register dst, Address src) {
  assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_operand(rcx, src);
}
1510
// Emit a direct CALL rel32 (0xE8) to a label.  A bound label must lie at
// or before the current pc (backward call); an unbound label gets a patch
// record and a zero placeholder displacement.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;  // total length of CALL rel32
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_int8((unsigned char)0xE8);
    // Displacement is relative to the end of this 5-byte instruction.
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_int8((unsigned char)0xE8);
    emit_data(int(0), rtype, operand);
  }
}
1532
// call dst — indirect call through a register (FF /2; 0xD0 is the
// register-form ModRM base).
void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xD0 | encode));
}
1538
1539
// call [adr] — indirect call through memory (FF /2; the /2 extension is
// encoded via rdx).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rdx, adr);
}
1546
// Emit CALL rel32 (0xE8) to an absolute entry address, recording the given
// relocation.  The displacement must fit in 32 bits relative to the end of
// the instruction.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  // Entry is NULL in case of a scratch emit.
  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
1559
// cdq — sign-extend EAX into EDX:EAX (opcode 0x99).
void Assembler::cdql() {
  emit_int8((unsigned char)0x99);
}
1563
// cld — clear the direction flag (opcode 0xFC).
void Assembler::cld() {
  emit_int8((unsigned char)0xFC);
}
1567
// cmovcc dst, src — conditional move (0F 40+cc), register form.
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_int8((unsigned char)(0xC0 | encode));
}
1575
1576
// cmovcc dst, dword ptr [src] — conditional move (0F 40+cc), memory form.
// NOTE(review): no InstructionMark here, unlike most memory-operand
// emitters — presumably fine because cmov is never used with relocatable
// operands; confirm before relying on relocation for this instruction.
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8(0x40 | cc);
  emit_operand(dst, src);
}
1584
// cmp byte ptr [dst], imm8 — group-1 byte opcode 0x80 with /7 extension
// (rdi); the trailing '1' tells emit_operand an imm8 follows.
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rdi, dst, 1);
  emit_int8(imm8);
}
1592
// cmp dword ptr [dst], imm32 — group-1 0x81 with /7 extension (rdi).
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 4);
  emit_int32(imm32);
}
1600
// cmp dst, imm32 — group-1 0x81; 0xF8 selects the /7 (cmp) register form.
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}
1605
// cmp dst, src — opcode 0x3B, register-register form.
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
1610
// cmp dst, dword ptr [src] — opcode 0x3B /r.
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x3B);
  emit_operand(dst, src);
}
1617
// cmp word ptr [dst], imm16 — 0x66 operand-size prefix, then 0x81 /7 (rdi).
// No REX prefix is emitted, so extended registers are disallowed by the
// assert.  The trailing '2' tells emit_operand two immediate bytes follow.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_int8(0x66);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, dst, 2);
  emit_int16(imm16);
}
1626
1627 // The 32-bit cmpxchg compares the value at adr with the contents of rax,
1628 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
1629 // The ZF is set if the compared values were equal, and cleared otherwise.
// cmpxchg dword ptr [adr], reg — compare-and-exchange with EAX (0F B1 /r);
// see the contract comment above.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB1);
  emit_operand(reg, adr);
}
1637
1638 // The 8-bit cmpxchg compares the value at adr with the contents of rax,
1639 // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
1640 // The ZF is set if the compared values were equal, and cleared otherwise.
// cmpxchg byte ptr [adr], reg — byte compare-and-exchange (0F B0 /r);
// the 'true' asks prefix() for byte-register encoding.
void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
  InstructionMark im(this);
  prefix(adr, reg, true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB0);
  emit_operand(reg, adr);
}
1648
comisd(XMMRegister dst,Address src)1649 void Assembler::comisd(XMMRegister dst, Address src) {
1650 // NOTE: dbx seems to decode this as comiss even though the
1651 // 0x66 is there. Strangly ucomisd comes out correct
1652 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1653 InstructionMark im(this);
1654 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);;
1655 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
1656 attributes.set_rex_vex_w_reverted();
1657 simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
1658 emit_int8(0x2F);
1659 emit_operand(dst, src);
1660 }
1661
// comisd dst, src — ordered scalar double compare setting EFLAGS
// (66 0F 2F), register form.
void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_int8((unsigned char)(0xC0 | encode));
}
1670
// comiss dst, dword ptr [src] — ordered scalar single compare setting
// EFLAGS (0F 2F), memory form.
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src);
}
1680
// comiss dst, src — ordered scalar single compare setting EFLAGS (0F 2F),
// register form.
void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_int8((unsigned char)(0xC0 | encode));
}
1688
// cpuid — processor identification (0F A2).
void Assembler::cpuid() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA2);
}
1693
1694 // Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented
1695 // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v
1696 // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. -
1697 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8. -
1698 //
1699 // F2 0F 38 F1 / r CRC32 r32, r / m16 RM Valid Valid Accumulate CRC32 on r / m16. v
1700 //
1701 // F2 0F 38 F1 / r CRC32 r32, r / m32 RM Valid Valid Accumulate CRC32 on r / m32. v
1702 //
1703 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v
crc32(Register crc,Register v,int8_t sizeInBytes)1704 void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
1705 assert(VM_Version::supports_sse4_2(), "");
1706 int8_t w = 0x01;
1707 Prefix p = Prefix_EMPTY;
1708
1709 emit_int8((int8_t)0xF2);
1710 switch (sizeInBytes) {
1711 case 1:
1712 w = 0;
1713 break;
1714 case 2:
1715 case 4:
1716 break;
1717 LP64_ONLY(case 8:)
1718 // This instruction is not valid in 32 bits
1719 // Note:
1720 // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf
1721 //
1722 // Page B - 72 Vol. 2C says
1723 // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2
1724 // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m
1725 // F0!!!
1726 // while 3 - 208 Vol. 2A
1727 // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64.
1728 //
1729 // the 0 on a last bit is reserved for a different flavor of this instruction :
1730 // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8.
1731 p = REX_W;
1732 break;
1733 default:
1734 assert(0, "Unsupported value for a sizeInBytes argument");
1735 break;
1736 }
1737 LP64_ONLY(prefix(crc, v, p);)
1738 emit_int8((int8_t)0x0F);
1739 emit_int8(0x38);
1740 emit_int8((int8_t)(0xF0 | w));
1741 emit_int8(0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
1742 }
1743
// crc32 crc, [adr] — accumulate CRC32 over a 1/2/4 (or 8 on LP64) byte
// memory operand (F2 [REX.W] 0F 38 F0|F1 /r).
void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  int8_t w = 0x01;          // opcode low bit: 0 = r/m8 form, 1 = r/m16/32/64
  Prefix p = Prefix_EMPTY;

  emit_int8((int8_t)0xF2);
  switch (sizeInBytes) {
  case 1:
    w = 0;
    break;
  case 2:
  case 4:
    break;
  LP64_ONLY(case 8:)
    // This instruction is not valid in 32 bits
    p = REX_W;
    break;
  default:
    assert(0, "Unsupported value for a sizeInBytes argument");
    break;
  }
  LP64_ONLY(prefix(crc, adr, p);)
  emit_int8((int8_t)0x0F);
  emit_int8(0x38);
  emit_int8((int8_t)(0xF0 | w));
  emit_operand(crc, adr);
}
1772
// CVTDQ2PD xmm, xmm  -- packed signed dword -> packed double conversion.
// Opcode: F3 0F E6 /r (VEX/EVEX-encoded via simd_prefix_and_encode).
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE6);
  emit_int8((unsigned char)(0xC0 | encode));   // register-direct ModRM
}

// CVTDQ2PS xmm, xmm  -- packed signed dword -> packed single conversion.
// Opcode: 0F 5B /r (no SIMD prefix).
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5B);
  emit_int8((unsigned char)(0xC0 | encode));   // register-direct ModRM
}
1788
// CVTSD2SS xmm, xmm  -- scalar double -> scalar single. Opcode: F2 0F 5A /r.
// dst doubles as the first (merged) source, hence simd_prefix(dst, dst, src).
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSD2SS xmm, m64  -- memory-source form of the above.
void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}

// CVTSI2SD xmm, r32  -- signed dword integer -> scalar double. Opcode: F2 0F 2A /r.
// The GPR source is re-encoded as an XMM register number for the ModRM byte.
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SD xmm, m32  -- memory-source form of the above.
void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTSI2SS xmm, r32  -- signed dword integer -> scalar single. Opcode: F3 0F 2A /r.
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSI2SS xmm, m32  -- memory-source form of the above.
void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_operand(dst, src);
}

// CVTSI2SS xmm, r64  -- signed qword integer -> scalar single (rex_w = true).
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSS2SD xmm, xmm  -- scalar single -> scalar double. Opcode: F3 0F 5A /r.
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTSS2SD xmm, m32  -- memory-source form of the above.
void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}
1870
1871
// CVTTSD2SI r32, xmm  -- truncating scalar double -> signed dword.
// Opcode: F2 0F 2C /r; the GPR destination is re-encoded as an XMM number.
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTTSS2SI r32, xmm  -- truncating scalar single -> signed dword.
// Opcode: F3 0F 2C /r.
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// CVTTPD2DQ xmm, xmm  -- truncating packed double -> packed dword.
// Opcode: 66 0F E6 /r. Parts without AVX512VL must use the 512-bit form.
void Assembler::cvttpd2dq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE6);
  emit_int8((unsigned char)(0xC0 | encode));
}
1896
// DEC m32  -- decrement a 32-bit memory operand. Opcode: FF /1
// (the /1 reg-field extension is passed as rcx to emit_operand).
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rcx, dst);
}
1904
// DIVSD xmm, m64  -- scalar double divide, memory source. Opcode: F2 0F 5E /r.
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// DIVSD xmm, xmm  -- scalar double divide, register source.
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// DIVSS xmm, m32  -- scalar single divide, memory source. Opcode: F3 0F 5E /r.
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// DIVSS xmm, xmm  -- scalar single divide, register source.
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}
1942
// EMMS  -- empty MMX technology state. Opcode: 0F 77.
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_int8(0x0F);
  emit_int8(0x77);
}

// HLT  -- halt the processor. Opcode: F4.
void Assembler::hlt() {
  emit_int8((unsigned char)0xF4);
}
1952
// IDIV r32  -- signed divide EDX:EAX by src. Opcode: F7 /7.
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF8 | encode));
}

// DIV r32  -- unsigned divide EDX:EAX by src. Opcode: F7 /6.
void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xF0 | encode));
}

// IMUL r32  -- one-operand signed multiply of EAX by src. Opcode: F7 /5.
void Assembler::imull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE8 | encode));
}

// IMUL r32, r32  -- two-operand signed multiply. Opcode: 0F AF /r.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAF);
  emit_int8((unsigned char)(0xC0 | encode));
}


// IMUL r32, r32, imm  -- three-operand signed multiply by an immediate.
// Uses the short 6B /r ib form when the immediate fits in 8 bits, else 69 /r id.
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_int8(0x6B);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int8(value & 0xFF);
  } else {
    emit_int8(0x69);
    emit_int8((unsigned char)(0xC0 | encode));
    emit_int32(value);
  }
}

// IMUL r32, m32  -- two-operand signed multiply, memory source. Opcode: 0F AF /r.
void Assembler::imull(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char) 0xAF);
  emit_operand(dst, src);
}
1999
2000
// INC m32  -- increment a 32-bit memory operand. Opcode: FF /0
// (the /0 reg-field extension is passed as rax to emit_operand).
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xFF);
  emit_operand(rax, dst);
}
2008
// Jcc  -- conditional jump to a label. For a bound label the displacement is
// known: emit the 2-byte short form (70+cc cb) when maybe_short and the offset
// fits in 8 bits, else the 6-byte near form (0F 80+cc cd). For an unbound
// label, always emit the near form with a zero displacement and register a
// patch site so the displacement is filled in when the label is bound.
void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    // Displacements are relative to the end of the instruction, hence the
    // '- short_size' / '- long_size' adjustments below.
    intptr_t offs = (intptr_t)dst - (intptr_t)pc();
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_int8(0x70 | cc);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_int8(0x0F);
      emit_int8((unsigned char)(0x80 | cc));
      emit_int32(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    //       is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    //       an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_int8(0x0F);
    emit_int8((unsigned char)(0x80 | cc));
    emit_int32(0);
  }
}
2042
jccb_0(Condition cc,Label & L,const char * file,int line)2043 void Assembler::jccb_0(Condition cc, Label& L, const char* file, int line) {
2044 if (L.is_bound()) {
2045 const int short_size = 2;
2046 address entry = target(L);
2047 #ifdef ASSERT
2048 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
2049 intptr_t delta = short_branch_delta();
2050 if (delta != 0) {
2051 dist += (dist < 0 ? (-delta) :delta);
2052 }
2053 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line);
2054 #endif
2055 intptr_t offs = (intptr_t)entry - (intptr_t)pc();
2056 // 0111 tttn #8-bit disp
2057 emit_int8(0x70 | cc);
2058 emit_int8((offs - short_size) & 0xFF);
2059 } else {
2060 InstructionMark im(this);
2061 L.add_patch_at(code(), locator(), file, line);
2062 emit_int8(0x70 | cc);
2063 emit_int8(0);
2064 }
2065 }
2066
// JMP m  -- indirect jump through a memory operand. Opcode: FF /4
// (the /4 reg-field extension is passed as rsp to emit_operand).
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsp, adr);
}

// JMP rel  -- direct jump to a label. Bound labels get the 2-byte short form
// (EB cb) when maybe_short and the offset fits in 8 bits, else the 5-byte
// near form (E9 cd). Unbound labels always get the near form with a zero
// displacement and a patch record.
void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    // Displacement is relative to the end of the instruction.
    intptr_t offs = entry - pc();
    if (maybe_short && is8bit(offs - short_size)) {
      emit_int8((unsigned char)0xEB);
      emit_int8((offs - short_size) & 0xFF);
    } else {
      emit_int8((unsigned char)0xE9);
      emit_int32(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound.  If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xE9);
    emit_int32(0);
  }
}

// JMP r  -- indirect jump through a register. Opcode: FF /4 (register form).
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_int8((unsigned char)0xFF);
  emit_int8((unsigned char)(0xE0 | encode));
}

// JMP rel32 to an absolute address, with relocation info attached to the
// 32-bit displacement so the target survives code movement.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (pc() + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
2115
jmpb_0(Label & L,const char * file,int line)2116 void Assembler::jmpb_0(Label& L, const char* file, int line) {
2117 if (L.is_bound()) {
2118 const int short_size = 2;
2119 address entry = target(L);
2120 assert(entry != NULL, "jmp most probably wrong");
2121 #ifdef ASSERT
2122 intptr_t dist = (intptr_t)entry - ((intptr_t)pc() + short_size);
2123 intptr_t delta = short_branch_delta();
2124 if (delta != 0) {
2125 dist += (dist < 0 ? (-delta) :delta);
2126 }
2127 assert(is8bit(dist), "Dispacement too large for a short jmp at %s:%d", file, line);
2128 #endif
2129 intptr_t offs = entry - pc();
2130 emit_int8((unsigned char)0xEB);
2131 emit_int8((offs - short_size) & 0xFF);
2132 } else {
2133 InstructionMark im(this);
2134 L.add_patch_at(code(), locator(), file, line);
2135 emit_int8((unsigned char)0xEB);
2136 emit_int8(0);
2137 }
2138 }
2139
// LDMXCSR m32  -- load the MXCSR control/status register. Opcode: 0F AE /2;
// emitted VEX-encoded (VLDMXCSR) when AVX is enabled, legacy-encoded otherwise.
void Assembler::ldmxcsr( Address src) {
  if (UseAVX > 0 ) {
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(src, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src);  // /2 reg-field extension
  } else {
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(src);
    emit_int8(0x0F);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(2), src);  // /2 reg-field extension
  }
}

// LEA r32, m  -- load effective address. Opcode: 8D /r. On 64-bit the 0x67
// address-size prefix forces 32-bit effective-address computation.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_int8(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_int8((unsigned char)0x8D);
  emit_operand(dst, src);
}
2166
// LFENCE  -- load fence. Opcode: 0F AE E8.
void Assembler::lfence() {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xE8);
}

// LOCK prefix byte (F0) for the following instruction.
void Assembler::lock() {
  emit_int8((unsigned char)0xF0);
}
2176
// LZCNT r32, r32  -- count leading zeros. Opcode: F3 0F BD /r. Without LZCNT
// support the F3 prefix is ignored and the bytes decode as BSR, hence the assert.
void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBD);
  emit_int8((unsigned char)(0xC0 | encode));
}

// Emit mfence instruction
// MFENCE  -- full memory fence. Opcode: 0F AE F0.
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAE);
  emit_int8((unsigned char)0xF0);
}
2193
// Pointer-sized register move: movq on 64-bit, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// MOVAPD xmm, xmm  -- aligned packed-double move. Opcode: 66 0F 28 /r.
// Parts without AVX512VL must use the 512-bit form.
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x28);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVAPS xmm, xmm  -- aligned packed-single move. Opcode: 0F 28 /r.
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x28);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOVLHPS xmm, xmm  -- move low qword of src into high qword of dst.
// Opcode: 0F 16 /r. Note src is passed as both nds and rm operand.
void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
}

// MOV r8, m8  -- byte load. Opcode: 8A /r. The 'true' argument to prefix()
// requests byte-register handling.
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_int8((unsigned char)0x8A);
  emit_operand(dst, src);
}
2232
movddup(XMMRegister dst,XMMRegister src)2233 void Assembler::movddup(XMMRegister dst, XMMRegister src) {
2234 NOT_LP64(assert(VM_Version::supports_sse3(), ""));
2235 int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
2236 InstructionAttr attributes(vector_len, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
2237 attributes.set_rex_vex_w_reverted();
2238 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
2239 emit_int8(0x12);
2240 emit_int8(0xC0 | encode);
2241 }
2242
// KMOVB k, r32  -- move byte mask from GPR to mask register. Opcode: VEX 66 0F 92 /r.
void Assembler::kmovbl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVB r32, k  -- move byte mask from mask register to GPR. Opcode: VEX 66 0F 93 /r.
void Assembler::kmovbl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x93);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVW k, r32  -- word mask, GPR -> mask register. Opcode: VEX 0F 92 /r.
void Assembler::kmovwl(KRegister dst, Register src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVW r32, k  -- word mask, mask register -> GPR. Opcode: VEX 0F 93 /r.
void Assembler::kmovwl(Register dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x93);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVW k, m16  -- word mask load from memory. Opcode: VEX 0F 90 /r.
void Assembler::kmovwl(KRegister dst, Address src) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)dst, src);  // reuse the GPR operand-emitter for the k-register
}

// KMOVD k, r32  -- dword mask, GPR -> mask register. Opcode: VEX F2 0F 92 /r.
void Assembler::kmovdl(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVD r32, k  -- dword mask, mask register -> GPR. Opcode: VEX F2 0F 93 /r.
void Assembler::kmovdl(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x93);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVQ k, k  -- qword mask register-to-register move. Opcode: VEX.W 0F 90 /r.
void Assembler::kmovql(KRegister dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVQ k, m64  -- qword mask load from memory.
void Assembler::kmovql(KRegister dst, Address src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)dst, src);
}

// KMOVQ m64, k  -- qword mask store to memory.
void Assembler::kmovql(Address dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand((Register)src, dst);
}

// KMOVQ k, r64  -- qword mask, GPR -> mask register. Opcode: VEX.W F2 0F 92 /r.
void Assembler::kmovql(KRegister dst, Register src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x92);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KMOVQ r64, k  -- qword mask, mask register -> GPR. Opcode: VEX.W F2 0F 93 /r.
void Assembler::kmovql(Register dst, KRegister src) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x93);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KNOTW k, k  -- bitwise NOT of a word mask. Opcode: VEX 0F 44 /r.
void Assembler::knotwl(KRegister dst, KRegister src) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x44);
  emit_int8((unsigned char)(0xC0 | encode));
}
2349
// This instruction produces ZF or CF flags
// KORTESTB k, k  -- OR byte masks and set ZF/CF. Opcode: VEX 66 0F 98 /r.
void Assembler::kortestbl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x98);
  emit_int8((unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
// KORTESTW k, k  -- word variant. Opcode: VEX 0F 98 /r.
void Assembler::kortestwl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x98);
  emit_int8((unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
// KORTESTD k, k  -- dword variant. Opcode: VEX.W 66 0F 98 /r.
void Assembler::kortestdl(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x98);
  emit_int8((unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
// KORTESTQ k, k  -- qword variant. Opcode: VEX.W 0F 98 /r.
void Assembler::kortestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x98);
  emit_int8((unsigned char)(0xC0 | encode));
}

// This instruction produces ZF or CF flags
// KTESTQ k, k  -- AND/ANDN qword masks and set ZF/CF. Opcode: VEX.W 0F 99 /r.
void Assembler::ktestql(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x99);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KTESTQ k, k  -- same encoding as ktestql above.
void Assembler::ktestq(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x99);
  emit_int8((unsigned char)(0xC0 | encode));
}

// KTESTD k, k  -- dword variant. Opcode: VEX.W 66 0F 99 /r.
void Assembler::ktestd(KRegister src1, KRegister src2) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0x99);
  emit_int8((unsigned char)(0xC0 | encode));
}
2410
movb(Address dst,int imm8)2411 void Assembler::movb(Address dst, int imm8) {
2412 InstructionMark im(this);
2413 prefix(dst);
2414 emit_int8((unsigned char)0xC6);
2415 emit_operand(rax, dst, 1);
2416 emit_int8(imm8);
2417 }
2418
2419
// MOV m8, r8 (opcode 0x88): store the low byte of src to memory.
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  // Third argument flags a byte-register operand for prefix computation.
  prefix(dst, src, true);
  emit_int8((unsigned char)0x88);
  emit_operand(src, dst);
}
2427
// MOVD xmm, r32 (66 0F 6E): move a doubleword from a GP register into an XMM register.
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // GP register reinterpreted as an XMM encoding for the shared prefix helper.
  int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_int8((unsigned char)(0xC0 | encode));
}
2435
// MOVD r32, xmm (66 0F 7E): move the low doubleword of an XMM register to a GP register.
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_int8((unsigned char)(0xC0 | encode));
}
2444
// MOVD xmm, m32 (66 0F 6E): load a doubleword from memory into an XMM register.
void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // EVEX tuple T1S with 32-bit input size governs disp8 compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6E);
  emit_operand(dst, src);
}
2454
// MOVD m32, xmm (66 0F 7E): store the low doubleword of an XMM register to memory.
void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(src, dst);
}
2464
// MOVDQA xmm, xmm (66 0F 6F): aligned 128-bit move, register-to-register.
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2472
// MOVDQA xmm, m128 (66 0F 6F): aligned 128-bit load from memory.
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2482
// MOVDQU xmm, m128 (F3 0F 6F): unaligned 128-bit load from memory.
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2492
// MOVDQU xmm, xmm (F3 0F 6F): unaligned 128-bit move, register-to-register.
void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2500
// MOVDQU m128, xmm (F3 0F 7F): unaligned 128-bit store to memory.
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  // Stores do not zero/merge a destination register, so clear-context is reset.
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2511
2512 // Move Unaligned 256bit Vector
// VMOVDQU ymm, ymm (VEX.256 F3 0F 6F): unaligned 256-bit register-to-register move.
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2520
// VMOVDQU ymm, m256 (VEX.256 F3 0F 6F): unaligned 256-bit load from memory.
void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2530
// VMOVDQU m256, ymm (VEX.256 F3 0F 7F): unaligned 256-bit store to memory.
void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX > 0, "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2543
2544 // Move Unaligned EVEX enabled Vector (programmable : 8,16,32,64)
// EVEX byte-granular unaligned vector move, register-to-register (opcode 0x6F).
// SIMD prefix is chosen by _legacy_mode_bw: F2 when AVX512BW is unavailable,
// F3 otherwise — NOTE(review): presumably selecting between vmovdqu8/vmovdqu32
// style encodings; confirm against the EVEX prefix tables.
void Assembler::evmovdqub(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2554
// EVEX byte-granular unaligned vector load from memory (opcode 0x6F).
// Prefix selection mirrors the register form above (F2 in legacy-bw mode, else F3).
void Assembler::evmovdqub(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2566
// EVEX byte-granular unaligned vector store to memory (opcode 0x7F).
void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2579
// Opmask-predicated EVEX byte-granular load (vmovdqu8 form, F2-prefixed 0x6F):
// lanes masked off by `mask` are not loaded. Requires AVX512VL+BW.
void Assembler::evmovdqub(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  // The opmask register is embedded in the EVEX prefix (aaa field).
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2591
// EVEX word-granular unaligned vector load (opcode 0x6F, vex_w=1 selects word size).
void Assembler::evmovdquw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(src, 0, dst->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2603
// Opmask-predicated EVEX word-granular load (vmovdqu16 form, F2-prefixed 0x6F, vex_w=1).
void Assembler::evmovdquw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2615
// EVEX word-granular unaligned vector store (opcode 0x7F, vex_w=1).
void Assembler::evmovdquw(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int prefix = (_legacy_mode_bw) ? VEX_SIMD_F2 : VEX_SIMD_F3;
  vex_prefix(dst, 0, src->encoding(), (Assembler::VexSimdPrefix)prefix, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2628
// Opmask-predicated EVEX word-granular store: only lanes enabled in `mask` are written.
void Assembler::evmovdquw(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  // Masked stores must merge, not zero, so clear-context is reset.
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2642
// VMOVDQU32 (EVEX F3 0F 6F, W=0): dword-granular unaligned move, register-to-register.
void Assembler::evmovdqul(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2651
// VMOVDQU32 load from memory (EVEX F3 0F 6F, W=0).
void Assembler::evmovdqul(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2662
// VMOVDQU32 store to memory (EVEX F3 0F 7F, W=0).
void Assembler::evmovdqul(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2675
// VMOVDQU64 (EVEX F3 0F 6F, W=1): qword-granular unaligned move, register-to-register.
void Assembler::evmovdquq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_int8((unsigned char)(0xC0 | encode));
}
2684
// VMOVDQU64 load from memory (EVEX F3 0F 6F, W=1).
void Assembler::evmovdquq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2695
// VMOVDQU64 store to memory (EVEX F3 0F 7F, W=1).
void Assembler::evmovdquq(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7F);
  emit_operand(src, dst);
}
2708
2709 // Uses zero extension on 64bit
2710
// MOV r32, imm32 (B8+rd id): the register number is folded into the opcode byte.
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_int32(imm32);
}
2716
// MOV r32, r32 (8B /r).
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x8B);
  emit_int8((unsigned char)(0xC0 | encode));
}
2722
// MOV r32, m32 (8B /r): 32-bit load from memory.
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}
2729
// MOV m32, imm32 (C7 /0 id): store an immediate doubleword to memory.
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  // rax supplies reg-field /0; trailing 4 = immediate size following the operand.
  emit_operand(rax, dst, 4);
  emit_int32(imm32);
}
2737
// MOV m32, r32 (89 /r): 32-bit store to memory.
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
2744
2745 // New cpus require to use movsd and movss to avoid partial register stall
2746 // when loading from memory. But for old Opteron use movlpd instead of movsd.
2747 // The selection is done in MacroAssembler::movdbl() and movflt().
// MOVLPD xmm, m64 (66 0F 12): load 64 bits into the low half of dst, preserving
// the high half (hence dst passed as both destination and first source).
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x12);
  emit_operand(dst, src);
}
2758
// MOVQ mm, m64 (0F 6F): 64-bit MMX load.
// NOTE(review): no operand prefix and no InstructionMark is emitted here —
// presumably this path is only reached on 32-bit builds where no REX is needed;
// confirm before using with addresses that could require a prefix.
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x6F);
  emit_operand(dst, src);
}
2765
// MOVQ m64, mm (0F 7F): 64-bit MMX store.
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_int8(0x0F);
  emit_int8(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}
2778
// MOVQ xmm, m64 (F3 0F 7E): 64-bit load into an XMM register (upper bits cleared
// per the ISA; the encoding here only reflects the F3-prefixed load form).
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x7E);
  emit_operand(dst, src);
}
2789
// MOVQ m64, xmm (66 0F D6): 64-bit store of the low quadword of an XMM register.
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD6);
  emit_operand(src, dst);
}
2800
// MOVSX r32, m8 (0F BE /r): sign-extending byte load.
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_operand(dst, src);
}
2808
// MOVSX r32, r8 (0F BE /r): sign-extend a byte register.
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  // Final `true` flags src as a byte register so the prefix helper can force
  // REX where needed for SPL/BPL/SIL/DIL.
  int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBE);
  emit_int8((unsigned char)(0xC0 | encode));
}
2816
// MOVSD xmm, xmm (F2 0F 10): scalar double move; high bits of dst preserved,
// hence dst is also passed as the first source operand.
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}
2825
// MOVSD xmm, m64 (F2 0F 10): scalar double load from memory.
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}
2836
// MOVSD m64, xmm (F2 0F 11): scalar double store to memory.
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.reset_is_clear_context();
  attributes.set_rex_vex_w_reverted();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}
2848
// MOVSS xmm, xmm (F3 0F 10): scalar float move; dst doubles as first source so
// its upper bits are preserved.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}
2856
// MOVSS xmm, m32 (F3 0F 10): scalar float load from memory.
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x10);
  emit_operand(dst, src);
}
2866
// MOVSS m32, xmm (F3 0F 11): scalar float store to memory.
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  simd_prefix(src, xnoreg, dst, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x11);
  emit_operand(src, dst);
}
2877
// MOVSX r32, m16 (0F BF /r): sign-extending word load.
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_operand(dst, src);
}
2885
// MOVSX r32, r16 (0F BF /r): sign-extend a word register.
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBF);
  emit_int8((unsigned char)(0xC0 | encode));
}
2892
// MOV m16, imm16 (66 C7 /0 iw): 16-bit immediate store; 0x66 is the
// operand-size override selecting the word form.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);

  emit_int8(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst, 2);
  emit_int16(imm16);
}
2902
// MOV r16, m16 (66 8B /r): 16-bit load; only the low word of dst is written.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_int8(0x66); // operand-size override: 16-bit form
  prefix(src, dst);
  emit_int8((unsigned char)0x8B);
  emit_operand(dst, src);
}
2910
// MOV m16, r16 (66 89 /r): 16-bit store to memory.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66); // operand-size override: 16-bit form
  prefix(dst, src);
  emit_int8((unsigned char)0x89);
  emit_operand(src, dst);
}
2918
// MOVZX r32, m8 (0F B6 /r): zero-extending byte load.
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB6);
  emit_operand(dst, src);
}
2926
movzbl(Register dst,Register src)2927 void Assembler::movzbl(Register dst, Register src) { // movzxb
2928 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
2929 int encode = prefix_and_encode(dst->encoding(), false, src->encoding(), true);
2930 emit_int8(0x0F);
2931 emit_int8((unsigned char)0xB6);
2932 emit_int8(0xC0 | encode);
2933 }
2934
// MOVZX r32, m16 (0F B7 /r): zero-extending word load.
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB7);
  emit_operand(dst, src);
}
2942
movzwl(Register dst,Register src)2943 void Assembler::movzwl(Register dst, Register src) { // movzxw
2944 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2945 emit_int8(0x0F);
2946 emit_int8((unsigned char)0xB7);
2947 emit_int8(0xC0 | encode);
2948 }
2949
// MUL m32 (F7 /4): unsigned multiply of EAX by a memory operand; rsp here only
// supplies reg-field encoding 4 (the /4 opcode extension), it is not an operand.
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xF7);
  emit_operand(rsp, src);
}
2956
// MUL r32 (F7 /4): 0xE0 = ModRM mod=11 with reg-field /4.
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xE0 | encode));
}
2962
// MULSD xmm, m64 (F2 0F 59): scalar double multiply with a memory operand.
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
2973
// MULSD xmm, xmm (F2 0F 59): scalar double multiply, register form.
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
2982
// MULSS xmm, m32 (F3 0F 59): scalar float multiply with a memory operand.
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
2992
// MULSS xmm, xmm (F3 0F 59): scalar float multiply, register form.
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
3000
// NEG r32 (F7 /3): 0xD8 = ModRM mod=11 with reg-field /3.
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD8 | encode));
}
3006
nop(int i)3007 void Assembler::nop(int i) {
3008 #ifdef ASSERT
3009 assert(i > 0, " ");
3010 // The fancy nops aren't currently recognized by debuggers making it a
3011 // pain to disassemble code while debugging. If asserts are on clearly
3012 // speed is not an issue so simply use the single byte traditional nop
3013 // to do alignment.
3014
3015 for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
3016 return;
3017
3018 #endif // ASSERT
3019
3020 if (UseAddressNop && VM_Version::is_intel()) {
3021 //
3022 // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
3023 // 1: 0x90
3024 // 2: 0x66 0x90
3025 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
3026 // 4: 0x0F 0x1F 0x40 0x00
3027 // 5: 0x0F 0x1F 0x44 0x00 0x00
3028 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
3029 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
3030 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3031 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3032 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3033 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3034
3035 // The rest coding is Intel specific - don't use consecutive address nops
3036
3037 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3038 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3039 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3040 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3041
3042 while(i >= 15) {
3043 // For Intel don't generate consecutive addess nops (mix with regular nops)
3044 i -= 15;
3045 emit_int8(0x66); // size prefix
3046 emit_int8(0x66); // size prefix
3047 emit_int8(0x66); // size prefix
3048 addr_nop_8();
3049 emit_int8(0x66); // size prefix
3050 emit_int8(0x66); // size prefix
3051 emit_int8(0x66); // size prefix
3052 emit_int8((unsigned char)0x90);
3053 // nop
3054 }
3055 switch (i) {
3056 case 14:
3057 emit_int8(0x66); // size prefix
3058 case 13:
3059 emit_int8(0x66); // size prefix
3060 case 12:
3061 addr_nop_8();
3062 emit_int8(0x66); // size prefix
3063 emit_int8(0x66); // size prefix
3064 emit_int8(0x66); // size prefix
3065 emit_int8((unsigned char)0x90);
3066 // nop
3067 break;
3068 case 11:
3069 emit_int8(0x66); // size prefix
3070 case 10:
3071 emit_int8(0x66); // size prefix
3072 case 9:
3073 emit_int8(0x66); // size prefix
3074 case 8:
3075 addr_nop_8();
3076 break;
3077 case 7:
3078 addr_nop_7();
3079 break;
3080 case 6:
3081 emit_int8(0x66); // size prefix
3082 case 5:
3083 addr_nop_5();
3084 break;
3085 case 4:
3086 addr_nop_4();
3087 break;
3088 case 3:
3089 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
3090 emit_int8(0x66); // size prefix
3091 case 2:
3092 emit_int8(0x66); // size prefix
3093 case 1:
3094 emit_int8((unsigned char)0x90);
3095 // nop
3096 break;
3097 default:
3098 assert(i == 0, " ");
3099 }
3100 return;
3101 }
3102 if (UseAddressNop && VM_Version::is_amd()) {
3103 //
3104 // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
3105 // 1: 0x90
3106 // 2: 0x66 0x90
3107 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
3108 // 4: 0x0F 0x1F 0x40 0x00
3109 // 5: 0x0F 0x1F 0x44 0x00 0x00
3110 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
3111 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
3112 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3113 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3114 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3115 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3116
3117 // The rest coding is AMD specific - use consecutive address nops
3118
3119 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
3120 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
3121 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
3122 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
3123 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3124 // Size prefixes (0x66) are added for larger sizes
3125
3126 while(i >= 22) {
3127 i -= 11;
3128 emit_int8(0x66); // size prefix
3129 emit_int8(0x66); // size prefix
3130 emit_int8(0x66); // size prefix
3131 addr_nop_8();
3132 }
3133 // Generate first nop for size between 21-12
3134 switch (i) {
3135 case 21:
3136 i -= 1;
3137 emit_int8(0x66); // size prefix
3138 case 20:
3139 case 19:
3140 i -= 1;
3141 emit_int8(0x66); // size prefix
3142 case 18:
3143 case 17:
3144 i -= 1;
3145 emit_int8(0x66); // size prefix
3146 case 16:
3147 case 15:
3148 i -= 8;
3149 addr_nop_8();
3150 break;
3151 case 14:
3152 case 13:
3153 i -= 7;
3154 addr_nop_7();
3155 break;
3156 case 12:
3157 i -= 6;
3158 emit_int8(0x66); // size prefix
3159 addr_nop_5();
3160 break;
3161 default:
3162 assert(i < 12, " ");
3163 }
3164
3165 // Generate second nop for size between 11-1
3166 switch (i) {
3167 case 11:
3168 emit_int8(0x66); // size prefix
3169 case 10:
3170 emit_int8(0x66); // size prefix
3171 case 9:
3172 emit_int8(0x66); // size prefix
3173 case 8:
3174 addr_nop_8();
3175 break;
3176 case 7:
3177 addr_nop_7();
3178 break;
3179 case 6:
3180 emit_int8(0x66); // size prefix
3181 case 5:
3182 addr_nop_5();
3183 break;
3184 case 4:
3185 addr_nop_4();
3186 break;
3187 case 3:
3188 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
3189 emit_int8(0x66); // size prefix
3190 case 2:
3191 emit_int8(0x66); // size prefix
3192 case 1:
3193 emit_int8((unsigned char)0x90);
3194 // nop
3195 break;
3196 default:
3197 assert(i == 0, " ");
3198 }
3199 return;
3200 }
3201
3202 if (UseAddressNop && VM_Version::is_zx()) {
3203 //
3204 // Using multi-bytes nops "0x0F 0x1F [address]" for ZX
3205 // 1: 0x90
3206 // 2: 0x66 0x90
3207 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
3208 // 4: 0x0F 0x1F 0x40 0x00
3209 // 5: 0x0F 0x1F 0x44 0x00 0x00
3210 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
3211 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
3212 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3213 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3214 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3215 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
3216
3217 // The rest coding is ZX specific - don't use consecutive address nops
3218
3219 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3220 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3221 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3222 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
3223
3224 while (i >= 15) {
3225 // For ZX don't generate consecutive addess nops (mix with regular nops)
3226 i -= 15;
3227 emit_int8(0x66); // size prefix
3228 emit_int8(0x66); // size prefix
3229 emit_int8(0x66); // size prefix
3230 addr_nop_8();
3231 emit_int8(0x66); // size prefix
3232 emit_int8(0x66); // size prefix
3233 emit_int8(0x66); // size prefix
3234 emit_int8((unsigned char)0x90);
3235 // nop
3236 }
3237 switch (i) {
3238 case 14:
3239 emit_int8(0x66); // size prefix
3240 case 13:
3241 emit_int8(0x66); // size prefix
3242 case 12:
3243 addr_nop_8();
3244 emit_int8(0x66); // size prefix
3245 emit_int8(0x66); // size prefix
3246 emit_int8(0x66); // size prefix
3247 emit_int8((unsigned char)0x90);
3248 // nop
3249 break;
3250 case 11:
3251 emit_int8(0x66); // size prefix
3252 case 10:
3253 emit_int8(0x66); // size prefix
3254 case 9:
3255 emit_int8(0x66); // size prefix
3256 case 8:
3257 addr_nop_8();
3258 break;
3259 case 7:
3260 addr_nop_7();
3261 break;
3262 case 6:
3263 emit_int8(0x66); // size prefix
3264 case 5:
3265 addr_nop_5();
3266 break;
3267 case 4:
3268 addr_nop_4();
3269 break;
3270 case 3:
3271 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
3272 emit_int8(0x66); // size prefix
3273 case 2:
3274 emit_int8(0x66); // size prefix
3275 case 1:
3276 emit_int8((unsigned char)0x90);
3277 // nop
3278 break;
3279 default:
3280 assert(i == 0, " ");
3281 }
3282 return;
3283 }
3284
3285 // Using nops with size prefixes "0x66 0x90".
3286 // From AMD Optimization Guide:
3287 // 1: 0x90
3288 // 2: 0x66 0x90
3289 // 3: 0x66 0x66 0x90
3290 // 4: 0x66 0x66 0x66 0x90
3291 // 5: 0x66 0x66 0x90 0x66 0x90
3292 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
3293 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
3294 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
3295 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
3296 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
3297 //
3298 while(i > 12) {
3299 i -= 4;
3300 emit_int8(0x66); // size prefix
3301 emit_int8(0x66);
3302 emit_int8(0x66);
3303 emit_int8((unsigned char)0x90);
3304 // nop
3305 }
3306 // 1 - 12 nops
3307 if(i > 8) {
3308 if(i > 9) {
3309 i -= 1;
3310 emit_int8(0x66);
3311 }
3312 i -= 3;
3313 emit_int8(0x66);
3314 emit_int8(0x66);
3315 emit_int8((unsigned char)0x90);
3316 }
3317 // 1 - 8 nops
3318 if(i > 4) {
3319 if(i > 6) {
3320 i -= 1;
3321 emit_int8(0x66);
3322 }
3323 i -= 3;
3324 emit_int8(0x66);
3325 emit_int8(0x66);
3326 emit_int8((unsigned char)0x90);
3327 }
3328 switch (i) {
3329 case 4:
3330 emit_int8(0x66);
3331 case 3:
3332 emit_int8(0x66);
3333 case 2:
3334 emit_int8(0x66);
3335 case 1:
3336 emit_int8((unsigned char)0x90);
3337 break;
3338 default:
3339 assert(i == 0, " ");
3340 }
3341 }
3342
// NOT r/m32, register-direct form: bitwise-complements dst in place.
// Encoding: [REX] F7 /2, ModRM = 0xD0 | reg.
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding()); // emits REX prefix if needed
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xD0 | encode));       // ModRM: mod=11, reg=/2, rm=dst
}
3348
// OR m32, imm32 (opcode 0x81, /1 digit supplied via the rcx placeholder).
// emit_arith_operand handles ModRM/SIB/displacement and the immediate.
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);   // marks instruction start for relocation bookkeeping
  prefix(dst);
  emit_arith_operand(0x81, rcx, dst, imm32);
}
3354
// OR r32, imm32. 0xC8 is the register-direct ModRM base for the /1 (OR) digit.
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}
3359
// OR r32, m32 (opcode 0x0B: register destination, memory source).
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x0B);
  emit_operand(dst, src);   // ModRM/SIB/disp for the memory operand
}
3366
// OR r32, r32 (opcode 0x0B, register-direct form via emit_arith).
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding()); // REX only; encode recomputed by emit_arith
  emit_arith(0x0B, 0xC0, dst, src);
}
3371
// OR m32, r32 (opcode 0x09: memory destination, register source).
void Assembler::orl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x09);
  emit_operand(src, dst);
}
3378
// OR m8, imm8 (opcode 0x80 /1; rcx supplies the /1 opcode-extension digit).
void Assembler::orb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x80);
  emit_operand(rcx, dst, 1);  // trailing 1 = size of the immediate that follows
  emit_int8(imm8);
}
3386
// PACKUSWB xmm, m128 (66 0F 67): packs signed words of dst/src into unsigned
// bytes with saturation. Memory form is only allowed under AVX here because
// the legacy SSE form would require 16-byte-aligned operands (see assert).
void Assembler::packuswb(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_operand(dst, src);
}
3397
// PACKUSWB xmm, xmm (66 0F 67), register-direct form.
void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_int8((unsigned char)(0xC0 | encode));
}
3405
// VPACKUSWB dst, nds, src (VEX/EVEX-encoded 66 0F 67) at the given vector length.
void Assembler::vpackuswb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "some form of AVX must be enabled");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x67);
  emit_int8((unsigned char)(0xC0 | encode));
}
3413
vpermq(XMMRegister dst,XMMRegister src,int imm8,int vector_len)3414 void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len) {
3415 assert(VM_Version::supports_avx2(), "");
3416 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3417 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3418 emit_int8(0x00);
3419 emit_int8(0xC0 | encode);
3420 emit_int8(imm8);
3421 }
3422
vperm2i128(XMMRegister dst,XMMRegister nds,XMMRegister src,int imm8)3423 void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
3424 assert(VM_Version::supports_avx2(), "");
3425 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3426 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3427 emit_int8(0x46);
3428 emit_int8(0xC0 | encode);
3429 emit_int8(imm8);
3430 }
3431
vperm2f128(XMMRegister dst,XMMRegister nds,XMMRegister src,int imm8)3432 void Assembler::vperm2f128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
3433 assert(VM_Version::supports_avx(), "");
3434 InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
3435 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3436 emit_int8(0x06);
3437 emit_int8(0xC0 | encode);
3438 emit_int8(imm8);
3439 }
3440
// VPERMI2Q dst, nds, src (EVEX 66 0F38 76 /r, W=1). EVEX-only instruction.
void Assembler::evpermi2q(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction(); // force EVEX encoding
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}
3449
3450
// PAUSE (F3 90): spin-wait hint; a REP-prefixed NOP on older processors.
void Assembler::pause() {
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)0x90);
}
3455
// UD2 (0F 0B): guaranteed invalid-opcode trap, used to halt execution.
void Assembler::ud2() {
  emit_int8(0x0F);
  emit_int8(0x0B);
}
3460
// PCMPESTRI xmm, m128, imm8 (66 0F3A 61): explicit-length string compare,
// imm8 selects the comparison mode. Requires SSE4.2.
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_operand(dst, src);
  emit_int8(imm8);
}
3470
// PCMPESTRI xmm, xmm, imm8 (66 0F3A 61), register-direct form. Requires SSE4.2.
void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x61);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3479
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQB xmm, xmm (66 0F 74): per-byte equality compare. Requires SSE2.
void Assembler::pcmpeqb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}
3488
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQB dst, nds, src (VEX 66 0F 74) at the given vector length. Requires AVX.
void Assembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}
3497
// In this context, kdst is written the mask used to process the equal components
// EVEX VPCMPEQB k, xmm, xmm (66 0F 74): result goes to an opmask register.
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_int8((unsigned char)(0xC0 | encode));
}
3507
// EVEX VPCMPGTB k, xmm, m (66 0F 64): signed byte greater-than compare into
// an opmask register, memory source.
void Assembler::evpcmpgtb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src); // opmask encoding reused as the ModRM reg field
}
3519
// EVEX VPCMPGTB k{mask}, xmm, m (66 0F 64): masked variant — 'mask' is the
// embedded opmask governing which elements are compared (merging semantics,
// see reset_is_clear_context).
void Assembler::evpcmpgtb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();   // merge (do not zero) unselected elements
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x64);
  emit_operand(as_Register(dst_enc), src);
}
3533
// EVEX VPCMPUW k, xmm, xmm, pred (66 0F3A 3E, W=1): unsigned word compare
// whose predicate (eq/lt/le/...) is the trailing immediate 'vcc'.
void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(vcc);   // comparison predicate immediate
}
3543
// EVEX VPCMPUW k{mask}, xmm, xmm, pred (66 0F3A 3E, W=1): masked variant
// with embedded opmask 'mask' and merging semantics.
void Assembler::evpcmpuw(KRegister kdst, KRegister mask, XMMRegister nds, XMMRegister src, ComparisonPredicate vcc, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.reset_is_clear_context();   // merge (do not zero) unselected elements
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(vcc);   // comparison predicate immediate
}
3555
evpcmpuw(KRegister kdst,XMMRegister nds,Address src,ComparisonPredicate vcc,int vector_len)3556 void Assembler::evpcmpuw(KRegister kdst, XMMRegister nds, Address src, ComparisonPredicate vcc, int vector_len) {
3557 assert(VM_Version::supports_avx512vlbw(), "");
3558 InstructionMark im(this);
3559 InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
3560 attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
3561 attributes.set_is_evex_instruction();
3562 int dst_enc = kdst->encoding();
3563 vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
3564 emit_int8(0x3E);
3565 emit_operand(as_Register(dst_enc), src);
3566 emit_int8(vcc);
3567 }
3568
// EVEX VPCMPEQB k, xmm, m (66 0F 74): byte equality compare into an opmask
// register, memory source.
void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(dst_enc), src);
}
3580
// EVEX VPCMPEQB k{mask}, xmm, m (66 0F 74): masked variant — 'mask' is the
// embedded opmask, merging semantics, memory source.
void Assembler::evpcmpeqb(KRegister kdst, KRegister mask, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();   // merge (do not zero) unselected elements
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x74);
  emit_operand(as_Register(kdst->encoding()), src);
}
3593
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQW xmm, xmm (66 0F 75): per-word equality compare. Requires SSE2.
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}
3602
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQW dst, nds, src (VEX 66 0F 75) at the given vector length. Requires AVX.
void Assembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}
3611
// In this context, kdst is written the mask used to process the equal components
// EVEX VPCMPEQW k, xmm, xmm (66 0F 75): result goes to an opmask register.
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_int8((unsigned char)(0xC0 | encode));
}
3621
// EVEX VPCMPEQW k, xmm, m (66 0F 75): word equality compare into an opmask
// register, memory source.
void Assembler::evpcmpeqw(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x75);
  emit_operand(as_Register(dst_enc), src);
}
3633
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQD xmm, xmm (66 0F 76): per-doubleword equality compare. Requires SSE2.
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}
3642
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQD dst, nds, src (VEX 66 0F 76) at the given vector length. Requires AVX.
void Assembler::vpcmpeqd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}
3651
// In this context, kdst is written the mask used to process the equal components
// EVEX VPCMPEQD k, xmm, xmm (66 0F 76): result goes to an opmask register.
void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  attributes.reset_is_clear_context();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_int8((unsigned char)(0xC0 | encode));
}
3662
// EVEX VPCMPEQD k, xmm, m (66 0F 76): doubleword equality compare into an
// opmask register, memory source (full-vector tuple, 32-bit elements).
void Assembler::evpcmpeqd(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x76);
  emit_operand(as_Register(dst_enc), src);
}
3675
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// PCMPEQQ xmm, xmm (66 0F38 29): per-quadword equality compare. Requires SSE4.1.
void Assembler::pcmpeqq(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}
3684
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
// VPCMPEQQ dst, nds, src (VEX 66 0F38 29) at the given vector length. Requires AVX.
void Assembler::vpcmpeqq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}
3693
// In this context, kdst is written the mask used to process the equal components
// EVEX VPCMPEQQ k, xmm, xmm (66 0F38 29, W=1): result goes to an opmask register.
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(kdst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_int8((unsigned char)(0xC0 | encode));
}
3704
// In this context, kdst is written the mask used to process the equal components
// EVEX VPCMPEQQ k, xmm, m (66 0F38 29, W=1): quadword equality compare into an
// opmask register, memory source (full-vector tuple, 64-bit elements).
void Assembler::evpcmpeqq(KRegister kdst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  int dst_enc = kdst->encoding();
  vex_prefix(src, nds->encoding(), dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x29);
  emit_operand(as_Register(dst_enc), src);
}
3718
// PMOVMSKB r32, xmm (66 0F D7): collects the sign bit of each byte of src
// into the low bits of the GPR dst. Requires SSE2.
void Assembler::pmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes); // GPR encoding reused in the XMM slot
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}
3726
// VPMOVMSKB r32, ymm (VEX.256 66 0F D7): 256-bit byte-sign-mask extract.
// Requires AVX2.
void Assembler::vpmovmskb(Register dst, XMMRegister src) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD7);
  emit_int8((unsigned char)(0xC0 | encode));
}
3734
// PEXTRD r32, xmm, imm8 (66 0F3A 16): extracts the dword selected by imm8
// into a GPR. Note src/dst are swapped in the prefix call: for PEXTR* the
// XMM register is the ModRM reg field and the GPR is the r/m field.
void Assembler::pextrd(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3743
// PEXTRD m32, xmm, imm8 (66 0F3A 16): stores the selected dword to memory.
void Assembler::pextrd(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}
3753
// PEXTRQ r64, xmm, imm8 (66 REX.W 0F3A 16): same opcode as pextrd, W=1
// selects the 64-bit form. XMM is the ModRM reg field, GPR the r/m field.
void Assembler::pextrq(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3762
// PEXTRQ to memory: store the qword selected by imm8 from xmm src at dst
// (66 REX.W 0F 3A 16 /r ib). T1S tuple with 64-bit element for EVEX disp8.
void Assembler::pextrq(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x16);
  emit_operand(src, dst);
  emit_int8(imm8);
}
3772
// PEXTRW: extract the word selected by imm8 from xmm src, zero-extended into
// gpr dst. Uses the legacy SSE2 form 66 0F C5 /r ib (gpr dst in the reg field).
void Assembler::pextrw(Register dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC5);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3781
// PEXTRW to memory: store the word selected by imm8 from xmm src at dst.
// Memory form requires SSE4.1 (66 0F 3A 15 /r ib), unlike the register form.
void Assembler::pextrw(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x15);
  emit_operand(src, dst);
  emit_int8(imm8);
}
3791
// PEXTRB to memory: store the byte selected by imm8 from xmm src at dst
// (66 0F 3A 14 /r ib). Requires SSE4.1.
void Assembler::pextrb(Address dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(src, xnoreg, dst, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x14);
  emit_operand(src, dst);
  emit_int8(imm8);
}
3801
// PINSRD: insert the dword from gpr src into xmm dst at the position selected
// by imm8 (66 0F 3A 22 /r ib). dst is passed twice: destination and nds source.
void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3810
// PINSRD from memory: insert the dword at src into xmm dst at position imm8
// (66 0F 3A 22 /r ib). T1S tuple, 32-bit element for EVEX disp8 compression.
void Assembler::pinsrd(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst,src);
  emit_int8(imm8);
}
3820
// PINSRQ: insert the qword from gpr src into xmm dst at position imm8
// (66 REX.W 0F 3A 22 /r ib); rex_w=true selects the 64-bit form.
void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3829
// PINSRQ from memory: insert the qword at src into xmm dst at position imm8
// (66 REX.W 0F 3A 22 /r ib). T1S tuple, 64-bit element.
void Assembler::pinsrq(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x22);
  emit_operand(dst, src);
  emit_int8(imm8);
}
3839
// PINSRW: insert the low word of gpr src into xmm dst at position imm8
// (66 0F C4 /r ib). SSE2 legacy opcode, not the 0F 3A map.
void Assembler::pinsrw(XMMRegister dst, Register src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
3848
// PINSRW from memory: insert the word at src into xmm dst at position imm8
// (66 0F C4 /r ib). T1S tuple, 16-bit element for EVEX disp8 compression.
void Assembler::pinsrw(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse2(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC4);
  emit_operand(dst, src);
  emit_int8(imm8);
}
3858
// PINSRB from memory: insert the byte at src into xmm dst at position imm8
// (66 0F 3A 20 /r ib). Requires SSE4.1.
void Assembler::pinsrb(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x20);
  emit_operand(dst, src);
  emit_int8(imm8);
}
3868
// PMOVZXBW from memory: zero-extend packed bytes at src to words in xmm dst
// (66 0F 38 30 /r). HVM tuple: the memory operand is half the vector width.
void Assembler::pmovzxbw(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}
3878
// PMOVZXBW: zero-extend the low 8 bytes of xmm src to 8 words in xmm dst
// (66 0F 38 30 /r). Requires SSE4.1.
void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_int8((unsigned char)(0xC0 | encode));
}
3886
// VPMOVZXBW from memory: zero-extend packed bytes at src to words in the
// destination vector of the given length (VEX/EVEX 66 0F 38 30 /r).
void Assembler::vpmovzxbw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  assert(dst != xnoreg, "sanity");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}
3897
// VPMOVZXBW: zero-extend packed bytes of src to words in dst for the given
// vector length. CPU requirement depends on length: AVX / AVX2 / AVX-512BW.
void Assembler::vpmovzxbw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
  vector_len == AVX_256bit? VM_Version::supports_avx2() :
  vector_len == AVX_512bit? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_int8((unsigned char) (0xC0 | encode));
}
3907
3908
// EVEX-masked VPMOVZXBW from memory: zero-extend packed bytes at src to words
// in dst under opmask `mask` (EVEX.66.0F38 30 /r). Requires AVX-512VL+BW.
void Assembler::evpmovzxbw(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(dst, src);
}
// VPMOVWB: truncate packed words of src to bytes and store at dst
// (EVEX.F3.0F38 30 /r, memory destination). Requires AVX-512VL+BW.
void Assembler::evpmovwb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst);  // src supplies the ModRM reg field; dst is the memory operand
}
3932
// Opmask-predicated VPMOVWB: truncate packed words of src to bytes and store
// at dst under opmask `mask` (merging semantics: is_clear_context is reset).
void Assembler::evpmovwb(Address dst, KRegister mask, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512vlbw(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_HVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.reset_is_clear_context();  // merge-masking, not zero-masking
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x30);
  emit_operand(src, dst);
}
3946
// VPMOVDB: truncate packed dwords of src to bytes and store at dst
// (EVEX.F3.0F38 31 /r). QVM tuple: memory operand is a quarter vector.
void Assembler::evpmovdb(Address dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_QVM, /* input_size_in_bits */ EVEX_NObit);
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x31);
  emit_operand(src, dst);
}
3958
// VPMOVZXWD: zero-extend packed words of src to dwords in dst
// (66 0F 38 33 /r) for the given vector length.
void Assembler::vpmovzxwd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
  vector_len == AVX_256bit? VM_Version::supports_avx2() :
  vector_len == AVX_512bit? VM_Version::supports_evex() : 0, " ");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x33);
  emit_int8((unsigned char)(0xC0 | encode));
}
3968
// PMADDWD: multiply packed signed words and add adjacent dword products
// (66 0F F5 /r). dst doubles as the first source operand.
void Assembler::pmaddwd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_int8((unsigned char)(0xC0 | encode));
}
3976
// VPMADDWD: three-operand form of PMADDWD, dst = nds pmaddwd src
// (VEX/EVEX 66 0F F5 /r) for the given vector length.
void Assembler::vpmaddwd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
    (vector_len == AVX_256bit ? VM_Version::supports_avx2() :
    (vector_len == AVX_512bit ? VM_Version::supports_evex() : 0)), "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF5);
  emit_int8((unsigned char)(0xC0 | encode));
}
3986
// VPDPWSSD (AVX512_VNNI): multiply packed signed words of nds and src and
// accumulate adjacent dword products into dst (EVEX.66.0F38 52 /r).
void Assembler::evpdpwssd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(VM_Version::supports_vnni(), "must support vnni");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x52);
  emit_int8((unsigned char)(0xC0 | encode));
}
3996
3997 // generic
// POP r64/r32: opcode 0x58 + register, with REX prefix as needed.
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8(0x58 | encode);
}
4002
// POPCNT r32, m32: count set bits of the dword at src into dst
// (F3 0F B8 /r). The F3 prefix must precede any REX prefix.
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_int8((unsigned char)0xF3);
  prefix(src, dst);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_operand(dst, src);
}
4012
// POPCNT r32, r32: count set bits of src into dst (F3 0F B8 /r).
// The F3 prefix is emitted before the (optional) REX prefix, as required.
void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_int8((unsigned char)0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xB8);
  emit_int8((unsigned char)(0xC0 | encode));
}
4021
// VPOPCNTD (AVX512_VPOPCNTDQ): per-dword population count of src into dst
// (EVEX.66.0F38 55 /r).
void Assembler::vpopcntd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_vpopcntdq(), "must support vpopcntdq feature");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x55);
  emit_int8((unsigned char)(0xC0 | encode));
}
4030
// POPF: pop the flags register from the stack (opcode 0x9D).
void Assembler::popf() {
  emit_int8((unsigned char)0x9D);
}
4034
4035 #ifndef _LP64 // no 32bit push/pop on amd64
// POP m32 (32-bit only; compiled out on LP64): opcode 8F /0.
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0x8F);
  emit_operand(rax, dst);  // rax encodes the /0 opcode extension
}
4043 #endif
4044
// Shared helper for the prefetch family: emits the address-size/REX prefix
// for src followed by the 0x0F escape byte.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_int8(0x0F);
}
4049
// PREFETCHNTA: non-temporal prefetch hint (0F 18 /0).
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rax, src); // 0, src
}
4057
// PREFETCH (3DNow! prefetch-for-read form): 0F 0D /0.
void Assembler::prefetchr(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rax, src); // 0, src
}
4065
// PREFETCHT0: prefetch into all cache levels (0F 18 /1).
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rcx, src); // 1, src
}
4073
// PREFETCHT1: prefetch into L2 and higher (0F 18 /2).
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rdx, src); // 2, src
}
4081
// PREFETCHT2: prefetch into L3 and higher (0F 18 /3).
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x18);
  emit_operand(rbx, src); // 3, src
}
4089
// PREFETCHW: prefetch with intent to write (0F 0D /1).
void Assembler::prefetchw(Address src) {
  assert(VM_Version::supports_3dnow_prefetch(), "must support");
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_int8(0x0D);
  emit_operand(rcx, src); // 1, src
}
4097
// Emit a raw one-byte instruction prefix.
void Assembler::prefix(Prefix p) {
  emit_int8(p);
}
4101
// PSHUFB: shuffle the bytes of dst according to the control mask in src
// (66 0F 38 00 /r). Requires SSSE3.
void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}
4109
// VPSHUFB: three-operand byte shuffle, dst = shuffle(nds, control=src).
// Only 128/256-bit lengths are accepted here (assert rejects AVX_512bit).
void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
  vector_len == AVX_256bit? VM_Version::supports_avx2() :
  0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_int8((unsigned char)(0xC0 | encode));
}
4119
// PSHUFB with a memory control mask (66 0F 38 00 /r). FVM tuple: full
// vector memory operand for EVEX disp8 compression.
void Assembler::pshufb(XMMRegister dst, Address src) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x00);
  emit_operand(dst, src);
}
4129
// PSHUFD: shuffle the dwords of src into dst per the imm8 control `mode`
// (66 0F 70 /r ib). On AVX-512 CPUs without VL support the attribute width
// is widened to 512 bits so the EVEX encoding remains legal.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int vector_len = VM_Version::supports_avx512novl() ? AVX_512bit : AVX_128bit;
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}
4140
// VPSHUFD: dword shuffle of src into dst per imm8 `mode` for 128/256-bit
// vectors (the assert rejects AVX_512bit here).
void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
  vector_len == AVX_256bit? VM_Version::supports_avx2() :
  0, "");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}
4152
// PSHUFD with a memory source (66 0F 70 /r ib). Only emitted when AVX is
// available: the legacy SSE form would require 16-byte-aligned memory.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
4165
// PSHUFLW: shuffle the low four words of src per imm8 `mode`, high qword
// copied unchanged (F2 0F 70 /r ib).
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(mode & 0xFF);
}
4175
// PSHUFLW with a memory source (F2 0F 70 /r ib). AVX-only here: the legacy
// SSE form would require 16-byte-aligned memory.
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x70);
  emit_operand(dst, src);
  emit_int8(mode & 0xFF);
}
// VSHUFI64X2: shuffle 128-bit lanes of nds/src into dst per imm8
// (EVEX.66.0F3A.W1 43 /r ib). 256- or 512-bit vectors only.
void Assembler::evshufi64x2(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(vector_len == Assembler::AVX_256bit || vector_len == Assembler::AVX_512bit, "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x43);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8 & 0xFF);
}
4198
void Assembler::psrldq(XMMRegister dst, int shift) {
  // Shift right 128 bit value in dst XMMRegister by shift number of bytes.
  // (Logical byte shift right; the /3 opcode extension of 66 0F 73.)
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM3 is for /3 encoding: 66 0F 73 /3 ib
  int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
4208
// VPSRLDQ: per-128-bit-lane byte shift right of src by `shift`, result in dst
// (66 0F 73 /3 ib). xmm3 supplies the /3 opcode extension.
void Assembler::vpsrldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm3->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
4219
void Assembler::pslldq(XMMRegister dst, int shift) {
  // Shift left 128 bit value in dst XMMRegister by shift number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM7 is for /7 encoding: 66 0F 73 /7 ib
  int encode = simd_prefix_and_encode(xmm7, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift);
}
4230
// VPSLLDQ: per-128-bit-lane byte shift left of src by `shift`, result in dst
// (66 0F 73 /7 ib). xmm7 supplies the /7 opcode extension.
void Assembler::vpslldq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(vector_len == AVX_128bit ? VM_Version::supports_avx() :
         vector_len == AVX_256bit ? VM_Version::supports_avx2() :
         vector_len == AVX_512bit ? VM_Version::supports_avx512bw() : 0, "");
  InstructionAttr attributes(vector_len, /*vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(xmm7->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
4241
// PTEST with a memory operand: set ZF/CF from dst AND/ANDN memory at src
// (66 0F 38 17 /r). AVX-only here to avoid the SSE alignment requirement.
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}
4251
// PTEST: set ZF/CF from bitwise tests of dst against src (66 0F 38 17 /r).
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1() || VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
4259
// VPTEST (256-bit) with a memory operand (VEX.256.66.0F38 17 /r).
void Assembler::vptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  assert(dst != xnoreg, "sanity");
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_operand(dst, src);
}
4270
// VPTEST (256-bit), register form (VEX.256.66.0F38 17 /r).
void Assembler::vptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x17);
  emit_int8((unsigned char)(0xC0 | encode));
}
4278
// PUNPCKLBW with a memory source: interleave the low bytes of dst and memory
// at src (66 0F 60 /r). AVX-only to avoid the SSE alignment requirement.
void Assembler::punpcklbw(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_operand(dst, src);
}
4289
// PUNPCKLBW: interleave the low bytes of dst and src (66 0F 60 /r).
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_vlbw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x60);
  emit_int8((unsigned char)(0xC0 | encode));
}
4297
// PUNPCKLDQ with a memory source: interleave the low dwords of dst and memory
// at src (66 0F 62 /r). AVX-only to avoid the SSE alignment requirement.
void Assembler::punpckldq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_operand(dst, src);
}
4308
// PUNPCKLDQ: interleave the low dwords of dst and src (66 0F 62 /r).
void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x62);
  emit_int8((unsigned char)(0xC0 | encode));
}
4316
// PUNPCKLQDQ: interleave the low qwords of dst and src (66 0F 6C /r).
// EVEX form carries W=1, reverted where the legacy encoding is chosen.
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x6C);
  emit_int8((unsigned char)(0xC0 | encode));
}
4325
// PUSH imm32 (opcode 0x68).
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_int8(0x68);
  emit_int32(imm32);
}
4332
// PUSH r64/r32: opcode 0x50 + register, with REX prefix as needed.
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_int8(0x50 | encode);
}
4338
// PUSHF: push the flags register onto the stack (opcode 0x9C).
void Assembler::pushf() {
  emit_int8((unsigned char)0x9C);
}
4342
4343 #ifndef _LP64 // no 32bit push/pop on amd64
// PUSH m32 (32-bit only; compiled out on LP64): opcode FF /6.
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_int8((unsigned char)0xFF);
  emit_operand(rsi, src);  // rsi encodes the /6 opcode extension
}
4351 #endif
4352
rcll(Register dst,int imm8)4353 void Assembler::rcll(Register dst, int imm8) {
4354 assert(isShiftCount(imm8), "illegal shift count");
4355 int encode = prefix_and_encode(dst->encoding());
4356 if (imm8 == 1) {
4357 emit_int8((unsigned char)0xD1);
4358 emit_int8((unsigned char)(0xD0 | encode));
4359 } else {
4360 emit_int8((unsigned char)0xC1);
4361 emit_int8((unsigned char)0xD0 | encode);
4362 emit_int8(imm8);
4363 }
4364 }
4365
// RCPPS — approximate reciprocal of packed single-precision floats (0F 53).
void Assembler::rcpps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}

// RCPSS — approximate reciprocal of scalar single-precision float (F3 0F 53).
void Assembler::rcpss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x53);
  emit_int8((unsigned char)(0xC0 | encode));
}

// RDTSC — read time-stamp counter into EDX:EAX (0F 31).
void Assembler::rdtsc() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x31);
}
4386
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_int8((unsigned char)0xF3); // REP
  // MOVSQ (MOVSD on 32-bit: no REX.W)
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xA5);
}

// sets rcx bytes with rax, value at [edi]
void Assembler::rep_stosb() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAA); // STOSB
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_stos() {
  emit_int8((unsigned char)0xF3); // REP
  LP64_ONLY(prefix(REX_W)); // LP64:STOSQ, LP32:STOSD
  emit_int8((unsigned char)0xAB);
}

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_int8((unsigned char)0xF2); // REPNE
  // SCASQ (SCASD on 32-bit: no REX.W)
  LP64_ONLY(prefix(REX_W));
  emit_int8((unsigned char)0xAF);
}
4419
#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_int8((unsigned char)0xF2); // REPNE
  // SCASL — no REX.W, so 4-byte elements are compared
  emit_int8((unsigned char)0xAF);
}
#endif
4429
// RET — near return; 0xC3 for a plain return, 0xC2 iw to also pop
// imm16 bytes of arguments from the stack.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_int8((unsigned char)0xC3);
  } else {
    emit_int8((unsigned char)0xC2);
    emit_int16(imm16);
  }
}

// SAHF — store AH into the low byte of the flags register (0x9E).
// Invalid in 64-bit mode, hence the guard.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_int8((unsigned char)0x9E);
}
4446
// SAR r/m32, imm8 — arithmetic right shift by an immediate count.
// Uses the one-byte-shorter 0xD1 form when the count is 1.
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xF8 | encode)); // /7 = SAR
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xF8 | encode));
    emit_int8(imm8);
  }
}

// SAR r/m32, CL — arithmetic right shift by the count in CL (0xD3 /7).
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xF8 | encode));
}
4465
// SBB m32, imm32 — subtract with borrow, immediate into memory (0x81 /3).
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32); // rbx encodes as 3 -> /3 = SBB
}

// SBB r32, imm32 (0x81 /3, register-direct base 0xD8).
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}


// SBB r32, m32 (opcode 0x1B).
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x1B);
  emit_operand(dst, src);
}

// SBB r32, r32 (opcode 0x1B, register-direct).
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
4489
setb(Condition cc,Register dst)4490 void Assembler::setb(Condition cc, Register dst) {
4491 assert(0 <= cc && cc < 16, "illegal cc");
4492 int encode = prefix_and_encode(dst->encoding(), true);
4493 emit_int8(0x0F);
4494 emit_int8((unsigned char)0x90 | cc);
4495 emit_int8((unsigned char)(0xC0 | encode));
4496 }
4497
// PALIGNR — concatenate dst:src and extract a byte-aligned result shifted
// right by imm8 bytes (66 0F 3A 0F ib).
void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_ssse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// VPALIGNR — AVX/AVX2 three-operand form of PALIGNR for 128/256-bit vectors.
void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  // 128-bit needs AVX, 256-bit needs AVX2; anything else is rejected.
  assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
         vector_len == AVX_256bit? VM_Version::supports_avx2() :
         0, "");
  InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
4517
// VALIGNQ — EVEX qword-granular align of nds:src shifted right by imm8
// qwords (EVEX.66.0F3A.W1 03 ib), full 512-bit width.
void Assembler::evalignq(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3); // opcode byte 0x03
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}

// PBLENDW — blend words from src into dst under the imm8 mask (66 0F 3A 0E ib).
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x0E);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(imm8);
}
4536
// SHA1RNDS4 — four rounds of SHA-1 (0F 3A CC ib); imm8 selects the round function.
void Assembler::sha1rnds4(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_3A, /* rex_w */ false);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)imm8);
}

// SHA1NEXTE — calculate SHA-1 state variable E after four rounds (0F 38 C8).
void Assembler::sha1nexte(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xC8);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA1MSG1 — SHA-1 message scheduling, first intermediate (0F 38 C9).
void Assembler::sha1msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xC9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA1MSG2 — SHA-1 message scheduling, second intermediate (0F 38 CA).
void Assembler::sha1msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCA);
  emit_int8((unsigned char)(0xC0 | encode));
}

// xmm0 is implicit additional source to this instruction.
// SHA256RNDS2 — two rounds of SHA-256 (0F 38 CB).
void Assembler::sha256rnds2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCB);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA256MSG1 — SHA-256 message scheduling, first intermediate (0F 38 CC).
void Assembler::sha256msg1(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCC);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SHA256MSG2 — SHA-256 message scheduling, second intermediate (0F 38 CD).
void Assembler::sha256msg2(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sha(), "");
  int encode = rex_prefix_and_encode(dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, /* rex_w */ false);
  emit_int8((unsigned char)0xCD);
  emit_int8((unsigned char)(0xC0 | encode));
}
4587
4588
// SHL r/m32, imm8 — logical left shift by an immediate count.
// Uses the one-byte-shorter 0xD1 form when the count is 1.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_int8((unsigned char)0xD1);
    emit_int8((unsigned char)(0xE0 | encode)); // /4 = SHL
  } else {
    emit_int8((unsigned char)0xC1);
    emit_int8((unsigned char)(0xE0 | encode));
    emit_int8(imm8);
  }
}

// SHL r/m32, CL — logical left shift by the count in CL (0xD3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE0 | encode));
}
4607
shrl(Register dst,int imm8)4608 void Assembler::shrl(Register dst, int imm8) {
4609 assert(isShiftCount(imm8), "illegal shift count");
4610 int encode = prefix_and_encode(dst->encoding());
4611 emit_int8((unsigned char)0xC1);
4612 emit_int8((unsigned char)(0xE8 | encode));
4613 emit_int8(imm8);
4614 }
4615
// SHR r/m32, CL — logical right shift by the count in CL (0xD3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xE8 | encode));
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_int8((unsigned char)0xA5); // MOVS, no REP prefix
}
4626
// SQRTSD — scalar double-precision square root, reg-reg (F2 0F 51).
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SQRTSD — scalar double-precision square root, reg-mem form.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  // EVEX tuple T1S with 64-bit input governs disp8 compression for the operand.
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}

// SQRTSS — scalar single-precision square root, reg-reg (F3 0F 51).
void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_int8((unsigned char)(0xC0 | encode));
}

// STD — set the direction flag (0xFD).
void Assembler::std() {
  emit_int8((unsigned char)0xFD);
}
4658
// SQRTSS — scalar single-precision square root, reg-mem form (F3 0F 51).
void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x51);
  emit_operand(dst, src);
}
4668
// STMXCSR / VSTMXCSR — store the MXCSR control/status register to memory
// (0F AE /3; VEX-encoded when AVX is in use).
void Assembler::stmxcsr( Address dst) {
  if (UseAVX > 0 ) {
    // VEX-encoded form
    assert(VM_Version::supports_avx(), "");
    InstructionMark im(this);
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    vex_prefix(dst, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst); // /3 selects STMXCSR
  } else {
    // Legacy SSE form
    NOT_LP64(assert(VM_Version::supports_sse(), ""));
    InstructionMark im(this);
    prefix(dst);
    emit_int8(0x0F);
    emit_int8((unsigned char)0xAE);
    emit_operand(as_Register(3), dst);
  }
}
4686
// SUB m32, imm32 (0x81 /5; emit_arith_operand may pick the sign-extended
// imm8 form when it fits).
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbp, dst, imm32); // rbp encodes as 5 -> /5 = SUB
}

// SUB m32, r32 (opcode 0x29).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x29);
  emit_operand(src, dst);
}

// SUB r32, imm32 (0x81 /5, register-direct base 0xE8).
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::subl_imm32(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith_imm32(0x81, 0xE8, dst, imm32);
}

// SUB r32, m32 (opcode 0x2B).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x2B);
  emit_operand(dst, src);
}

// SUB r32, r32 (opcode 0x2B, register-direct).
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
4722
// SUBSD — scalar double-precision subtract, reg-reg (F2 0F 5C).
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SUBSD — scalar double-precision subtract, reg-mem form.
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}

// SUBSS — scalar single-precision subtract, reg-reg (F3 0F 5C).
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true , /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_int8((unsigned char)(0xC0 | encode));
}

// SUBSS — scalar single-precision subtract, reg-mem form.
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5C);
  emit_operand(dst, src);
}
4760
// TEST r8, imm8 (0xF6 /0) — byte-register form.
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true); // 'true' requests byte-reg encoding
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// TEST m8, imm8 (0xF6 /0).
void Assembler::testb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_int8((unsigned char)0xF6);
  emit_operand(rax, dst, 1); // rax encodes as 0 -> /0; 1 = trailing imm size
  emit_int8(imm8);
}

// TEST r32, imm32 — uses the short 0xA9 (eax) form when possible.
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_int8((unsigned char)0xA9); // TEST eax, imm32 — no ModRM needed
  } else {
    encode = prefix_and_encode(encode);
    emit_int8((unsigned char)0xF7);
    emit_int8((unsigned char)(0xC0 | encode));
  }
  emit_int32(imm32);
}

// TEST r32, r32 (opcode 0x85, register-direct).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// TEST r32, m32 (opcode 0x85).
void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x85);
  emit_operand(dst, src);
}
4801
tzcntl(Register dst,Register src)4802 void Assembler::tzcntl(Register dst, Register src) {
4803 assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
4804 emit_int8((unsigned char)0xF3);
4805 int encode = prefix_and_encode(dst->encoding(), src->encoding());
4806 emit_int8(0x0F);
4807 emit_int8((unsigned char)0xBC);
4808 emit_int8((unsigned char)0xC0 | encode);
4809 }
4810
// TZCNT r64, r64 — count trailing zero bits (F3 REX.W 0F BC); requires BMI1.
// The F3 prefix is emitted before the REX.W prefix, as required.
void Assembler::tzcntq(Register dst, Register src) {
  assert(VM_Version::supports_bmi1(), "tzcnt instruction not supported");
  emit_int8((unsigned char)0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_int8(0x0F);
  emit_int8((unsigned char)0xBC);
  emit_int8((unsigned char)(0xC0 | encode));
}
4819
// UCOMISD — unordered compare of scalar doubles, setting EFLAGS (66 0F 2E),
// reg-mem form.
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

// UCOMISD — reg-reg form.
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// UCOMISS — unordered compare of scalar singles, setting EFLAGS (0F 2E),
// reg-mem form.
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_operand(dst, src);
}

// UCOMISS — reg-reg form.
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2E);
  emit_int8((unsigned char)(0xC0 | encode));
}
4857
// XABORT — abort an RTM transaction with the given status byte (C6 F8 ib).
void Assembler::xabort(int8_t imm8) {
  emit_int8((unsigned char)0xC6);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(imm8 & 0xFF));
}
4863
// XADD m8, r8 — exchange and add, byte form (0F C0).
void Assembler::xaddb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true); // 'true' requests byte-reg encoding
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC0);
  emit_operand(src, dst);
}

// XADD m16, r16 — word form; 0x66 operand-size prefix + 0F C1.
void Assembler::xaddw(Address dst, Register src) {
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}

// XADD m32, r32 (0F C1).
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC1);
  emit_operand(src, dst);
}
4888
// XBEGIN — start an RTM transaction (C7 F8 rel32); 'abort' is the fallback
// label jumped to if the transaction aborts.
void Assembler::xbegin(Label& abort, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  if (abort.is_bound()) {
    // Target known: emit the resolved relative displacement.
    address entry = target(abort);
    assert(entry != NULL, "abort entry NULL");
    intptr_t offset = entry - pc();
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(offset - 6); // 2 opcode + 4 address
  } else {
    // Target unknown: record a patch site and emit a zero displacement
    // to be fixed up when the label binds.
    abort.add_patch_at(code(), locator());
    emit_int8((unsigned char)0xC7);
    emit_int8((unsigned char)0xF8);
    emit_int32(0);
  }
}
4906
// XCHG r8, m8 (opcode 0x86). Note: memory-form XCHG carries an implicit lock.
void Assembler::xchgb(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst, true); // 'true' requests byte-reg encoding
  emit_int8((unsigned char)0x86);
  emit_operand(dst, src);
}

// XCHG r16, m16 — 0x66 operand-size prefix + opcode 0x87.
void Assembler::xchgw(Register dst, Address src) { // xchg
  InstructionMark im(this);
  emit_int8(0x66);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHG r32, m32 (opcode 0x87).
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8((unsigned char)0x87);
  emit_operand(dst, src);
}

// XCHG r32, r32 (opcode 0x87, register-direct).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_int8((unsigned char)0x87);
  emit_int8((unsigned char)(0xC0 | encode));
}
4934
// XEND — commit an RTM transaction (0F 01 D5).
void Assembler::xend() {
  emit_int8((unsigned char)0x0F);
  emit_int8((unsigned char)0x01);
  emit_int8((unsigned char)0xD5);
}

// XGETBV — read an extended control register (XCR) into EDX:EAX (0F 01 D0).
void Assembler::xgetbv() {
  emit_int8(0x0F);
  emit_int8(0x01);
  emit_int8((unsigned char)0xD0);
}
4946
// XOR r32, imm32 (0x81 /6, register-direct base 0xF0).
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r32, m32 (opcode 0x33).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x33);
  emit_operand(dst, src);
}

// XOR r32, r32 (opcode 0x33, register-direct).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XOR r8, m8 (opcode 0x32).
void Assembler::xorb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_int8(0x32);
  emit_operand(dst, src);
}
4970
// AVX 3-operands scalar float-point arithmetic instructions

// VADDSD dst, nds, m64 — scalar double add (VEX.F2.0F 58).
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSD dst, nds, src — register form.
void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VADDSS dst, nds, m32 — scalar single add (VEX.F3.0F 58).
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}

// VADDSS dst, nds, src — register form.
void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
5010
// VDIVSD dst, nds, m64 — scalar double divide (VEX.F2.0F 5E).
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSD dst, nds, src — register form.
void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VDIVSS dst, nds, m32 — scalar single divide (VEX.F3.0F 5E).
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_operand(dst, src);
}

// VDIVSS dst, nds, src — register form.
void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x5E);
  emit_int8((unsigned char)(0xC0 | encode));
}
5048
// VFMADD231SD — fused multiply-add, scalar double: dst = src1*src2 + dst
// (VEX.66.0F38.W1 B9); requires FMA.
void Assembler::vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB9);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VFMADD231SS — fused multiply-add, scalar single: dst = src1*src2 + dst
// (VEX.66.0F38.W0 B9); requires FMA.
void Assembler::vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  assert(VM_Version::supports_fma(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xB9);
  emit_int8((unsigned char)(0xC0 | encode));
}
5064
// VMULSD dst, nds, m64 — scalar double multiply (VEX.F2.0F 59).
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}

// VMULSD dst, nds, src — register form.
void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}

// VMULSS dst, nds, m32 — scalar single multiply (VEX.F3.0F 59).
void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
5094
vmulss(XMMRegister dst,XMMRegister nds,XMMRegister src)5095 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5096 assert(VM_Version::supports_avx(), "");
5097 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5098 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
5099 emit_int8(0x59);
5100 emit_int8((unsigned char)(0xC0 | encode));
5101 }
5102
vsubsd(XMMRegister dst,XMMRegister nds,Address src)5103 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
5104 assert(VM_Version::supports_avx(), "");
5105 InstructionMark im(this);
5106 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5107 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
5108 attributes.set_rex_vex_w_reverted();
5109 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5110 emit_int8(0x5C);
5111 emit_operand(dst, src);
5112 }
5113
vsubsd(XMMRegister dst,XMMRegister nds,XMMRegister src)5114 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5115 assert(VM_Version::supports_avx(), "");
5116 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5117 attributes.set_rex_vex_w_reverted();
5118 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
5119 emit_int8(0x5C);
5120 emit_int8((unsigned char)(0xC0 | encode));
5121 }
5122
vsubss(XMMRegister dst,XMMRegister nds,Address src)5123 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
5124 assert(VM_Version::supports_avx(), "");
5125 InstructionMark im(this);
5126 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5127 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
5128 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
5129 emit_int8(0x5C);
5130 emit_operand(dst, src);
5131 }
5132
vsubss(XMMRegister dst,XMMRegister nds,XMMRegister src)5133 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
5134 assert(VM_Version::supports_avx(), "");
5135 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
5136 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
5137 emit_int8(0x5C);
5138 emit_int8((unsigned char)(0xC0 | encode));
5139 }
5140
//====================VECTOR ARITHMETIC=====================================

// Floating-point vector arithmetic

addpd(XMMRegister dst,XMMRegister src)5145 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
5146 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5147 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5148 attributes.set_rex_vex_w_reverted();
5149 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5150 emit_int8(0x58);
5151 emit_int8((unsigned char)(0xC0 | encode));
5152 }
5153
addpd(XMMRegister dst,Address src)5154 void Assembler::addpd(XMMRegister dst, Address src) {
5155 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5156 InstructionMark im(this);
5157 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5158 attributes.set_rex_vex_w_reverted();
5159 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5160 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5161 emit_int8(0x58);
5162 emit_operand(dst, src);
5163 }
5164
5165
addps(XMMRegister dst,XMMRegister src)5166 void Assembler::addps(XMMRegister dst, XMMRegister src) {
5167 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5168 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5169 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5170 emit_int8(0x58);
5171 emit_int8((unsigned char)(0xC0 | encode));
5172 }
5173
vaddpd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5174 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5175 assert(VM_Version::supports_avx(), "");
5176 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5177 attributes.set_rex_vex_w_reverted();
5178 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5179 emit_int8(0x58);
5180 emit_int8((unsigned char)(0xC0 | encode));
5181 }
5182
vaddps(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5183 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5184 assert(VM_Version::supports_avx(), "");
5185 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5186 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5187 emit_int8(0x58);
5188 emit_int8((unsigned char)(0xC0 | encode));
5189 }
5190
vaddpd(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5191 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5192 assert(VM_Version::supports_avx(), "");
5193 InstructionMark im(this);
5194 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5195 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5196 attributes.set_rex_vex_w_reverted();
5197 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5198 emit_int8(0x58);
5199 emit_operand(dst, src);
5200 }
5201
vaddps(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5202 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5203 assert(VM_Version::supports_avx(), "");
5204 InstructionMark im(this);
5205 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5206 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5207 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5208 emit_int8(0x58);
5209 emit_operand(dst, src);
5210 }
5211
subpd(XMMRegister dst,XMMRegister src)5212 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
5213 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5214 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5215 attributes.set_rex_vex_w_reverted();
5216 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5217 emit_int8(0x5C);
5218 emit_int8((unsigned char)(0xC0 | encode));
5219 }
5220
subps(XMMRegister dst,XMMRegister src)5221 void Assembler::subps(XMMRegister dst, XMMRegister src) {
5222 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5223 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5224 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5225 emit_int8(0x5C);
5226 emit_int8((unsigned char)(0xC0 | encode));
5227 }
5228
vsubpd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5229 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5230 assert(VM_Version::supports_avx(), "");
5231 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5232 attributes.set_rex_vex_w_reverted();
5233 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5234 emit_int8(0x5C);
5235 emit_int8((unsigned char)(0xC0 | encode));
5236 }
5237
vsubps(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5238 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5239 assert(VM_Version::supports_avx(), "");
5240 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5241 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5242 emit_int8(0x5C);
5243 emit_int8((unsigned char)(0xC0 | encode));
5244 }
5245
vsubpd(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5246 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5247 assert(VM_Version::supports_avx(), "");
5248 InstructionMark im(this);
5249 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5250 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5251 attributes.set_rex_vex_w_reverted();
5252 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5253 emit_int8(0x5C);
5254 emit_operand(dst, src);
5255 }
5256
vsubps(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5257 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5258 assert(VM_Version::supports_avx(), "");
5259 InstructionMark im(this);
5260 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5261 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5262 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5263 emit_int8(0x5C);
5264 emit_operand(dst, src);
5265 }
5266
mulpd(XMMRegister dst,XMMRegister src)5267 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
5268 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5269 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5270 attributes.set_rex_vex_w_reverted();
5271 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5272 emit_int8(0x59);
5273 emit_int8((unsigned char)(0xC0 | encode));
5274 }
5275
mulpd(XMMRegister dst,Address src)5276 void Assembler::mulpd(XMMRegister dst, Address src) {
5277 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5278 InstructionMark im(this);
5279 InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5280 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5281 attributes.set_rex_vex_w_reverted();
5282 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5283 emit_int8(0x59);
5284 emit_operand(dst, src);
5285 }
5286
mulps(XMMRegister dst,XMMRegister src)5287 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
5288 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5289 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5290 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5291 emit_int8(0x59);
5292 emit_int8((unsigned char)(0xC0 | encode));
5293 }
5294
vmulpd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5295 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5296 assert(VM_Version::supports_avx(), "");
5297 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5298 attributes.set_rex_vex_w_reverted();
5299 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5300 emit_int8(0x59);
5301 emit_int8((unsigned char)(0xC0 | encode));
5302 }
5303
vmulps(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5304 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5305 assert(VM_Version::supports_avx(), "");
5306 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5307 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5308 emit_int8(0x59);
5309 emit_int8((unsigned char)(0xC0 | encode));
5310 }
5311
vmulpd(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5312 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5313 assert(VM_Version::supports_avx(), "");
5314 InstructionMark im(this);
5315 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5316 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5317 attributes.set_rex_vex_w_reverted();
5318 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5319 emit_int8(0x59);
5320 emit_operand(dst, src);
5321 }
5322
vmulps(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5323 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5324 assert(VM_Version::supports_avx(), "");
5325 InstructionMark im(this);
5326 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5327 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5328 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5329 emit_int8(0x59);
5330 emit_operand(dst, src);
5331 }
5332
vfmadd231pd(XMMRegister dst,XMMRegister src1,XMMRegister src2,int vector_len)5333 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
5334 assert(VM_Version::supports_fma(), "");
5335 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5336 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5337 emit_int8((unsigned char)0xB8);
5338 emit_int8((unsigned char)(0xC0 | encode));
5339 }
5340
vfmadd231ps(XMMRegister dst,XMMRegister src1,XMMRegister src2,int vector_len)5341 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int vector_len) {
5342 assert(VM_Version::supports_fma(), "");
5343 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5344 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5345 emit_int8((unsigned char)0xB8);
5346 emit_int8((unsigned char)(0xC0 | encode));
5347 }
5348
vfmadd231pd(XMMRegister dst,XMMRegister src1,Address src2,int vector_len)5349 void Assembler::vfmadd231pd(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
5350 assert(VM_Version::supports_fma(), "");
5351 InstructionMark im(this);
5352 InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5353 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5354 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5355 emit_int8((unsigned char)0xB8);
5356 emit_operand(dst, src2);
5357 }
5358
vfmadd231ps(XMMRegister dst,XMMRegister src1,Address src2,int vector_len)5359 void Assembler::vfmadd231ps(XMMRegister dst, XMMRegister src1, Address src2, int vector_len) {
5360 assert(VM_Version::supports_fma(), "");
5361 InstructionMark im(this);
5362 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5363 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5364 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5365 emit_int8((unsigned char)0xB8);
5366 emit_operand(dst, src2);
5367 }
5368
divpd(XMMRegister dst,XMMRegister src)5369 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
5370 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5371 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5372 attributes.set_rex_vex_w_reverted();
5373 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5374 emit_int8(0x5E);
5375 emit_int8((unsigned char)(0xC0 | encode));
5376 }
5377
divps(XMMRegister dst,XMMRegister src)5378 void Assembler::divps(XMMRegister dst, XMMRegister src) {
5379 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5380 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5381 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5382 emit_int8(0x5E);
5383 emit_int8((unsigned char)(0xC0 | encode));
5384 }
5385
vdivpd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5386 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5387 assert(VM_Version::supports_avx(), "");
5388 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5389 attributes.set_rex_vex_w_reverted();
5390 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5391 emit_int8(0x5E);
5392 emit_int8((unsigned char)(0xC0 | encode));
5393 }
5394
vdivps(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5395 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5396 assert(VM_Version::supports_avx(), "");
5397 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5398 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5399 emit_int8(0x5E);
5400 emit_int8((unsigned char)(0xC0 | encode));
5401 }
5402
vdivpd(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5403 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5404 assert(VM_Version::supports_avx(), "");
5405 InstructionMark im(this);
5406 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5407 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5408 attributes.set_rex_vex_w_reverted();
5409 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5410 emit_int8(0x5E);
5411 emit_operand(dst, src);
5412 }
5413
vdivps(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5414 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5415 assert(VM_Version::supports_avx(), "");
5416 InstructionMark im(this);
5417 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5418 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5419 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5420 emit_int8(0x5E);
5421 emit_operand(dst, src);
5422 }
5423
vsqrtpd(XMMRegister dst,XMMRegister src,int vector_len)5424 void Assembler::vsqrtpd(XMMRegister dst, XMMRegister src, int vector_len) {
5425 assert(VM_Version::supports_avx(), "");
5426 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5427 attributes.set_rex_vex_w_reverted();
5428 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5429 emit_int8(0x51);
5430 emit_int8((unsigned char)(0xC0 | encode));
5431 }
5432
vsqrtpd(XMMRegister dst,Address src,int vector_len)5433 void Assembler::vsqrtpd(XMMRegister dst, Address src, int vector_len) {
5434 assert(VM_Version::supports_avx(), "");
5435 InstructionMark im(this);
5436 InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5437 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5438 attributes.set_rex_vex_w_reverted();
5439 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5440 emit_int8(0x51);
5441 emit_operand(dst, src);
5442 }
5443
vsqrtps(XMMRegister dst,XMMRegister src,int vector_len)5444 void Assembler::vsqrtps(XMMRegister dst, XMMRegister src, int vector_len) {
5445 assert(VM_Version::supports_avx(), "");
5446 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5447 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5448 emit_int8(0x51);
5449 emit_int8((unsigned char)(0xC0 | encode));
5450 }
5451
vsqrtps(XMMRegister dst,Address src,int vector_len)5452 void Assembler::vsqrtps(XMMRegister dst, Address src, int vector_len) {
5453 assert(VM_Version::supports_avx(), "");
5454 InstructionMark im(this);
5455 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5456 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5457 vex_prefix(src, 0, dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5458 emit_int8(0x51);
5459 emit_operand(dst, src);
5460 }
5461
andpd(XMMRegister dst,XMMRegister src)5462 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
5463 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5464 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5465 attributes.set_rex_vex_w_reverted();
5466 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5467 emit_int8(0x54);
5468 emit_int8((unsigned char)(0xC0 | encode));
5469 }
5470
andps(XMMRegister dst,XMMRegister src)5471 void Assembler::andps(XMMRegister dst, XMMRegister src) {
5472 NOT_LP64(assert(VM_Version::supports_sse(), ""));
5473 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5474 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5475 emit_int8(0x54);
5476 emit_int8((unsigned char)(0xC0 | encode));
5477 }
5478
andps(XMMRegister dst,Address src)5479 void Assembler::andps(XMMRegister dst, Address src) {
5480 NOT_LP64(assert(VM_Version::supports_sse(), ""));
5481 InstructionMark im(this);
5482 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5483 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5484 simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5485 emit_int8(0x54);
5486 emit_operand(dst, src);
5487 }
5488
andpd(XMMRegister dst,Address src)5489 void Assembler::andpd(XMMRegister dst, Address src) {
5490 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5491 InstructionMark im(this);
5492 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5493 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5494 attributes.set_rex_vex_w_reverted();
5495 simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5496 emit_int8(0x54);
5497 emit_operand(dst, src);
5498 }
5499
vandpd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5500 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5501 assert(VM_Version::supports_avx(), "");
5502 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5503 attributes.set_rex_vex_w_reverted();
5504 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5505 emit_int8(0x54);
5506 emit_int8((unsigned char)(0xC0 | encode));
5507 }
5508
vandps(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5509 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5510 assert(VM_Version::supports_avx(), "");
5511 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5512 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5513 emit_int8(0x54);
5514 emit_int8((unsigned char)(0xC0 | encode));
5515 }
5516
vandpd(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5517 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5518 assert(VM_Version::supports_avx(), "");
5519 InstructionMark im(this);
5520 InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5521 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
5522 attributes.set_rex_vex_w_reverted();
5523 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5524 emit_int8(0x54);
5525 emit_operand(dst, src);
5526 }
5527
vandps(XMMRegister dst,XMMRegister nds,Address src,int vector_len)5528 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
5529 assert(VM_Version::supports_avx(), "");
5530 InstructionMark im(this);
5531 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5532 attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
5533 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5534 emit_int8(0x54);
5535 emit_operand(dst, src);
5536 }
5537
unpckhpd(XMMRegister dst,XMMRegister src)5538 void Assembler::unpckhpd(XMMRegister dst, XMMRegister src) {
5539 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5540 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5541 attributes.set_rex_vex_w_reverted();
5542 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5543 emit_int8(0x15);
5544 emit_int8((unsigned char)(0xC0 | encode));
5545 }
5546
unpcklpd(XMMRegister dst,XMMRegister src)5547 void Assembler::unpcklpd(XMMRegister dst, XMMRegister src) {
5548 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5549 InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5550 attributes.set_rex_vex_w_reverted();
5551 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5552 emit_int8(0x14);
5553 emit_int8((unsigned char)(0xC0 | encode));
5554 }
5555
xorpd(XMMRegister dst,XMMRegister src)5556 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
5557 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
5558 InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5559 attributes.set_rex_vex_w_reverted();
5560 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5561 emit_int8(0x57);
5562 emit_int8((unsigned char)(0xC0 | encode));
5563 }
5564
xorps(XMMRegister dst,XMMRegister src)5565 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
5566 NOT_LP64(assert(VM_Version::supports_sse(), ""));
5567 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
5568 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
5569 emit_int8(0x57);
5570 emit_int8((unsigned char)(0xC0 | encode));
5571 }
5572
// xorpd xmm, m128 — memory-operand form of xorpd.
void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this); // records instruction start for relocation bookkeeping
  InstructionAttr attributes(AVX_128bit, /* rex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX full-vector tuple with 64-bit element granularity for disp8 compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src); // ModRM + SIB + displacement for the memory operand
}
5583
// xorps xmm, m128 — memory-operand form of xorps.
void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  // 32-bit element granularity (packed singles) for EVEX disp8 compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  simd_prefix(dst, dst, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}
5593
// vxorpd dst, nds, src — AVX three-operand XOR of packed doubles (VEX.66.0F 57 /r).
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}
5602
// vxorps dst, nds, src — AVX three-operand XOR of packed singles (VEX.0F 57 /r).
void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_int8((unsigned char)(0xC0 | encode));
}
5610
// vxorpd dst, nds, m — memory-operand form of vxorpd.
void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ !_legacy_mode_dq, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  // 64-bit element granularity (packed doubles) for EVEX disp8 compression.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}
5621
// vxorps dst, nds, m — memory-operand form of vxorps.
void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8(0x57);
  emit_operand(dst, src);
}
5631
5632 // Integer vector arithmetic
vphaddw(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5633 void Assembler::vphaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5634 assert(VM_Version::supports_avx() && (vector_len == 0) ||
5635 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
5636 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
5637 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5638 emit_int8(0x01);
5639 emit_int8((unsigned char)(0xC0 | encode));
5640 }
5641
vphaddd(XMMRegister dst,XMMRegister nds,XMMRegister src,int vector_len)5642 void Assembler::vphaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
5643 assert(VM_Version::supports_avx() && (vector_len == 0) ||
5644 VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
5645 InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
5646 int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
5647 emit_int8(0x02);
5648 emit_int8((unsigned char)(0xC0 | encode));
5649 }
5650
// paddb xmm, xmm — add packed bytes (encoding 66 0F FC /r).
void Assembler::paddb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_int8((unsigned char)(0xC0 | encode));
}
5658
// paddw xmm, xmm — add packed words (encoding 66 0F FD /r).
void Assembler::paddw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_int8((unsigned char)(0xC0 | encode));
}
5666
// paddd xmm, xmm — add packed dwords (encoding 66 0F FE /r).
void Assembler::paddd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_int8((unsigned char)(0xC0 | encode));
}
5674
// paddd xmm, m128 — memory-operand form of paddd.
void Assembler::paddd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}
5683
// paddq xmm, xmm — add packed qwords (encoding 66 0F D4 /r).
void Assembler::paddq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_int8((unsigned char)(0xC0 | encode));
}
5692
// phaddw xmm, xmm — SSSE3 horizontal add of packed words (66 0F 38 01 /r).
void Assembler::phaddw(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x01);
  emit_int8((unsigned char)(0xC0 | encode));
}
5700
// phaddd xmm, xmm — SSSE3 horizontal add of packed dwords (66 0F 38 02 /r).
void Assembler::phaddd(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse3(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x02);
  emit_int8((unsigned char)(0xC0 | encode));
}
5708
// vpaddb dst, nds, src — AVX add packed bytes (VEX.66.0F FC /r).
void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_int8((unsigned char)(0xC0 | encode));
}
5716
// vpaddw dst, nds, src — AVX add packed words (VEX.66.0F FD /r).
void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_int8((unsigned char)(0xC0 | encode));
}
5724
// vpaddd dst, nds, src — AVX add packed dwords (VEX.66.0F FE /r).
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_int8((unsigned char)(0xC0 | encode));
}
5732
// vpaddq dst, nds, src — AVX add packed qwords (VEX.66.0F D4 /r).
void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_int8((unsigned char)(0xC0 | encode));
}
5741
// vpaddb dst, nds, m — memory-operand form of vpaddb.
void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // Full-vector-mem tuple: byte-granular ops have no disp8 element scaling.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFC);
  emit_operand(dst, src);
}
5751
// vpaddw dst, nds, m — memory-operand form of vpaddw.
void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFD);
  emit_operand(dst, src);
}
5761
// vpaddd dst, nds, m — memory-operand form of vpaddd.
void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFE);
  emit_operand(dst, src);
}
5771
// vpaddq dst, nds, m — memory-operand form of vpaddq.
void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD4);
  emit_operand(dst, src);
}
5782
// psubb xmm, xmm — subtract packed bytes (encoding 66 0F F8 /r).
void Assembler::psubb(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(0xC0 | encode));
}
5790
// psubw xmm, xmm — subtract packed words (encoding 66 0F F9 /r).
void Assembler::psubw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_int8((unsigned char)(0xC0 | encode));
}
5798
psubd(XMMRegister dst,XMMRegister src)5799 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
5800 InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
5801 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
5802 emit_int8((unsigned char)0xFA);
5803 emit_int8((unsigned char)(0xC0 | encode));
5804 }
5805
// psubq xmm, xmm — subtract packed qwords (encoding 66 0F FB /r).
void Assembler::psubq(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_int8((unsigned char)(0xC0 | encode));
}
5814
// vpsubb dst, nds, src — AVX subtract packed bytes (VEX.66.0F F8 /r).
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_int8((unsigned char)(0xC0 | encode));
}
5822
// vpsubw dst, nds, src — AVX subtract packed words (VEX.66.0F F9 /r).
void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_int8((unsigned char)(0xC0 | encode));
}
5830
// vpsubd dst, nds, src — AVX subtract packed dwords (VEX.66.0F FA /r).
void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_int8((unsigned char)(0xC0 | encode));
}
5838
// vpsubq dst, nds, src — AVX subtract packed qwords (VEX.66.0F FB /r).
void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // Comment fixed: this is the vex_w flag (siblings label it /* vex_w */, the
  // old /* rex_w */ label here was inconsistent; parameter position unchanged).
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_int8((unsigned char)(0xC0 | encode));
}
5847
// vpsubb dst, nds, m — memory-operand form of vpsubb.
void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF8);
  emit_operand(dst, src);
}
5857
// vpsubw dst, nds, m — memory-operand form of vpsubw.
void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF9);
  emit_operand(dst, src);
}
5867
// vpsubd dst, nds, m — memory-operand form of vpsubd.
void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFA);
  emit_operand(dst, src);
}
5877
// vpsubq dst, nds, m — memory-operand form of vpsubq.
void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xFB);
  emit_operand(dst, src);
}
5888
// pmullw xmm, xmm — multiply packed words, store low halves (66 0F D5 /r).
void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_int8((unsigned char)(0xC0 | encode));
}
5896
// pmulld xmm, xmm — SSE4.1 multiply packed dwords, low result (66 0F 38 40 /r).
void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
5904
// vpmullw dst, nds, src — AVX multiply packed words, low result (VEX.66.0F D5 /r).
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_int8((unsigned char)(0xC0 | encode));
}
5912
// vpmulld dst, nds, src — AVX multiply packed dwords, low result (VEX.66.0F38 40 /r).
void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
5920
// vpmullq dst, nds, src — AVX-512 (DQ) multiply packed qwords, low result
// (EVEX.66.0F38.W1 40 /r). EVEX-only: shares opcode 0x40 with vpmulld but is
// distinguished by W=1, hence the forced set_is_evex_instruction().
void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_int8((unsigned char)(0xC0 | encode));
}
5929
// vpmullw dst, nds, m — memory-operand form of vpmullw.
void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD5);
  emit_operand(dst, src);
}
5939
// vpmulld dst, nds, m — memory-operand form of vpmulld.
void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}
5949
// vpmullq dst, nds, m — memory-operand form of vpmullq (EVEX-only, W=1).
void Assembler::vpmullq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 2, "requires some form of EVEX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_dq, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x40);
  emit_operand(dst, src);
}
5960
5961 // Shift packed integers left by specified number of bits.
// psllw xmm, imm8 — shift packed words left (66 0F 71 /6 ib).
void Assembler::psllw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF); // immediate shift count, truncated to 8 bits
}
5971
// pslld xmm, imm8 — shift packed dwords left (66 0F 72 /6 ib).
void Assembler::pslld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
5981
// psllq xmm, imm8 — shift packed qwords left (66 0F 73 /6 ib).
void Assembler::psllq(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
5991
// psllw xmm, xmm — shift packed words left by count in xmm (66 0F F1 /r).
void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF1);
  emit_int8((unsigned char)(0xC0 | encode));
}
5999
// pslld xmm, xmm — shift packed dwords left by count in xmm (66 0F F2 /r).
void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6007
// psllq xmm, xmm — shift packed qwords left by count in xmm (66 0F F3 /r).
void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
6016
// vpsllw dst, src, imm8 — AVX shift packed words left (VEX.66.0F 71 /6 ib).
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 71 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6026
// vpslld dst, src, imm8 — AVX shift packed dwords left (VEX.66.0F 72 /6 ib).
void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  // NOTE(review): this extra SSE2 assert appears redundant alongside the AVX
  // check above, and no sibling vpsll*/vpsrl* encoder has it — confirm intent.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM6 is for /6 encoding: 66 0F 72 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6037
// vpsllq dst, src, imm8 — AVX shift packed qwords left (VEX.66.0F 73 /6 ib).
void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM6 is for /6 encoding: 66 0F 73 /6 ib
  int encode = vex_prefix_and_encode(xmm6->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6048
// vpsllw dst, src, xmm — AVX shift packed words left by count in xmm (VEX.66.0F F1 /r).
void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF1);
  emit_int8((unsigned char)(0xC0 | encode));
}
6056
// vpslld dst, src, xmm — AVX shift packed dwords left by count in xmm (VEX.66.0F F2 /r).
void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6064
// vpsllq dst, src, xmm — AVX shift packed qwords left by count in xmm (VEX.66.0F F3 /r).
void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xF3);
  emit_int8((unsigned char)(0xC0 | encode));
}
6073
6074 // Shift packed integers logically right by specified number of bits.
// PSRLW dst, imm8: logical right shift of packed words by an immediate count.
void Assembler::psrlw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6084
// PSRLD dst, imm8: logical right shift of packed dwords by an immediate count.
void Assembler::psrld(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6094
// PSRLQ dst, imm8: logical right shift of packed qwords by an immediate count.
void Assembler::psrlq(XMMRegister dst, int shift) {
  // Do not confuse it with psrldq SSE2 instruction which
  // shifts 128 bit value in xmm register by number of bytes.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6107
// PSRLW dst, xmm-shift: logical right shift of packed words by the count in
// the low qword of shift; encoding 66 0F D1 /r.
void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD1);
  emit_int8((unsigned char)(0xC0 | encode));
}
6115
// PSRLD dst, xmm-shift: logical right shift of packed dwords; encoding 66 0F D2 /r.
void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6123
// PSRLQ dst, xmm-shift: logical right shift of packed qwords; encoding 66 0F D3 /r.
void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xC0 | encode));
}
6132
// VPSRLW dst, src, imm8: logical right shift of packed words by an immediate.
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 71 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6142
// VPSRLD dst, src, imm8: logical right shift of packed dwords by an immediate.
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM2 is for /2 encoding: 66 0F 72 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6152
// VPSRLQ dst, src, imm8: logical right shift of packed qwords by an immediate.
void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  // XMM2 is for /2 encoding: 66 0F 73 /2 ib
  int encode = vex_prefix_and_encode(xmm2->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x73);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6163
// VPSRLW dst, src, xmm-shift: logical right shift of packed words; 66 0F D1 /r.
void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD1);
  emit_int8((unsigned char)(0xC0 | encode));
}
6171
// VPSRLD dst, src, xmm-shift: logical right shift of packed dwords; 66 0F D2 /r.
void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6179
// VPSRLQ dst, src, xmm-shift: logical right shift of packed qwords; 66 0F D3 /r.
void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xD3);
  emit_int8((unsigned char)(0xC0 | encode));
}
6188
// VPSRLVW dst, nds, src: per-element variable logical right shift of words
// (AVX512BW only); encoding EVEX.66.0F38.W1 10 /r.
void Assembler::evpsrlvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x10);
  emit_int8((unsigned char)(0xC0 | encode));
}
6197
// VPSLLVW dst, nds, src: per-element variable left shift of words
// (AVX512BW only); encoding EVEX.66.0F38.W1 12 /r.
void Assembler::evpsllvw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x12);
  emit_int8((unsigned char)(0xC0 | encode));
}
6206
6207 // Shift packed integers arithmetically right by specified number of bits.
// PSRAW dst, imm8: arithmetic right shift of packed words by an immediate.
void Assembler::psraw(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6217
// PSRAD dst, imm8: arithmetic right shift of packed dwords by an immediate.
void Assembler::psrad(XMMRegister dst, int shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6227
// PSRAW dst, xmm-shift: arithmetic right shift of packed words; 66 0F E1 /r.
void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE1);
  emit_int8((unsigned char)(0xC0 | encode));
}
6235
// PSRAD dst, xmm-shift: arithmetic right shift of packed dwords; 66 0F E2 /r.
void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, shift, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6243
// VPSRAW dst, src, imm8: arithmetic right shift of packed words by an immediate.
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 71 /4 ib
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x71);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6253
// VPSRAD dst, src, imm8: arithmetic right shift of packed dwords by an immediate.
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // XMM4 is for /4 encoding: 66 0F 72 /4 ib
  // (comment previously said 0F 71, but the dword form emitted below is 0x72)
  int encode = vex_prefix_and_encode(xmm4->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x72);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8(shift & 0xFF);
}
6263
// VPSRAW dst, src, xmm-shift: arithmetic right shift of packed words; 66 0F E1 /r.
void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE1);
  emit_int8((unsigned char)(0xC0 | encode));
}
6271
// VPSRAD dst, src, xmm-shift: arithmetic right shift of packed dwords; 66 0F E2 /r.
void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src->encoding(), shift->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xE2);
  emit_int8((unsigned char)(0xC0 | encode));
}
6279
6280
6281 // logical operations packed integers
// PAND dst, src: bitwise AND of 128-bit values; encoding 66 0F DB /r.
void Assembler::pand(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6289
// VPAND dst, nds, src (register form): bitwise AND; encoding 66 0F DB /r.
void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6297
// VPAND dst, nds, mem (memory form): bitwise AND with a memory operand.
void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX disp8 compression: full-vector tuple with 32-bit element size.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_operand(dst, src);
}
6307
// VPANDQ dst, nds, src: qword-granular bitwise AND (EVEX, W=1); 66 0F DB /r.
void Assembler::vpandq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6315
6316
// PANDN dst, src: bitwise AND-NOT (dst = ~dst & src); encoding 66 0F DF /r.
void Assembler::pandn(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}
6325
// VPANDN dst, nds, src: bitwise AND-NOT (dst = ~nds & src); 66 0F DF /r.
void Assembler::vpandn(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)(0xC0 | encode));
}
6333
6334
// POR dst, src: bitwise OR of 128-bit values; encoding 66 0F EB /r.
void Assembler::por(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6342
// VPOR dst, nds, src (register form): bitwise OR; encoding 66 0F EB /r.
void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6350
// VPOR dst, nds, mem (memory form): bitwise OR with a memory operand.
void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX disp8 compression: full-vector tuple with 32-bit element size.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_operand(dst, src);
}
6360
// VPORQ dst, nds, src: qword-granular bitwise OR (EVEX, W=1); 66 0F EB /r.
void Assembler::vporq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEB);
  emit_int8((unsigned char)(0xC0 | encode));
}
6368
6369
// PXOR dst, src: bitwise XOR of 128-bit values; encoding 66 0F EF /r.
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_int8((unsigned char)(0xC0 | encode));
}
6377
// VPXOR dst, nds, src (register form): bitwise XOR; encoding 66 0F EF /r.
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_int8((unsigned char)(0xC0 | encode));
}
6385
// VPXOR dst, nds, mem (memory form): bitwise XOR with a memory operand.
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(UseAVX > 0, "requires some form of AVX");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX disp8 compression: full-vector tuple with 32-bit element size.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
6395
// VPXORQ dst, nds, src: qword-granular bitwise XOR (EVEX, W=1); 66 0F EF /r.
void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_int8((unsigned char)(0xC0 | encode));
}
6404
// VPXORQ dst, nds, mem: qword-granular bitwise XOR with a memory operand (EVEX).
void Assembler::evpxorq(XMMRegister dst, XMMRegister nds, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "requires EVEX support");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  // EVEX disp8 compression: full-vector tuple with 64-bit element size.
  attributes.set_address_attributes(/* tuple_type */ EVEX_FV, /* input_size_in_bits */ EVEX_64bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xEF);
  emit_operand(dst, src);
}
6416
6417
6418 // vinserti forms
6419
// VINSERTI128 dst, nds, src, imm8: insert a 128-bit lane of src into the copy
// of nds selected by imm8; encoding VEX.256.66.0F3A 38 /r ib.
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
6431
// VINSERTI128 dst, nds, mem, imm8: memory-source form of the 128-bit lane insert.
void Assembler::vinserti128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
6446
// VINSERTI32X4 dst, nds, src, imm8: insert a 128-bit lane into the zmm copy of
// nds at the quadrant selected by imm8; encoding EVEX.512.66.0F3A.W0 38 /r ib.
void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x38);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
6461
vinserti32x4(XMMRegister dst,XMMRegister nds,Address src,uint8_t imm8)6462 void Assembler::vinserti32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
6463 assert(VM_Version::supports_avx(), "");
6464 assert(dst != xnoreg, "sanity");
6465 assert(imm8 <= 0x03, "imm8: %u", imm8);
6466 InstructionMark im(this);
6467 InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6468 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
6469 attributes.set_is_evex_instruction();
6470 vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6471 emit_int8(0x18);
6472 emit_operand(dst, src);
6473 // 0x00 - insert into q0 128 bits (0..127)
6474 // 0x01 - insert into q1 128 bits (128..255)
6475 // 0x02 - insert into q2 128 bits (256..383)
6476 // 0x03 - insert into q3 128 bits (384..511)
6477 emit_int8(imm8 & 0x03);
6478 }
6479
// VINSERTI64X4 dst, nds, src, imm8: insert a 256-bit lane into the zmm copy of
// nds at the half selected by imm8; encoding EVEX.512.66.0F3A.W1 3A /r ib.
void Assembler::vinserti64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3A);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
6492
6493
6494 // vinsertf forms
6495
// VINSERTF128 dst, nds, src, imm8: insert a 128-bit lane of src into the copy
// of nds selected by imm8; encoding VEX.256.66.0F3A 18 /r ib.
void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
6507
// VINSERTF128 dst, nds, mem, imm8: memory-source form of the 128-bit lane insert.
void Assembler::vinsertf128(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into lower 128 bits
  // 0x01 - insert into upper 128 bits
  emit_int8(imm8 & 0x01);
}
6522
// VINSERTF32X4 dst, nds, src, imm8: insert a 128-bit lane into the zmm copy of
// nds at the quadrant selected by imm8; opcode 0F 3A 18.
// NOTE(review): unlike vinserti32x4 above, this asserts supports_avx2() and
// does not call set_is_evex_instruction() despite AVX_512bit — confirm whether
// the EVEX selection is handled implicitly by vex_prefix_and_encode here.
void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
6536
// VINSERTF32X4 dst, nds, mem, imm8: memory-source form of the 128-bit lane insert.
// NOTE(review): asserts only supports_avx() and omits set_is_evex_instruction()
// despite the AVX_512bit vector length — confirm against the vinserti32x4
// overloads above whether this is intentional.
void Assembler::vinsertf32x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
  // 0x00 - insert into q0 128 bits (0..127)
  // 0x01 - insert into q1 128 bits (128..255)
  // 0x02 - insert into q2 128 bits (256..383)
  // 0x03 - insert into q3 128 bits (384..511)
  emit_int8(imm8 & 0x03);
}
6553
// VINSERTF64X4 dst, nds, src, imm8: insert a 256-bit lane into the zmm copy of
// nds at the half selected by imm8; encoding EVEX.512.66.0F3A.W1 1A /r ib.
void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
6566
// VINSERTF64X4 dst, nds, mem, imm8: memory-source form of the 256-bit lane insert.
void Assembler::vinsertf64x4(XMMRegister dst, XMMRegister nds, Address src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  // EVEX disp8 compression: tuple4 with 64-bit element size (256-bit memory operand).
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_is_evex_instruction();
  vex_prefix(src, nds->encoding(), dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1A);
  emit_operand(dst, src);
  // 0x00 - insert into lower 256 bits
  // 0x01 - insert into upper 256 bits
  emit_int8(imm8 & 0x01);
}
6582
6583
6584 // vextracti forms
6585
// VEXTRACTI128 dst, src, imm8: extract the 128-bit lane of src selected by
// imm8 into dst; encoding VEX.256.66.0F3A 39 /r ib. Note reg/rm roles are
// swapped relative to insert (src encodes in reg field, dst in rm).
void Assembler::vextracti128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
6597
// VEXTRACTI128, memory destination: store the 128-bit lane of ymm src selected by
// imm8 to dst (AVX2, opcode 0x39). src supplies the ModRM reg field; dst is the
// memory operand.
void Assembler::vextracti128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx2(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // Store form: do not force zeroing/merging context.
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
6613
// VEXTRACTI32X4: extract one of four 128-bit lanes of zmm src (selected by imm8)
// into xmm dst (EVEX, opcode 0x39, W0).
void Assembler::vextracti32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6628
// VEXTRACTI32X4, memory destination: store one of four 128-bit lanes of zmm src
// (selected by imm8) to dst (EVEX, opcode 0x39, W0).
void Assembler::vextracti32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // Store form: do not force zeroing/merging context.
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6647
// VEXTRACTI64X2: extract one of four 128-bit lanes of zmm src (selected by imm8)
// into xmm dst (EVEX, opcode 0x39 with W1; requires AVX512DQ).
void Assembler::vextracti64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x39);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6662
// VEXTRACTI64X4: extract the 256-bit half of zmm src selected by imm8 into ymm dst
// (EVEX, opcode 0x3B with W1).
void Assembler::vextracti64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x3B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
6675
vextracti64x4(Address dst,XMMRegister src,uint8_t imm8)6676 void Assembler::vextracti64x4(Address dst, XMMRegister src, uint8_t imm8) {
6677 assert(VM_Version::supports_evex(), "");
6678 assert(src != xnoreg, "sanity");
6679 assert(imm8 <= 0x01, "imm8: %u", imm8);
6680 InstructionMark im(this);
6681 InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
6682 attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_64bit);
6683 attributes.reset_is_clear_context();
6684 attributes.set_is_evex_instruction();
6685 vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
6686 emit_int8(0x38);
6687 emit_operand(src, dst);
6688 // 0x00 - extract from lower 256 bits
6689 // 0x01 - extract from upper 256 bits
6690 emit_int8(imm8 & 0x01);
6691 }
6692 // vextractf forms
6693
// VEXTRACTF128: extract the 128-bit lane of ymm src selected by imm8 into xmm dst
// (AVX, opcode 0x19).
void Assembler::vextractf128(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
6705
// VEXTRACTF128, memory destination: store the 128-bit lane of ymm src selected by
// imm8 to dst (AVX, opcode 0x19).
void Assembler::vextractf128(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_256bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // Store form: do not force zeroing/merging context.
  attributes.reset_is_clear_context();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from lower 128 bits
  // 0x01 - extract from upper 128 bits
  emit_int8(imm8 & 0x01);
}
6721
// VEXTRACTF32X4: extract one of four 128-bit lanes of zmm src (selected by imm8)
// into xmm dst (EVEX, opcode 0x19, W0).
void Assembler::vextractf32x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6736
// VEXTRACTF32X4, memory destination: store one of four 128-bit lanes of zmm src
// (selected by imm8) to dst (EVEX, opcode 0x19, W0).
void Assembler::vextractf32x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4, /* input_size_in_bits */ EVEX_32bit);
  // Store form: do not force zeroing/merging context.
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_operand(src, dst);
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6755
// VEXTRACTF64X2: extract one of four 128-bit lanes of zmm src (selected by imm8)
// into xmm dst (EVEX, opcode 0x19 with W1; requires AVX512DQ).
void Assembler::vextractf64x2(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_avx512dq(), "");
  assert(imm8 <= 0x03, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from bits 127:0
  // 0x01 - extract from bits 255:128
  // 0x02 - extract from bits 383:256
  // 0x03 - extract from bits 511:384
  emit_int8(imm8 & 0x03);
}
6770
// VEXTRACTF64X4: extract the 256-bit half of zmm src selected by imm8 into ymm dst
// (EVEX, opcode 0x1B with W1).
void Assembler::vextractf64x4(XMMRegister dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(src->encoding(), 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_int8((unsigned char)(0xC0 | encode));
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
6783
// VEXTRACTF64X4, memory destination: store the 256-bit half of zmm src selected by
// imm8 to dst (EVEX, opcode 0x1B with W1).
void Assembler::vextractf64x4(Address dst, XMMRegister src, uint8_t imm8) {
  assert(VM_Version::supports_evex(), "");
  assert(src != xnoreg, "sanity");
  assert(imm8 <= 0x01, "imm8: %u", imm8);
  InstructionMark im(this);
  InstructionAttr attributes(AVX_512bit, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T4,/* input_size_in_bits */ EVEX_64bit);
  // Store form: do not force zeroing/merging context.
  attributes.reset_is_clear_context();
  attributes.set_is_evex_instruction();
  vex_prefix(dst, 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x1B);
  emit_operand(src, dst);
  // 0x00 - extract from lower 256 bits
  // 0x01 - extract from upper 256 bits
  emit_int8(imm8 & 0x01);
}
6800
6801 // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
// VPBROADCASTB reg-reg form: replicate the low byte of src across dst (opcode 0x78).
void Assembler::vpbroadcastb(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_int8((unsigned char)(0xC0 | encode));
}
6809
// VPBROADCASTB memory form: replicate the byte at src across dst (opcode 0x78).
void Assembler::vpbroadcastb(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_8bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x78);
  emit_operand(dst, src);
}
6821
6822 // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
// VPBROADCASTW reg-reg form: replicate the low word of src across dst (opcode 0x79).
void Assembler::vpbroadcastw(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_int8((unsigned char)(0xC0 | encode));
}
6830
// VPBROADCASTW memory form: replicate the 16-bit value at src across dst (opcode 0x79).
void Assembler::vpbroadcastw(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_16bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x79);
  emit_operand(dst, src);
}
6842
6843 // xmm/mem sourced byte/word/dword/qword replicate
6844
6845 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
// VPBROADCASTD reg-reg form: replicate the low dword of src across dst (opcode 0x58).
void Assembler::vpbroadcastd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(UseAVX >= 2, "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_int8((unsigned char)(0xC0 | encode));
}
6853
// VPBROADCASTD memory form: replicate the dword at src across dst (opcode 0x58).
void Assembler::vpbroadcastd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x58);
  emit_operand(dst, src);
}
6865
6866 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
// VPBROADCASTQ reg-reg form: replicate the low qword of src across dst (opcode 0x59).
// W bit is set only when EVEX is available; the reverted-W note lets the encoder
// fall back cleanly on non-EVEX hardware.
void Assembler::vpbroadcastq(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_int8((unsigned char)(0xC0 | encode));
}
6875
// VPBROADCASTQ memory form: replicate the qword at src across dst (opcode 0x59).
void Assembler::vpbroadcastq(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x59);
  emit_operand(dst, src);
}
// VBROADCASTI64X2 reg-reg form: replicate the low 128 bits (two qwords) of src
// across dst (opcode 0x5A; requires AVX512DQ; 128-bit vector length is invalid).
void Assembler::evbroadcasti64x2(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_int8((unsigned char)(0xC0 | encode));
}
6897
// VBROADCASTI64X2 memory form: replicate the 128-bit value at src across dst
// (opcode 0x5A; requires AVX512DQ; 128-bit vector length is invalid).
void Assembler::evbroadcasti64x2(XMMRegister dst, Address src, int vector_len) {
  assert(vector_len != Assembler::AVX_128bit, "");
  assert(VM_Version::supports_avx512dq(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  attributes.set_address_attributes(/* tuple_type */ EVEX_T2, /* input_size_in_bits */ EVEX_64bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x5A);
  emit_operand(dst, src);
}
6911
6912 // scalar single/double precision replicate
6913
6914 // duplicate single precision data from src into programmed locations in dest : requires AVX512VL
// VBROADCASTSS reg-reg form: replicate the low single-precision element of src
// across dst (opcode 0x18).
void Assembler::vpbroadcastss(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_int8((unsigned char)(0xC0 | encode));
}
6922
// VBROADCASTSS memory form: replicate the 32-bit float at src across dst (opcode 0x18).
void Assembler::vpbroadcastss(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x18);
  emit_operand(dst, src);
}
6934
6935 // duplicate double precision data from src into programmed locations in dest : requires AVX512VL
// VBROADCASTSD reg-reg form: replicate the low double-precision element of src
// across dst (opcode 0x19; W set only when EVEX is available).
void Assembler::vpbroadcastsd(XMMRegister dst, XMMRegister src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_rex_vex_w_reverted();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_int8((unsigned char)(0xC0 | encode));
}
6944
// VBROADCASTSD memory form: replicate the 64-bit double at src across dst (opcode 0x19).
void Assembler::vpbroadcastsd(XMMRegister dst, Address src, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x19);
  emit_operand(dst, src);
}
6957
6958
6959 // gpr source broadcast forms
6960
6961 // duplicate 1-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
// VPBROADCASTB from a GPR: replicate the low byte of general register src across
// dst (EVEX-only form, opcode 0x7A; requires AVX512BW).
void Assembler::evpbroadcastb(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7A);
  emit_int8((unsigned char)(0xC0 | encode));
}
6970
6971 // duplicate 2-byte integer data from src into programmed locations in dest : requires AVX512BW and AVX512VL
// VPBROADCASTW from a GPR: replicate the low word of general register src across
// dst (EVEX-only form, opcode 0x7B; requires AVX512BW).
void Assembler::evpbroadcastw(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_avx512bw(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7B);
  emit_int8((unsigned char)(0xC0 | encode));
}
6980
6981 // duplicate 4-byte integer data from src into programmed locations in dest : requires AVX512VL
// VPBROADCASTD from a GPR: replicate the low dword of general register src across
// dst (EVEX-only form, opcode 0x7C with W0).
void Assembler::evpbroadcastd(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}
6990
6991 // duplicate 8-byte integer data from src into programmed locations in dest : requires AVX512VL
// VPBROADCASTQ from a GPR: replicate general register src across dst.
// Same opcode byte (0x7C) as the dword form; W=1 selects the quadword variant.
void Assembler::evpbroadcastq(XMMRegister dst, Register src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8(0x7C);
  emit_int8((unsigned char)(0xC0 | encode));
}
7000
// VPGATHERDD (EVEX form, opcode 0x90): gather dwords from src using dword indices,
// under opmask `mask`. The mask is both a write-mask and the completion mask the
// hardware clears as elements are gathered.
void Assembler::evpgatherdd(XMMRegister dst, KRegister mask, Address src, int vector_len) {
  assert(VM_Version::supports_evex(), "");
  assert(dst != xnoreg, "sanity");
  InstructionMark im(this);
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  // Gather merges into dst under the mask; never encode zeroing.
  attributes.reset_is_clear_context();
  attributes.set_embedded_opmask_register_specifier(mask);
  attributes.set_is_evex_instruction();
  // swap src<->dst for encoding
  vex_prefix(src, 0, dst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0x90);
  emit_operand(dst, src);
}
7015
7016 // Carry-Less Multiplication Quadword
// PCLMULQDQ: carry-less multiply of one qword of dst by one qword of src
// (66 0F 3A 44 /r ib); `mask` is the immediate selecting which qwords multiply.
void Assembler::pclmulqdq(XMMRegister dst, XMMRegister src, int mask) {
  assert(VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
7025
7026 // Carry-Less Multiplication Quadword
// VPCLMULQDQ (VEX, 3-operand): carry-less multiply of selected qwords of nds and
// src into dst; `mask` is the qword-selector immediate.
void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) {
  assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
7035
// VPCLMULQDQ (EVEX, wide): vectorized carry-less multiply at the given vector
// length; requires the VPCLMULQDQ CPU feature.
void Assembler::evpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask, int vector_len) {
  assert(VM_Version::supports_vpclmulqdq(), "Requires vector carryless multiplication support");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ true);
  attributes.set_is_evex_instruction();
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8(0x44);
  emit_int8((unsigned char)(0xC0 | encode));
  emit_int8((unsigned char)mask);
}
7045
// VZEROUPPER (VEX 0x77): zero the upper bits of all ymm/zmm registers to avoid
// AVX<->SSE transition penalties. No-op on CPUs where it is unsupported or unneeded.
void Assembler::vzeroupper() {
  if (VM_Version::supports_vzeroupper()) {
    InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
    (void)vex_prefix_and_encode(0, 0, 0, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
    emit_int8(0x77);
  }
}
7053
7054 #ifndef _LP64
7055 // 32bit only pieces of the assembler
7056
// CMP r32, imm32 with relocation info (0x81 /7 id); 0xF8 = ModRM mod=11, /7 (CMP).
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_int8((unsigned char)(0xF8 | src1->encoding()));
  emit_data(imm32, rspec, 0);
}
7064
// CMP m32, imm32 with relocation info (0x81 /7 id); rdi (encoding 7) supplies the
// /7 opcode extension in the ModRM reg field.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
  InstructionMark im(this);
  emit_int8((unsigned char)0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}
7072
7073 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
7074 // and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
7075 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
// CMPXCHG8B m64 (0F C7 /1); rcx (encoding 1) supplies the /1 opcode extension.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_int8(0x0F);
  emit_int8((unsigned char)0xC7);
  emit_operand(rcx, adr);
}
7082
// DEC r32, one-byte form 0x48+rd — valid only in 32-bit mode (0x48-0x4F are REX
// prefixes in 64-bit mode), hence this lives under #ifndef _LP64.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_int8(0x48 | dst->encoding());
}
7087
7088 #endif // _LP64
7089
7090 // 64bit typically doesn't use the x87 but needs to for the trig funcs
7091
// FABS: ST(0) = |ST(0)| (D9 E1).
void Assembler::fabs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE1);
}
7096
// FADD ST(0), ST(i) (D8 C0+i).
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}
7100
// FADD m64fp (DC /0); rax (encoding 0) supplies the /0 opcode extension.
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rax, src);
}
7106
// FADD m32fp (D8 /0).
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rax, src);
}
7112
// FADD ST(i), ST(0) — "alternate" destination form (DC C0+i).
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}
7116
// FADDP ST(i), ST(0), then pop the stack (DE C0+i).
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}
7120
// FCHS: negate ST(0) (D9 E0).
void Assembler::fchs() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE0);
}
7125
// FCOM ST(i): compare ST(0) with ST(i) (D8 D0+i).
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}
7129
// FCOMP ST(i): compare ST(0) with ST(i), then pop (D8 D8+i).
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}
7133
// FCOMP m64fp (DC /3); rbx (encoding 3) supplies the /3 opcode extension.
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbx, src);
}
7139
// FCOMP m32fp (D8 /3).
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbx, src);
}
7145
// FCOMPP: compare ST(0) with ST(1) and pop both (DE D9).
void Assembler::fcompp() {
  emit_int8((unsigned char)0xDE);
  emit_int8((unsigned char)0xD9);
}
7150
// FCOS: ST(0) = cos(ST(0)) (D9 FF).
void Assembler::fcos() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFF);
}
7155
// FDECSTP: decrement the x87 stack-top pointer (D9 F6).
void Assembler::fdecstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF6);
}
7160
// FDIV ST(0), ST(i) (D8 F0+i).
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}
7164
// FDIV m64fp (DC /6); rsi (encoding 6) supplies the /6 opcode extension.
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsi, src);
}
7170
// FDIV m32fp (D8 /6).
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsi, src);
}
7176
// Divide with ST(i) as destination (DC F8+i); see the errata note below about
// the Intel manual's tables for these reversed forms.
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}
7180
7181 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
7182 // is erroneous for some of the floating-point instructions below.
7183
// FDIVP (DE F8+i): divide and pop.
void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}
7187
// FDIVR ST(0), ST(i): reversed divide, ST(0) = ST(i) / ST(0) (D8 F8+i).
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}
7191
// FDIVR m64fp (DC /7); rdi (encoding 7) supplies the /7 opcode extension.
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rdi, src);
}
7197
// FDIVR m32fp (D8 /7).
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rdi, src);
}
7203
// Reversed divide with ST(i) as destination (DC F0+i).
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}
7207
// FDIVRP (DE F0+i): reversed divide and pop.
void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}
7211
// FFREE ST(i): mark the register as empty (DD C0+i).
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}
7215
// FILD m64int: push a 64-bit integer onto the x87 stack (DF /5); rbp = /5.
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rbp, adr);
}
7221
// FILD m32int: push a 32-bit integer onto the x87 stack (DB /0); rax = /0.
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rax, adr);
}
7227
// FINCSTP: increment the x87 stack-top pointer (D9 F7).
void Assembler::fincstp() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF7);
}
7232
// FINIT: initialize the FPU (9B = FWAIT prefix, then FNINIT = DB E3).
void Assembler::finit() {
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xDB);
  emit_int8((unsigned char)0xE3);
}
7238
// FIST m32int: DB /2 -- store ST(0) as a 32-bit integer (no pop).
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdx, adr); // /2 opcode extension via rdx (encoding 2)
}
7244
// FISTP m64int: DF /7 -- store ST(0) as a 64-bit integer and pop.
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDF);
  emit_operand32(rdi, adr); // /7 opcode extension via rdi (encoding 7)
}
7250
// FISTP m32int: DB /3 -- store ST(0) as a 32-bit integer and pop.
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbx, adr); // /3 opcode extension via rbx (encoding 3)
}
7256
// FLD1: D9 E8 -- push +1.0 onto the x87 stack.
void Assembler::fld1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE8);
}
7261
// FLD m64fp: DD /0 -- push a double onto the x87 stack.
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rax, adr); // /0 opcode extension via rax (encoding 0)
}
7267
// FLD m32fp: D9 /0 -- push a float onto the x87 stack.
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rax, adr); // /0 opcode extension via rax (encoding 0)
}
7273
7274
// FLD ST(index): D9 C0+index -- push a copy of stack register `index`.
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}
7278
// FLD m80fp: DB /5 -- push an 80-bit extended-precision value.
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rbp, adr); // /5 opcode extension via rbp (encoding 5)
}
7284
// FLDCW m16: D9 /5 -- load the x87 control word.
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbp, src); // /5 opcode extension via rbp (encoding 5)
}
7290
// FLDENV m: D9 /4 -- load the x87 operating environment.
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rsp, src); // /4 opcode extension via rsp (encoding 4)
}
7296
// FLDLG2: D9 EC -- push log10(2).
void Assembler::fldlg2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEC);
}
7301
// FLDLN2: D9 ED -- push ln(2).
void Assembler::fldln2() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xED);
}
7306
// FLDZ: D9 EE -- push +0.0.
void Assembler::fldz() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEE);
}
7311
// Natural log of ST(0): pushes ln(2), swaps so x is in ST(0), then
// fyl2x computes ST(1) * log2(ST(0)) = ln(2) * log2(x) = ln(x).
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}
7317
// Base-10 log of ST(0): log10(2) * log2(x) = log10(x), via fyl2x.
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}
7323
// FMUL ST(0), ST(i): D8 C8+i.
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}
7327
// FMUL m64fp: DC /1 -- ST(0) <- ST(0) * m64fp.
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rcx, src); // /1 opcode extension via rcx (encoding 1)
}
7333
// FMUL m32fp: D8 /1 -- ST(0) <- ST(0) * m32fp.
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rcx, src); // /1 opcode extension via rcx (encoding 1)
}
7339
// FMUL ST(i), ST(0): DC C8+i (result stored in ST(i)).
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}
7343
// FMULP ST(i), ST(0): DE C8+i -- multiply and pop.
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}
7347
// FNSAVE m: DD /6 -- store the full x87 state (no wait prefix) and re-init.
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsi, dst); // /6 opcode extension via rsi (encoding 6)
}
7353
// Store the x87 control word: 9B D9 /7.
// NOTE(review): despite the "fnstcw" name this emits a 0x9B wait prefix,
// i.e. the waiting FSTCW form -- presumably intentional; confirm before
// relying on no-wait semantics.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0x9B);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdi, src); // /7 opcode extension via rdi (encoding 7)
}
7360
// FNSTSW AX: DF E0 -- copy the x87 status word into AX (no wait prefix).
void Assembler::fnstsw_ax() {
  emit_int8((unsigned char)0xDF);
  emit_int8((unsigned char)0xE0);
}
7365
// FPREM: D9 F8 -- partial remainder (truncating division semantics).
void Assembler::fprem() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF8);
}
7370
// FPREM1: D9 F5 -- IEEE partial remainder (round-to-nearest semantics).
void Assembler::fprem1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF5);
}
7375
// FRSTOR m: DD /4 -- restore the full x87 state saved by fnsave.
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rsp, src); // /4 opcode extension via rsp (encoding 4)
}
7381
// FSIN: D9 FE -- ST(0) <- sin(ST(0)).
void Assembler::fsin() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFE);
}
7386
// FSQRT: D9 FA -- ST(0) <- sqrt(ST(0)).
void Assembler::fsqrt() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFA);
}
7391
// FST m64fp: DD /2 -- store ST(0) as a double (no pop).
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rdx, adr); // /2 opcode extension via rdx (encoding 2)
}
7397
// FST m32fp: D9 /2 -- store ST(0) as a float (no pop).
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rdx, adr); // /2 opcode extension via rdx (encoding 2)
}
7403
// FSTP m64fp: DD /3 -- store ST(0) as a double and pop.
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDD);
  emit_operand32(rbx, adr); // /3 opcode extension via rbx (encoding 3)
}
7409
// FSTP ST(index): DD D8+index -- copy ST(0) into ST(index) and pop.
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}
7413
// FSTP m32fp: D9 /3 -- store ST(0) as a float and pop.
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD9);
  emit_operand32(rbx, adr); // /3 opcode extension via rbx (encoding 3)
}
7419
// FSTP m80fp: DB /7 -- store ST(0) as 80-bit extended precision and pop.
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDB);
  emit_operand32(rdi, adr); // /7 opcode extension via rdi (encoding 7)
}
7425
// Subtract stack register: emits D8 E0+i.
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}
7429
// FSUB m64fp: DC /4 -- ST(0) <- ST(0) - m64fp.
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rsp, src); // /4 opcode extension via rsp (encoding 4)
}
7435
// FSUB m32fp: D8 /4 -- ST(0) <- ST(0) - m32fp.
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rsp, src); // /4 opcode extension via rsp (encoding 4)
}
7441
// Subtract-to-stack-register form: emits DC E8+i (result stored in ST(i)).
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}
7445
// Subtract and pop: emits DE E8+i.
void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}
7449
// Reverse subtract: emits D8 E8+i (see the manual-errata note above).
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}
7453
// FSUBR m64fp: DC /5 -- ST(0) <- m64fp - ST(0).
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xDC);
  emit_operand32(rbp, src); // /5 opcode extension via rbp (encoding 5)
}
7459
// FSUBR m32fp: D8 /5 -- ST(0) <- m32fp - ST(0).
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xD8);
  emit_operand32(rbp, src); // /5 opcode extension via rbp (encoding 5)
}
7465
// Reverse subtract, result to ST(i): emits DC E0+i.
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}
7469
// Reverse subtract and pop: emits DE E0+i.
void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}
7473
// Tangent of ST(0): FPTAN (D9 F2) computes tan and pushes 1.0; the
// following FSTP ST(0) (DD D8) pops that 1.0, leaving only the result.
void Assembler::ftan() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF2);
  emit_int8((unsigned char)0xDD);
  emit_int8((unsigned char)0xD8);
}
7480
// FTST: D9 E4 -- compare ST(0) with 0.0, setting x87 condition codes.
void Assembler::ftst() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xE4);
}
7485
// FUCOMI ST(0), ST(i): DB E8+i -- unordered compare setting EFLAGS directly.
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}
7491
// FUCOMIP ST(0), ST(i): DF E8+i -- unordered compare into EFLAGS, then pop.
void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}
7497
// FWAIT: 9B -- wait for any pending unmasked x87 exceptions.
void Assembler::fwait() {
  emit_int8((unsigned char)0x9B);
}
7501
// FXCH ST(i): D9 C8+i -- exchange ST(0) with ST(i).
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}
7505
// FYL2X: D9 F1 -- ST(1) <- ST(1) * log2(ST(0)), then pop.
void Assembler::fyl2x() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF1);
}
7510
// FRNDINT: D9 FC -- round ST(0) to integer per the current rounding mode.
void Assembler::frndint() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xFC);
}
7515
// F2XM1: D9 F0 -- ST(0) <- 2^ST(0) - 1 (valid for |ST(0)| <= 1).
void Assembler::f2xm1() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xF0);
}
7520
// FLDL2E: D9 EA -- push log2(e).
void Assembler::fldl2e() {
  emit_int8((unsigned char)0xD9);
  emit_int8((unsigned char)0xEA);
}
7525
// SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding
// (index 0 = none; then the 66/F3/F2 mandatory prefixes).
static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
// SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding
// (index 1, plain 0F, has no second byte, hence 0).
static int simd_opc[4] = { 0, 0, 0x38, 0x3A };
7530
7531 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
// Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
// Emits, in order: the mandatory SIMD prefix (if any), the REX/REX.W prefix
// required by the operands, then the 0F (and 38/3A) opcode escape bytes.
void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);   // mandatory 66/F3/F2 prefix
  }
  if (rex_w) {
    prefixq(adr, xreg);         // REX.W form
  } else {
    prefix(adr, xreg);
  }
  if (opc > 0) {
    emit_int8(0x0F);            // primary opcode escape
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);          // secondary escape: 38 or 3A
    }
  }
}
7549
// Register-register variant of rex_prefix: emits SIMD prefix, REX(.W), and
// opcode escapes, and returns the reduced (3-bit) register encodings packed
// for the caller to build the ModRM byte.
int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
  if (pre > 0) {
    emit_int8(simd_pre[pre]);   // mandatory 66/F3/F2 prefix
  }
  // prefix*_and_encode emit the REX byte (if needed) and fold the high
  // register bits into it, returning encodings reduced mod 8.
  int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) : prefix_and_encode(dst_enc, src_enc);
  if (opc > 0) {
    emit_int8(0x0F);            // primary opcode escape
    int opc2 = simd_opc[opc];
    if (opc2 > 0) {
      emit_int8(opc2);          // secondary escape: 38 or 3A
    }
  }
  return encode;
}
7564
7565
// Emit a VEX prefix from pre-computed R/X/B extension bits. Uses the compact
// 2-byte form (C5) when possible; falls back to the 3-byte form (C4) when
// X/B/W are needed or the opcode map is 0F_38/0F_3A.
void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, int nds_enc, VexSimdPrefix pre, VexOpcode opc) {
  int vector_len = _attributes->get_vector_len();
  bool vex_w = _attributes->is_rex_vex_w();
  if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
    prefix(VEX_3bytes);

    // byte 1: inverted R/X/B in the high bits, opcode map select in the low bits
    int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
    byte1 = (~byte1) & 0xE0;
    byte1 |= opc;
    emit_int8(byte1);

    // byte 2: W, inverted vvvv (nds register), L (vector length), pp (SIMD prefix)
    int byte2 = ((~nds_enc) & 0xf) << 3;
    byte2 |= (vex_w ? VEX_W : 0) | ((vector_len > 0) ? 4 : 0) | pre;
    emit_int8(byte2);
  } else {
    prefix(VEX_2bytes);

    // single payload byte: inverted R, inverted vvvv, L, pp
    int byte1 = vex_r ? VEX_R : 0;
    byte1 = (~byte1) & 0x80;
    byte1 |= ((~nds_enc) & 0xf) << 3;
    byte1 |= ((vector_len > 0 ) ? 4 : 0) | pre;
    emit_int8(byte1);
  }
}
7590
7591 // This is a 4 byte encoding
// This is a 4 byte encoding: emits the EVEX prefix (62 P0 P1 P2) from
// pre-computed register-extension bits and the current instruction attributes.
void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, bool evex_v, int nds_enc, VexSimdPrefix pre, VexOpcode opc){
  // EVEX 0x62 prefix
  prefix(EVEX_4bytes);
  bool vex_w = _attributes->is_rex_vex_w();
  int evex_encoding = (vex_w ? VEX_W : 0);
  // EVEX.b is not currently used for broadcast of single element or data rounding modes
  _attributes->set_evex_encoding(evex_encoding);

  // P0: byte 2, initialized to RXBR`00mm
  // instead of not'd
  int byte2 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0) | (evex_r ? EVEX_Rb : 0);
  byte2 = (~byte2) & 0xF0;   // R/X/B/R' are stored inverted
  // confine opc opcode extensions in mm bits to lower two bits
  // of form {0F, 0F_38, 0F_3A}
  byte2 |= opc;
  emit_int8(byte2);

  // P1: byte 3 as Wvvvv1pp
  int byte3 = ((~nds_enc) & 0xf) << 3;   // inverted vvvv (nds register)
  // p[10] is always 1
  byte3 |= EVEX_F;
  byte3 |= (vex_w & 1) << 7;
  // confine pre opcode extensions in pp bits to lower two bits
  // of form {66, F3, F2}
  byte3 |= pre;
  emit_int8(byte3);

  // P2: byte 4 as zL'Lbv'aaa
  // kregs are implemented in the low 3 bits as aaa
  int byte4 = (_attributes->is_no_reg_mask()) ?
              0 :
              _attributes->get_embedded_opmask_register_specifier();
  // EVEX.v` for extending EVEX.vvvv or VIDX (stored inverted)
  byte4 |= (evex_v ? 0: EVEX_V);
  // third EVEX.b for broadcast actions
  byte4 |= (_attributes->is_extended_context() ? EVEX_Rb : 0);
  // fourth EVEX.L'L for vector length : 0 is 128, 1 is 256, 2 is 512, currently we do not support 1024
  byte4 |= ((_attributes->get_vector_len())& 0x3) << 5;
  // last is EVEX.z for zero/merge actions
  if (_attributes->is_no_reg_mask() == false) {
    byte4 |= (_attributes->is_clear_context() ? EVEX_Z : 0);
  }
  emit_int8(byte4);
}
7636
// Memory-operand entry point: decides between VEX and EVEX encoding based on
// CPU features and the register encodings in use, then emits the prefix.
void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = ((xreg_enc & 8) == 8) ? 1 : 0;   // high bit of the xmm register field
  bool vex_b = adr.base_needs_rex();
  bool vex_x;
  if (adr.isxmmindex()) {
    vex_x = adr.xmmindex_needs_rex();           // VSIB: X extends the xmm index
  } else {
    vex_x = adr.index_needs_rex();
  }
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction
  // is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
      if ((attributes->get_vector_len() != AVX_512bit) && (nds_enc < 16) && (xreg_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    // Legacy (VEX) encodings can only reach xmm0-15; see the full comment in
    // vex_prefix_and_encode for the legacy-mode rules being checked here.
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())),"XMM register should be 0-15");
    assert(((nds_enc < 16 && xreg_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
  }

  _is_managed = false;   // managed override applies to a single instruction only
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (xreg_enc >= 16);
    bool evex_v;
    // EVEX.V' is set to true when VSIB is used as we may need to use higher order XMM registers (16-31)
    if (adr.isxmmindex())  {
      evex_v = ((adr._xmmindex->encoding() > 15) ? true : false);
    } else {
      evex_v = (nds_enc >= 16);
    }
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    // W-reverted instructions drop REX.W when falling back to VEX encoding
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }
}
7688
// Register-register entry point: decides between VEX and EVEX encoding, emits
// the prefix, and returns the dst/src encodings (mod 8) packed for the ModRM byte.
int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, InstructionAttr *attributes) {
  bool vex_r = ((dst_enc & 8) == 8) ? 1 : 0;   // high bit of the reg field
  bool vex_b = ((src_enc & 8) == 8) ? 1 : 0;   // high bit of the rm field
  bool vex_x = false;
  set_attributes(attributes);
  attributes->set_current_assembler(this);

  // For EVEX instruction (which is not marked as pure EVEX instruction) check and see if this instruction
  // is allowed in legacy mode and has resources which will fit in it.
  // Pure EVEX instructions will have is_evex_instruction set in their definition.
  if (!attributes->is_legacy_mode()) {
    if (UseAVX > 2 && !attributes->is_evex_instruction() && !_is_managed) {
      if ((!attributes->uses_vl() || (attributes->get_vector_len() != AVX_512bit)) &&
          (dst_enc < 16) && (nds_enc < 16) && (src_enc < 16)) {
        attributes->set_is_legacy_mode();
      }
    }
  }

  if (UseAVX > 2) {
    // All the scalar fp instructions (with uses_vl as false) can have legacy_mode as false
    // Instruction with uses_vl true are vector instructions
    // All the vector instructions with AVX_512bit length can have legacy_mode as false
    // All the vector instructions with < AVX_512bit length can have legacy_mode as false if AVX512vl() is supported
    // Rest all should have legacy_mode set as true
    assert(((!attributes->uses_vl()) ||
            (attributes->get_vector_len() == AVX_512bit) ||
            (!_legacy_mode_vl) ||
            (attributes->is_legacy_mode())),"XMM register should be 0-15");
    // Instruction with legacy_mode true should have dst, nds and src < 15
    assert(((dst_enc < 16 && nds_enc < 16 && src_enc < 16) || (!attributes->is_legacy_mode())),"XMM register should be 0-15");
  }

  _is_managed = false;   // managed override applies to a single instruction only
  if (UseAVX > 2 && !attributes->is_legacy_mode())
  {
    bool evex_r = (dst_enc >= 16);
    bool evex_v = (nds_enc >= 16);
    // can use vex_x as bank extender on rm encoding
    vex_x = (src_enc >= 16);
    attributes->set_is_evex_instruction();
    evex_prefix(vex_r, vex_b, vex_x, evex_r, evex_v, nds_enc, pre, opc);
  } else {
    // W-reverted instructions drop REX.W when falling back to VEX encoding
    if (UseAVX > 2 && attributes->is_rex_vex_w_reverted()) {
      attributes->set_rex_vex_w(false);
    }
    vex_prefix(vex_r, vex_b, vex_x, nds_enc, pre, opc);
  }

  // return modrm byte components for operands
  return (((dst_enc & 7) << 3) | (src_enc & 7));
}
7741
7742
// Emit the appropriate prefix for an xmm-with-memory instruction: VEX/EVEX
// when AVX is available, otherwise the legacy SSE REX + escape form.
void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre,
                            VexOpcode opc, InstructionAttr *attributes) {
  if (UseAVX > 0) {
    int xreg_enc = xreg->encoding();
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;   // no nds register: encode vvvv as 0
    vex_prefix(adr, nds_enc, xreg_enc, pre, opc, attributes);
  } else {
    // SSE has no non-destructive source: nds must coincide with xreg or be absent
    assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
    rex_prefix(adr, xreg, pre, opc, attributes->is_rex_vex_w());
  }
}
7754
// Register-register counterpart of simd_prefix: emits VEX/EVEX or legacy SSE
// prefix and returns the packed ModRM register components.
int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre,
                                      VexOpcode opc, InstructionAttr *attributes) {
  int dst_enc = dst->encoding();
  int src_enc = src->encoding();
  if (UseAVX > 0) {
    int nds_enc = nds->is_valid() ? nds->encoding() : 0;   // no nds register: encode vvvv as 0
    return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, attributes);
  } else {
    // SSE has no non-destructive source: nds must coincide with dst/src or be absent
    assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
    return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, attributes->is_rex_vex_w());
  }
}
7767
// VCMPPD xmm/ymm: VEX.66.0F C2 /r ib -- packed-double compare with
// predicate `cop` (low 4 bits) as the trailing immediate.
void Assembler::cmppd(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(!VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
  emit_int8((unsigned char)(0xF & cop));       // comparison predicate immediate
}
7777
// VBLENDVPD: VEX.66.0F3A 4B /r ib -- variable blend of packed doubles; the
// mask register src2 is encoded in the top 4 bits of the immediate (is4 form).
void Assembler::blendvpd(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(!VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x4B);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
  int src2_enc = src2->encoding();
  emit_int8((unsigned char)(0xF0 & src2_enc<<4));   // imm8[7:4] selects the mask register
}
7788
// VCMPPS xmm/ymm: VEX.0F C2 /r ib -- packed-single compare with
// predicate `cop` (low 4 bits) as the trailing immediate.
void Assembler::cmpps(XMMRegister dst, XMMRegister nds, XMMRegister src, int cop, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(!VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
  emit_int8((unsigned char)0xC2);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
  emit_int8((unsigned char)(0xF & cop));       // comparison predicate immediate
}
7798
// VBLENDVPS: VEX.66.0F3A 4A /r ib -- variable blend of packed singles; the
// mask register src2 is encoded in the top 4 bits of the immediate (is4 form).
void Assembler::blendvps(XMMRegister dst, XMMRegister nds, XMMRegister src1, XMMRegister src2, int vector_len) {
  assert(VM_Version::supports_avx(), "");
  assert(!VM_Version::supports_evex(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x4A);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
  int src2_enc = src2->encoding();
  emit_int8((unsigned char)(0xF0 & src2_enc<<4));   // imm8[7:4] selects the mask register
}
7809
// VPBLENDD: VEX.66.0F3A 02 /r ib -- blend packed dwords by immediate mask.
void Assembler::vpblendd(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
  assert(VM_Version::supports_avx2(), "");
  InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
  emit_int8((unsigned char)0x02);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
  emit_int8((unsigned char)imm8);              // per-dword blend mask
}
7818
// SHLX r32: VEX.66.0F38 F7 /r -- dst = src1 << (src2 & 31), flags untouched.
// Note the shift count (src2) goes in the VEX.vvvv field.
void Assembler::shlxl(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
}
7826
// SHLX r64: VEX.66.0F38.W1 F7 /r -- 64-bit variant of shlxl (vex_w set);
// the shift count (src2) goes in the VEX.vvvv field.
void Assembler::shlxq(Register dst, Register src1, Register src2) {
  assert(VM_Version::supports_bmi2(), "");
  InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ true);
  int encode = vex_prefix_and_encode(dst->encoding(), src2->encoding(), src1->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
  emit_int8((unsigned char)0xF7);
  emit_int8((unsigned char)(0xC0 | encode));   // ModRM: register-direct
}
7834
7835 #ifndef _LP64
7836
// 32-bit-only one-byte INC r32: 40+rd (these opcodes are REX prefixes in 64-bit mode).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_int8(0x40 | dst->encoding());
}
7841
// 32-bit lea is just the 32-bit operand-size form.
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}
7845
// MOV m32, imm32 (C7 /0) with a relocated 32-bit immediate.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xC7);
  emit_operand(rax, dst);          // /0 opcode extension via rax (encoding 0)
  emit_data((int)imm32, rspec, 0); // immediate carries the relocation
}
7852
// MOV r32, imm32 (B8+rd) with a relocated 32-bit immediate.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_int8((unsigned char)(0xB8 | encode));
  emit_data((int)imm32, rspec, 0); // immediate carries the relocation
}
7859
// POPA (0x61): pop all general registers -- 32-bit mode only.
void Assembler::popa() { // 32bit
  emit_int8(0x61);
}
7863
// PUSH imm32 (0x68) with a relocated immediate.
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8(0x68);
  emit_data(imm32, rspec, 0); // immediate carries the relocation
}
7869
// PUSHA (0x60): push all general registers -- 32-bit mode only.
void Assembler::pusha() { // 32bit
  emit_int8(0x60);
}
7873
// SETNZ dst-byte: 0F 95 with register-direct ModRM (32-bit variant; no REX,
// so only the low four byte registers are addressable).
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | dst->encoding()));
}
7879
// SHLD r/m32, r32, CL: 0F A5 /r -- shift dst left, filling from src; count in CL.
void Assembler::shldl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA5);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); // src in reg field, dst in rm
}
7885
7886 // 0F A4 / r ib
// 0F A4 / r ib
// SHLD r/m32, r32, imm8 -- immediate-count variant of shldl.
void Assembler::shldl(Register dst, Register src, int8_t imm8) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xA4);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); // src in reg field, dst in rm
  emit_int8(imm8);
}
7893
// SHRD r/m32, r32, CL: 0F AD /r -- shift dst right, filling from src; count in CL.
void Assembler::shrdl(Register dst, Register src) {
  emit_int8(0x0F);
  emit_int8((unsigned char)0xAD);
  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding())); // src in reg field, dst in rm
}
7899
7900 #else // LP64
7901
// SETNZ dst-byte (64-bit variant): emits a REX prefix when needed so any
// register's low byte (SPL..R15B) is addressable.
void Assembler::set_byte_if_not_zero(Register dst) {
  int enc = prefix_and_encode(dst->encoding(), true);
  emit_int8(0x0F);
  emit_int8((unsigned char)0x95);
  emit_int8((unsigned char)(0xE0 | enc));
}
7908
7909 // 64bit only pieces of the assembler
7910 // This should only be used by 64bit instructions that can use rip-relative
7911 // it cannot be used by instructions that want an immediate value.
7912
// Returns true when `adr` can be addressed rip-relatively (32-bit signed
// displacement) from anywhere code might end up in the code cache; false
// forces the caller to materialize a 64-bit literal instead.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.
  // Check reachability from both ends of the code cache, since the final
  // location of this code is not known yet.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)pc() + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
7978
7979 // Check if the polling page is not reachable from the code cache using rip-relative
7980 // addressing.
// Check if the polling page is not reachable from the code cache using rip-relative
// addressing. Must hold from both ends of the cache since the final code
// location is unknown; ForceUnreachable stresses the far path.
bool Assembler::is_polling_page_far() {
  intptr_t addr = (intptr_t)os::get_polling_page();
  return ForceUnreachable ||
         !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
         !is_simm32(addr - (intptr_t)CodeCache::high_bound());
}
7987
// Emit a 64-bit data word; relocated iff rtype is not none.
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_int64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}
7997
// Emit a relocated 64-bit immediate. Must be called inside an InstructionMark
// so the relocation can be attached to the enclosing instruction's start.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words.  Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_int64(data);
}
8012
prefix_and_encode(int reg_enc,bool byteinst)8013 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
8014 if (reg_enc >= 8) {
8015 prefix(REX_B);
8016 reg_enc -= 8;
8017 } else if (byteinst && reg_enc >= 4) {
8018 prefix(REX);
8019 }
8020 return reg_enc;
8021 }
8022
prefixq_and_encode(int reg_enc)8023 int Assembler::prefixq_and_encode(int reg_enc) {
8024 if (reg_enc < 8) {
8025 prefix(REX_W);
8026 } else {
8027 prefix(REX_WB);
8028 reg_enc -= 8;
8029 }
8030 return reg_enc;
8031 }
8032
// Emit a REX prefix for a register-register instruction if required and
// return the packed ModRM register components (dst in reg field, src in rm).
// For byte instructions, a plain REX is needed when either operand is
// SPL..DIL (encodings 4..7) so the low byte is addressed instead of AH..BH.
int Assembler::prefix_and_encode(int dst_enc, bool dst_is_byte, int src_enc, bool src_is_byte) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if ((src_is_byte && src_enc >= 4) || (dst_is_byte && dst_enc >= 4)) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
8052
// Emit the REX.W prefix for a 64-bit register-register instruction, folding
// in REX.R/REX.B for extended registers, and return the packed ModRM
// register components (dst in reg field, src in rm).
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}
8072
// Emit REX.B when the single register operand is one of r8-r15.
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}
8078
// Fold REX.R (dst) and REX.B (src) into the caller-supplied prefix `p` and
// emit it, unless the result carries no information.
void Assembler::prefix(Register dst, Register src, Prefix p) {
  if (src->encoding() >= 8) {
    p = (Prefix)(p | REX_B);
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)( p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}
8091
// Fold REX.R (dst) and REX.B (memory base) into the caller-supplied prefix
// `p` and emit it. Addresses needing an index extension (REX.X) are not
// supported by this overload and trip the asserts.
void Assembler::prefix(Register dst, Address adr, Prefix p) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      assert(false, "prefix(Register dst, Address adr, Prefix p) does not support handling of an X");
    }
  }
  if (dst->encoding() >= 8) {
    p = (Prefix)(p | REX_R);
  }
  if (p != Prefix_EMPTY) {
    // do not generate an empty prefix
    prefix(p);
  }
}
8112
prefix(Address adr)8113 void Assembler::prefix(Address adr) {
8114 if (adr.base_needs_rex()) {
8115 if (adr.index_needs_rex()) {
8116 prefix(REX_XB);
8117 } else {
8118 prefix(REX_B);
8119 }
8120 } else {
8121 if (adr.index_needs_rex()) {
8122 prefix(REX_X);
8123 }
8124 }
8125 }
8126
prefixq(Address adr)8127 void Assembler::prefixq(Address adr) {
8128 if (adr.base_needs_rex()) {
8129 if (adr.index_needs_rex()) {
8130 prefix(REX_WXB);
8131 } else {
8132 prefix(REX_WB);
8133 }
8134 } else {
8135 if (adr.index_needs_rex()) {
8136 prefix(REX_WX);
8137 } else {
8138 prefix(REX_W);
8139 }
8140 }
8141 }
8142
8143
prefix(Address adr,Register reg,bool byteinst)8144 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
8145 if (reg->encoding() < 8) {
8146 if (adr.base_needs_rex()) {
8147 if (adr.index_needs_rex()) {
8148 prefix(REX_XB);
8149 } else {
8150 prefix(REX_B);
8151 }
8152 } else {
8153 if (adr.index_needs_rex()) {
8154 prefix(REX_X);
8155 } else if (byteinst && reg->encoding() >= 4 ) {
8156 prefix(REX);
8157 }
8158 }
8159 } else {
8160 if (adr.base_needs_rex()) {
8161 if (adr.index_needs_rex()) {
8162 prefix(REX_RXB);
8163 } else {
8164 prefix(REX_RB);
8165 }
8166 } else {
8167 if (adr.index_needs_rex()) {
8168 prefix(REX_RX);
8169 } else {
8170 prefix(REX_R);
8171 }
8172 }
8173 }
8174 }
8175
prefixq(Address adr,Register src)8176 void Assembler::prefixq(Address adr, Register src) {
8177 if (src->encoding() < 8) {
8178 if (adr.base_needs_rex()) {
8179 if (adr.index_needs_rex()) {
8180 prefix(REX_WXB);
8181 } else {
8182 prefix(REX_WB);
8183 }
8184 } else {
8185 if (adr.index_needs_rex()) {
8186 prefix(REX_WX);
8187 } else {
8188 prefix(REX_W);
8189 }
8190 }
8191 } else {
8192 if (adr.base_needs_rex()) {
8193 if (adr.index_needs_rex()) {
8194 prefix(REX_WRXB);
8195 } else {
8196 prefix(REX_WRB);
8197 }
8198 } else {
8199 if (adr.index_needs_rex()) {
8200 prefix(REX_WRX);
8201 } else {
8202 prefix(REX_WR);
8203 }
8204 }
8205 }
8206 }
8207
prefix(Address adr,XMMRegister reg)8208 void Assembler::prefix(Address adr, XMMRegister reg) {
8209 if (reg->encoding() < 8) {
8210 if (adr.base_needs_rex()) {
8211 if (adr.index_needs_rex()) {
8212 prefix(REX_XB);
8213 } else {
8214 prefix(REX_B);
8215 }
8216 } else {
8217 if (adr.index_needs_rex()) {
8218 prefix(REX_X);
8219 }
8220 }
8221 } else {
8222 if (adr.base_needs_rex()) {
8223 if (adr.index_needs_rex()) {
8224 prefix(REX_RXB);
8225 } else {
8226 prefix(REX_RB);
8227 }
8228 } else {
8229 if (adr.index_needs_rex()) {
8230 prefix(REX_RX);
8231 } else {
8232 prefix(REX_R);
8233 }
8234 }
8235 }
8236 }
8237
prefixq(Address adr,XMMRegister src)8238 void Assembler::prefixq(Address adr, XMMRegister src) {
8239 if (src->encoding() < 8) {
8240 if (adr.base_needs_rex()) {
8241 if (adr.index_needs_rex()) {
8242 prefix(REX_WXB);
8243 } else {
8244 prefix(REX_WB);
8245 }
8246 } else {
8247 if (adr.index_needs_rex()) {
8248 prefix(REX_WX);
8249 } else {
8250 prefix(REX_W);
8251 }
8252 }
8253 } else {
8254 if (adr.base_needs_rex()) {
8255 if (adr.index_needs_rex()) {
8256 prefix(REX_WRXB);
8257 } else {
8258 prefix(REX_WRB);
8259 }
8260 } else {
8261 if (adr.index_needs_rex()) {
8262 prefix(REX_WRX);
8263 } else {
8264 prefix(REX_WR);
8265 }
8266 }
8267 }
8268 }
8269
adcq(Register dst,int32_t imm32)8270 void Assembler::adcq(Register dst, int32_t imm32) {
8271 (void) prefixq_and_encode(dst->encoding());
8272 emit_arith(0x81, 0xD0, dst, imm32);
8273 }
8274
adcq(Register dst,Address src)8275 void Assembler::adcq(Register dst, Address src) {
8276 InstructionMark im(this);
8277 prefixq(src, dst);
8278 emit_int8(0x13);
8279 emit_operand(dst, src);
8280 }
8281
adcq(Register dst,Register src)8282 void Assembler::adcq(Register dst, Register src) {
8283 (void) prefixq_and_encode(dst->encoding(), src->encoding());
8284 emit_arith(0x13, 0xC0, dst, src);
8285 }
8286
addq(Address dst,int32_t imm32)8287 void Assembler::addq(Address dst, int32_t imm32) {
8288 InstructionMark im(this);
8289 prefixq(dst);
8290 emit_arith_operand(0x81, rax, dst,imm32);
8291 }
8292
addq(Address dst,Register src)8293 void Assembler::addq(Address dst, Register src) {
8294 InstructionMark im(this);
8295 prefixq(dst, src);
8296 emit_int8(0x01);
8297 emit_operand(src, dst);
8298 }
8299
addq(Register dst,int32_t imm32)8300 void Assembler::addq(Register dst, int32_t imm32) {
8301 (void) prefixq_and_encode(dst->encoding());
8302 emit_arith(0x81, 0xC0, dst, imm32);
8303 }
8304
addq(Register dst,Address src)8305 void Assembler::addq(Register dst, Address src) {
8306 InstructionMark im(this);
8307 prefixq(src, dst);
8308 emit_int8(0x03);
8309 emit_operand(dst, src);
8310 }
8311
addq(Register dst,Register src)8312 void Assembler::addq(Register dst, Register src) {
8313 (void) prefixq_and_encode(dst->encoding(), src->encoding());
8314 emit_arith(0x03, 0xC0, dst, src);
8315 }
8316
adcxq(Register dst,Register src)8317 void Assembler::adcxq(Register dst, Register src) {
8318 //assert(VM_Version::supports_adx(), "adx instructions not supported");
8319 emit_int8((unsigned char)0x66);
8320 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8321 emit_int8(0x0F);
8322 emit_int8(0x38);
8323 emit_int8((unsigned char)0xF6);
8324 emit_int8((unsigned char)(0xC0 | encode));
8325 }
8326
adoxq(Register dst,Register src)8327 void Assembler::adoxq(Register dst, Register src) {
8328 //assert(VM_Version::supports_adx(), "adx instructions not supported");
8329 emit_int8((unsigned char)0xF3);
8330 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8331 emit_int8(0x0F);
8332 emit_int8(0x38);
8333 emit_int8((unsigned char)0xF6);
8334 emit_int8((unsigned char)(0xC0 | encode));
8335 }
8336
andq(Address dst,int32_t imm32)8337 void Assembler::andq(Address dst, int32_t imm32) {
8338 InstructionMark im(this);
8339 prefixq(dst);
8340 emit_int8((unsigned char)0x81);
8341 emit_operand(rsp, dst, 4);
8342 emit_int32(imm32);
8343 }
8344
andq(Register dst,int32_t imm32)8345 void Assembler::andq(Register dst, int32_t imm32) {
8346 (void) prefixq_and_encode(dst->encoding());
8347 emit_arith(0x81, 0xE0, dst, imm32);
8348 }
8349
andq(Register dst,Address src)8350 void Assembler::andq(Register dst, Address src) {
8351 InstructionMark im(this);
8352 prefixq(src, dst);
8353 emit_int8(0x23);
8354 emit_operand(dst, src);
8355 }
8356
andq(Register dst,Register src)8357 void Assembler::andq(Register dst, Register src) {
8358 (void) prefixq_and_encode(dst->encoding(), src->encoding());
8359 emit_arith(0x23, 0xC0, dst, src);
8360 }
8361
andnq(Register dst,Register src1,Register src2)8362 void Assembler::andnq(Register dst, Register src1, Register src2) {
8363 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8364 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8365 int encode = vex_prefix_and_encode(dst->encoding(), src1->encoding(), src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8366 emit_int8((unsigned char)0xF2);
8367 emit_int8((unsigned char)(0xC0 | encode));
8368 }
8369
andnq(Register dst,Register src1,Address src2)8370 void Assembler::andnq(Register dst, Register src1, Address src2) {
8371 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8372 InstructionMark im(this);
8373 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8374 vex_prefix(src2, src1->encoding(), dst->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8375 emit_int8((unsigned char)0xF2);
8376 emit_operand(dst, src2);
8377 }
8378
bsfq(Register dst,Register src)8379 void Assembler::bsfq(Register dst, Register src) {
8380 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8381 emit_int8(0x0F);
8382 emit_int8((unsigned char)0xBC);
8383 emit_int8((unsigned char)(0xC0 | encode));
8384 }
8385
bsrq(Register dst,Register src)8386 void Assembler::bsrq(Register dst, Register src) {
8387 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8388 emit_int8(0x0F);
8389 emit_int8((unsigned char)0xBD);
8390 emit_int8((unsigned char)(0xC0 | encode));
8391 }
8392
bswapq(Register reg)8393 void Assembler::bswapq(Register reg) {
8394 int encode = prefixq_and_encode(reg->encoding());
8395 emit_int8(0x0F);
8396 emit_int8((unsigned char)(0xC8 | encode));
8397 }
8398
blsiq(Register dst,Register src)8399 void Assembler::blsiq(Register dst, Register src) {
8400 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8401 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8402 int encode = vex_prefix_and_encode(rbx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8403 emit_int8((unsigned char)0xF3);
8404 emit_int8((unsigned char)(0xC0 | encode));
8405 }
8406
blsiq(Register dst,Address src)8407 void Assembler::blsiq(Register dst, Address src) {
8408 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8409 InstructionMark im(this);
8410 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8411 vex_prefix(src, dst->encoding(), rbx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8412 emit_int8((unsigned char)0xF3);
8413 emit_operand(rbx, src);
8414 }
8415
blsmskq(Register dst,Register src)8416 void Assembler::blsmskq(Register dst, Register src) {
8417 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8418 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8419 int encode = vex_prefix_and_encode(rdx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8420 emit_int8((unsigned char)0xF3);
8421 emit_int8((unsigned char)(0xC0 | encode));
8422 }
8423
blsmskq(Register dst,Address src)8424 void Assembler::blsmskq(Register dst, Address src) {
8425 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8426 InstructionMark im(this);
8427 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8428 vex_prefix(src, dst->encoding(), rdx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8429 emit_int8((unsigned char)0xF3);
8430 emit_operand(rdx, src);
8431 }
8432
blsrq(Register dst,Register src)8433 void Assembler::blsrq(Register dst, Register src) {
8434 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8435 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8436 int encode = vex_prefix_and_encode(rcx->encoding(), dst->encoding(), src->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8437 emit_int8((unsigned char)0xF3);
8438 emit_int8((unsigned char)(0xC0 | encode));
8439 }
8440
blsrq(Register dst,Address src)8441 void Assembler::blsrq(Register dst, Address src) {
8442 assert(VM_Version::supports_bmi1(), "bit manipulation instructions not supported");
8443 InstructionMark im(this);
8444 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8445 vex_prefix(src, dst->encoding(), rcx->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F_38, &attributes);
8446 emit_int8((unsigned char)0xF3);
8447 emit_operand(rcx, src);
8448 }
8449
cdqq()8450 void Assembler::cdqq() {
8451 prefix(REX_W);
8452 emit_int8((unsigned char)0x99);
8453 }
8454
clflush(Address adr)8455 void Assembler::clflush(Address adr) {
8456 prefix(adr);
8457 emit_int8(0x0F);
8458 emit_int8((unsigned char)0xAE);
8459 emit_operand(rdi, adr);
8460 }
8461
cmovq(Condition cc,Register dst,Register src)8462 void Assembler::cmovq(Condition cc, Register dst, Register src) {
8463 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8464 emit_int8(0x0F);
8465 emit_int8(0x40 | cc);
8466 emit_int8((unsigned char)(0xC0 | encode));
8467 }
8468
cmovq(Condition cc,Register dst,Address src)8469 void Assembler::cmovq(Condition cc, Register dst, Address src) {
8470 InstructionMark im(this);
8471 prefixq(src, dst);
8472 emit_int8(0x0F);
8473 emit_int8(0x40 | cc);
8474 emit_operand(dst, src);
8475 }
8476
cmpq(Address dst,int32_t imm32)8477 void Assembler::cmpq(Address dst, int32_t imm32) {
8478 InstructionMark im(this);
8479 prefixq(dst);
8480 emit_int8((unsigned char)0x81);
8481 emit_operand(rdi, dst, 4);
8482 emit_int32(imm32);
8483 }
8484
cmpq(Register dst,int32_t imm32)8485 void Assembler::cmpq(Register dst, int32_t imm32) {
8486 (void) prefixq_and_encode(dst->encoding());
8487 emit_arith(0x81, 0xF8, dst, imm32);
8488 }
8489
cmpq(Address dst,Register src)8490 void Assembler::cmpq(Address dst, Register src) {
8491 InstructionMark im(this);
8492 prefixq(dst, src);
8493 emit_int8(0x3B);
8494 emit_operand(src, dst);
8495 }
8496
cmpq(Register dst,Register src)8497 void Assembler::cmpq(Register dst, Register src) {
8498 (void) prefixq_and_encode(dst->encoding(), src->encoding());
8499 emit_arith(0x3B, 0xC0, dst, src);
8500 }
8501
cmpq(Register dst,Address src)8502 void Assembler::cmpq(Register dst, Address src) {
8503 InstructionMark im(this);
8504 prefixq(src, dst);
8505 emit_int8(0x3B);
8506 emit_operand(dst, src);
8507 }
8508
cmpxchgq(Register reg,Address adr)8509 void Assembler::cmpxchgq(Register reg, Address adr) {
8510 InstructionMark im(this);
8511 prefixq(adr, reg);
8512 emit_int8(0x0F);
8513 emit_int8((unsigned char)0xB1);
8514 emit_operand(reg, adr);
8515 }
8516
cvtsi2sdq(XMMRegister dst,Register src)8517 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
8518 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8519 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8520 int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
8521 emit_int8(0x2A);
8522 emit_int8((unsigned char)(0xC0 | encode));
8523 }
8524
cvtsi2sdq(XMMRegister dst,Address src)8525 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
8526 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8527 InstructionMark im(this);
8528 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8529 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
8530 simd_prefix(dst, dst, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
8531 emit_int8(0x2A);
8532 emit_operand(dst, src);
8533 }
8534
cvtsi2ssq(XMMRegister dst,Address src)8535 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
8536 NOT_LP64(assert(VM_Version::supports_sse(), ""));
8537 InstructionMark im(this);
8538 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8539 attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
8540 simd_prefix(dst, dst, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
8541 emit_int8(0x2A);
8542 emit_operand(dst, src);
8543 }
8544
cvttsd2siq(Register dst,XMMRegister src)8545 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
8546 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8547 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8548 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
8549 emit_int8(0x2C);
8550 emit_int8((unsigned char)(0xC0 | encode));
8551 }
8552
cvttss2siq(Register dst,XMMRegister src)8553 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
8554 NOT_LP64(assert(VM_Version::supports_sse(), ""));
8555 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8556 int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_F3, VEX_OPCODE_0F, &attributes);
8557 emit_int8(0x2C);
8558 emit_int8((unsigned char)(0xC0 | encode));
8559 }
8560
decl(Register dst)8561 void Assembler::decl(Register dst) {
8562 // Don't use it directly. Use MacroAssembler::decrementl() instead.
8563 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
8564 int encode = prefix_and_encode(dst->encoding());
8565 emit_int8((unsigned char)0xFF);
8566 emit_int8((unsigned char)(0xC8 | encode));
8567 }
8568
decq(Register dst)8569 void Assembler::decq(Register dst) {
8570 // Don't use it directly. Use MacroAssembler::decrementq() instead.
8571 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
8572 int encode = prefixq_and_encode(dst->encoding());
8573 emit_int8((unsigned char)0xFF);
8574 emit_int8(0xC8 | encode);
8575 }
8576
decq(Address dst)8577 void Assembler::decq(Address dst) {
8578 // Don't use it directly. Use MacroAssembler::decrementq() instead.
8579 InstructionMark im(this);
8580 prefixq(dst);
8581 emit_int8((unsigned char)0xFF);
8582 emit_operand(rcx, dst);
8583 }
8584
fxrstor(Address src)8585 void Assembler::fxrstor(Address src) {
8586 prefixq(src);
8587 emit_int8(0x0F);
8588 emit_int8((unsigned char)0xAE);
8589 emit_operand(as_Register(1), src);
8590 }
8591
xrstor(Address src)8592 void Assembler::xrstor(Address src) {
8593 prefixq(src);
8594 emit_int8(0x0F);
8595 emit_int8((unsigned char)0xAE);
8596 emit_operand(as_Register(5), src);
8597 }
8598
fxsave(Address dst)8599 void Assembler::fxsave(Address dst) {
8600 prefixq(dst);
8601 emit_int8(0x0F);
8602 emit_int8((unsigned char)0xAE);
8603 emit_operand(as_Register(0), dst);
8604 }
8605
xsave(Address dst)8606 void Assembler::xsave(Address dst) {
8607 prefixq(dst);
8608 emit_int8(0x0F);
8609 emit_int8((unsigned char)0xAE);
8610 emit_operand(as_Register(4), dst);
8611 }
8612
idivq(Register src)8613 void Assembler::idivq(Register src) {
8614 int encode = prefixq_and_encode(src->encoding());
8615 emit_int8((unsigned char)0xF7);
8616 emit_int8((unsigned char)(0xF8 | encode));
8617 }
8618
imulq(Register dst,Register src)8619 void Assembler::imulq(Register dst, Register src) {
8620 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8621 emit_int8(0x0F);
8622 emit_int8((unsigned char)0xAF);
8623 emit_int8((unsigned char)(0xC0 | encode));
8624 }
8625
imulq(Register dst,Register src,int value)8626 void Assembler::imulq(Register dst, Register src, int value) {
8627 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8628 if (is8bit(value)) {
8629 emit_int8(0x6B);
8630 emit_int8((unsigned char)(0xC0 | encode));
8631 emit_int8(value & 0xFF);
8632 } else {
8633 emit_int8(0x69);
8634 emit_int8((unsigned char)(0xC0 | encode));
8635 emit_int32(value);
8636 }
8637 }
8638
imulq(Register dst,Address src)8639 void Assembler::imulq(Register dst, Address src) {
8640 InstructionMark im(this);
8641 prefixq(src, dst);
8642 emit_int8(0x0F);
8643 emit_int8((unsigned char) 0xAF);
8644 emit_operand(dst, src);
8645 }
8646
incl(Register dst)8647 void Assembler::incl(Register dst) {
8648 // Don't use it directly. Use MacroAssembler::incrementl() instead.
8649 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
8650 int encode = prefix_and_encode(dst->encoding());
8651 emit_int8((unsigned char)0xFF);
8652 emit_int8((unsigned char)(0xC0 | encode));
8653 }
8654
incq(Register dst)8655 void Assembler::incq(Register dst) {
8656 // Don't use it directly. Use MacroAssembler::incrementq() instead.
8657 // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
8658 int encode = prefixq_and_encode(dst->encoding());
8659 emit_int8((unsigned char)0xFF);
8660 emit_int8((unsigned char)(0xC0 | encode));
8661 }
8662
incq(Address dst)8663 void Assembler::incq(Address dst) {
8664 // Don't use it directly. Use MacroAssembler::incrementq() instead.
8665 InstructionMark im(this);
8666 prefixq(dst);
8667 emit_int8((unsigned char)0xFF);
8668 emit_operand(rax, dst);
8669 }
8670
lea(Register dst,Address src)8671 void Assembler::lea(Register dst, Address src) {
8672 leaq(dst, src);
8673 }
8674
leaq(Register dst,Address src)8675 void Assembler::leaq(Register dst, Address src) {
8676 InstructionMark im(this);
8677 prefixq(src, dst);
8678 emit_int8((unsigned char)0x8D);
8679 emit_operand(dst, src);
8680 }
8681
mov64(Register dst,int64_t imm64)8682 void Assembler::mov64(Register dst, int64_t imm64) {
8683 InstructionMark im(this);
8684 int encode = prefixq_and_encode(dst->encoding());
8685 emit_int8((unsigned char)(0xB8 | encode));
8686 emit_int64(imm64);
8687 }
8688
mov_literal64(Register dst,intptr_t imm64,RelocationHolder const & rspec)8689 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
8690 InstructionMark im(this);
8691 int encode = prefixq_and_encode(dst->encoding());
8692 emit_int8(0xB8 | encode);
8693 emit_data64(imm64, rspec);
8694 }
8695
mov_narrow_oop(Register dst,int32_t imm32,RelocationHolder const & rspec)8696 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
8697 InstructionMark im(this);
8698 int encode = prefix_and_encode(dst->encoding());
8699 emit_int8((unsigned char)(0xB8 | encode));
8700 emit_data((int)imm32, rspec, narrow_oop_operand);
8701 }
8702
mov_narrow_oop(Address dst,int32_t imm32,RelocationHolder const & rspec)8703 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
8704 InstructionMark im(this);
8705 prefix(dst);
8706 emit_int8((unsigned char)0xC7);
8707 emit_operand(rax, dst, 4);
8708 emit_data((int)imm32, rspec, narrow_oop_operand);
8709 }
8710
cmp_narrow_oop(Register src1,int32_t imm32,RelocationHolder const & rspec)8711 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
8712 InstructionMark im(this);
8713 int encode = prefix_and_encode(src1->encoding());
8714 emit_int8((unsigned char)0x81);
8715 emit_int8((unsigned char)(0xF8 | encode));
8716 emit_data((int)imm32, rspec, narrow_oop_operand);
8717 }
8718
cmp_narrow_oop(Address src1,int32_t imm32,RelocationHolder const & rspec)8719 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
8720 InstructionMark im(this);
8721 prefix(src1);
8722 emit_int8((unsigned char)0x81);
8723 emit_operand(rax, src1, 4);
8724 emit_data((int)imm32, rspec, narrow_oop_operand);
8725 }
8726
lzcntq(Register dst,Register src)8727 void Assembler::lzcntq(Register dst, Register src) {
8728 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
8729 emit_int8((unsigned char)0xF3);
8730 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8731 emit_int8(0x0F);
8732 emit_int8((unsigned char)0xBD);
8733 emit_int8((unsigned char)(0xC0 | encode));
8734 }
8735
movdq(XMMRegister dst,Register src)8736 void Assembler::movdq(XMMRegister dst, Register src) {
8737 // table D-1 says MMX/SSE2
8738 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8739 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8740 int encode = simd_prefix_and_encode(dst, xnoreg, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8741 emit_int8(0x6E);
8742 emit_int8((unsigned char)(0xC0 | encode));
8743 }
8744
movdq(Register dst,XMMRegister src)8745 void Assembler::movdq(Register dst, XMMRegister src) {
8746 // table D-1 says MMX/SSE2
8747 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
8748 InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
8749 // swap src/dst to get correct prefix
8750 int encode = simd_prefix_and_encode(src, xnoreg, as_XMMRegister(dst->encoding()), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
8751 emit_int8(0x7E);
8752 emit_int8((unsigned char)(0xC0 | encode));
8753 }
8754
movq(Register dst,Register src)8755 void Assembler::movq(Register dst, Register src) {
8756 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8757 emit_int8((unsigned char)0x8B);
8758 emit_int8((unsigned char)(0xC0 | encode));
8759 }
8760
movq(Register dst,Address src)8761 void Assembler::movq(Register dst, Address src) {
8762 InstructionMark im(this);
8763 prefixq(src, dst);
8764 emit_int8((unsigned char)0x8B);
8765 emit_operand(dst, src);
8766 }
8767
movq(Address dst,Register src)8768 void Assembler::movq(Address dst, Register src) {
8769 InstructionMark im(this);
8770 prefixq(dst, src);
8771 emit_int8((unsigned char)0x89);
8772 emit_operand(src, dst);
8773 }
8774
movsbq(Register dst,Address src)8775 void Assembler::movsbq(Register dst, Address src) {
8776 InstructionMark im(this);
8777 prefixq(src, dst);
8778 emit_int8(0x0F);
8779 emit_int8((unsigned char)0xBE);
8780 emit_operand(dst, src);
8781 }
8782
movsbq(Register dst,Register src)8783 void Assembler::movsbq(Register dst, Register src) {
8784 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8785 emit_int8(0x0F);
8786 emit_int8((unsigned char)0xBE);
8787 emit_int8((unsigned char)(0xC0 | encode));
8788 }
8789
movslq(Register dst,int32_t imm32)8790 void Assembler::movslq(Register dst, int32_t imm32) {
8791 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
8792 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
8793 // as a result we shouldn't use until tested at runtime...
8794 ShouldNotReachHere();
8795 InstructionMark im(this);
8796 int encode = prefixq_and_encode(dst->encoding());
8797 emit_int8((unsigned char)(0xC7 | encode));
8798 emit_int32(imm32);
8799 }
8800
movslq(Address dst,int32_t imm32)8801 void Assembler::movslq(Address dst, int32_t imm32) {
8802 assert(is_simm32(imm32), "lost bits");
8803 InstructionMark im(this);
8804 prefixq(dst);
8805 emit_int8((unsigned char)0xC7);
8806 emit_operand(rax, dst, 4);
8807 emit_int32(imm32);
8808 }
8809
movslq(Register dst,Address src)8810 void Assembler::movslq(Register dst, Address src) {
8811 InstructionMark im(this);
8812 prefixq(src, dst);
8813 emit_int8(0x63);
8814 emit_operand(dst, src);
8815 }
8816
movslq(Register dst,Register src)8817 void Assembler::movslq(Register dst, Register src) {
8818 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8819 emit_int8(0x63);
8820 emit_int8((unsigned char)(0xC0 | encode));
8821 }
8822
movswq(Register dst,Address src)8823 void Assembler::movswq(Register dst, Address src) {
8824 InstructionMark im(this);
8825 prefixq(src, dst);
8826 emit_int8(0x0F);
8827 emit_int8((unsigned char)0xBF);
8828 emit_operand(dst, src);
8829 }
8830
movswq(Register dst,Register src)8831 void Assembler::movswq(Register dst, Register src) {
8832 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8833 emit_int8((unsigned char)0x0F);
8834 emit_int8((unsigned char)0xBF);
8835 emit_int8((unsigned char)(0xC0 | encode));
8836 }
8837
movzbq(Register dst,Address src)8838 void Assembler::movzbq(Register dst, Address src) {
8839 InstructionMark im(this);
8840 prefixq(src, dst);
8841 emit_int8((unsigned char)0x0F);
8842 emit_int8((unsigned char)0xB6);
8843 emit_operand(dst, src);
8844 }
8845
movzbq(Register dst,Register src)8846 void Assembler::movzbq(Register dst, Register src) {
8847 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8848 emit_int8(0x0F);
8849 emit_int8((unsigned char)0xB6);
8850 emit_int8(0xC0 | encode);
8851 }
8852
movzwq(Register dst,Address src)8853 void Assembler::movzwq(Register dst, Address src) {
8854 InstructionMark im(this);
8855 prefixq(src, dst);
8856 emit_int8((unsigned char)0x0F);
8857 emit_int8((unsigned char)0xB7);
8858 emit_operand(dst, src);
8859 }
8860
movzwq(Register dst,Register src)8861 void Assembler::movzwq(Register dst, Register src) {
8862 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8863 emit_int8((unsigned char)0x0F);
8864 emit_int8((unsigned char)0xB7);
8865 emit_int8((unsigned char)(0xC0 | encode));
8866 }
8867
mulq(Address src)8868 void Assembler::mulq(Address src) {
8869 InstructionMark im(this);
8870 prefixq(src);
8871 emit_int8((unsigned char)0xF7);
8872 emit_operand(rsp, src);
8873 }
8874
mulq(Register src)8875 void Assembler::mulq(Register src) {
8876 int encode = prefixq_and_encode(src->encoding());
8877 emit_int8((unsigned char)0xF7);
8878 emit_int8((unsigned char)(0xE0 | encode));
8879 }
8880
mulxq(Register dst1,Register dst2,Register src)8881 void Assembler::mulxq(Register dst1, Register dst2, Register src) {
8882 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
8883 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
8884 int encode = vex_prefix_and_encode(dst1->encoding(), dst2->encoding(), src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_38, &attributes);
8885 emit_int8((unsigned char)0xF6);
8886 emit_int8((unsigned char)(0xC0 | encode));
8887 }
8888
negq(Register dst)8889 void Assembler::negq(Register dst) {
8890 int encode = prefixq_and_encode(dst->encoding());
8891 emit_int8((unsigned char)0xF7);
8892 emit_int8((unsigned char)(0xD8 | encode));
8893 }
8894
notq(Register dst)8895 void Assembler::notq(Register dst) {
8896 int encode = prefixq_and_encode(dst->encoding());
8897 emit_int8((unsigned char)0xF7);
8898 emit_int8((unsigned char)(0xD0 | encode));
8899 }
8900
orq(Address dst,int32_t imm32)8901 void Assembler::orq(Address dst, int32_t imm32) {
8902 InstructionMark im(this);
8903 prefixq(dst);
8904 emit_int8((unsigned char)0x81);
8905 emit_operand(rcx, dst, 4);
8906 emit_int32(imm32);
8907 }
8908
orq(Register dst,int32_t imm32)8909 void Assembler::orq(Register dst, int32_t imm32) {
8910 (void) prefixq_and_encode(dst->encoding());
8911 emit_arith(0x81, 0xC8, dst, imm32);
8912 }
8913
orq(Register dst,Address src)8914 void Assembler::orq(Register dst, Address src) {
8915 InstructionMark im(this);
8916 prefixq(src, dst);
8917 emit_int8(0x0B);
8918 emit_operand(dst, src);
8919 }
8920
orq(Register dst,Register src)8921 void Assembler::orq(Register dst, Register src) {
8922 (void) prefixq_and_encode(dst->encoding(), src->encoding());
8923 emit_arith(0x0B, 0xC0, dst, src);
8924 }
8925
popa()8926 void Assembler::popa() { // 64bit
8927 movq(r15, Address(rsp, 0));
8928 movq(r14, Address(rsp, wordSize));
8929 movq(r13, Address(rsp, 2 * wordSize));
8930 movq(r12, Address(rsp, 3 * wordSize));
8931 movq(r11, Address(rsp, 4 * wordSize));
8932 movq(r10, Address(rsp, 5 * wordSize));
8933 movq(r9, Address(rsp, 6 * wordSize));
8934 movq(r8, Address(rsp, 7 * wordSize));
8935 movq(rdi, Address(rsp, 8 * wordSize));
8936 movq(rsi, Address(rsp, 9 * wordSize));
8937 movq(rbp, Address(rsp, 10 * wordSize));
8938 // skip rsp
8939 movq(rbx, Address(rsp, 12 * wordSize));
8940 movq(rdx, Address(rsp, 13 * wordSize));
8941 movq(rcx, Address(rsp, 14 * wordSize));
8942 movq(rax, Address(rsp, 15 * wordSize));
8943
8944 addq(rsp, 16 * wordSize);
8945 }
8946
popcntq(Register dst,Address src)8947 void Assembler::popcntq(Register dst, Address src) {
8948 assert(VM_Version::supports_popcnt(), "must support");
8949 InstructionMark im(this);
8950 emit_int8((unsigned char)0xF3);
8951 prefixq(src, dst);
8952 emit_int8((unsigned char)0x0F);
8953 emit_int8((unsigned char)0xB8);
8954 emit_operand(dst, src);
8955 }
8956
popcntq(Register dst,Register src)8957 void Assembler::popcntq(Register dst, Register src) {
8958 assert(VM_Version::supports_popcnt(), "must support");
8959 emit_int8((unsigned char)0xF3);
8960 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
8961 emit_int8((unsigned char)0x0F);
8962 emit_int8((unsigned char)0xB8);
8963 emit_int8((unsigned char)(0xC0 | encode));
8964 }
8965
popq(Address dst)8966 void Assembler::popq(Address dst) {
8967 InstructionMark im(this);
8968 prefixq(dst);
8969 emit_int8((unsigned char)0x8F);
8970 emit_operand(rax, dst);
8971 }
8972
pusha()8973 void Assembler::pusha() { // 64bit
8974 // we have to store original rsp. ABI says that 128 bytes
8975 // below rsp are local scratch.
8976 movq(Address(rsp, -5 * wordSize), rsp);
8977
8978 subq(rsp, 16 * wordSize);
8979
8980 movq(Address(rsp, 15 * wordSize), rax);
8981 movq(Address(rsp, 14 * wordSize), rcx);
8982 movq(Address(rsp, 13 * wordSize), rdx);
8983 movq(Address(rsp, 12 * wordSize), rbx);
8984 // skip rsp
8985 movq(Address(rsp, 10 * wordSize), rbp);
8986 movq(Address(rsp, 9 * wordSize), rsi);
8987 movq(Address(rsp, 8 * wordSize), rdi);
8988 movq(Address(rsp, 7 * wordSize), r8);
8989 movq(Address(rsp, 6 * wordSize), r9);
8990 movq(Address(rsp, 5 * wordSize), r10);
8991 movq(Address(rsp, 4 * wordSize), r11);
8992 movq(Address(rsp, 3 * wordSize), r12);
8993 movq(Address(rsp, 2 * wordSize), r13);
8994 movq(Address(rsp, wordSize), r14);
8995 movq(Address(rsp, 0), r15);
8996 }
8997
pushq(Address src)8998 void Assembler::pushq(Address src) {
8999 InstructionMark im(this);
9000 prefixq(src);
9001 emit_int8((unsigned char)0xFF);
9002 emit_operand(rsi, src);
9003 }
9004
rclq(Register dst,int imm8)9005 void Assembler::rclq(Register dst, int imm8) {
9006 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9007 int encode = prefixq_and_encode(dst->encoding());
9008 if (imm8 == 1) {
9009 emit_int8((unsigned char)0xD1);
9010 emit_int8((unsigned char)(0xD0 | encode));
9011 } else {
9012 emit_int8((unsigned char)0xC1);
9013 emit_int8((unsigned char)(0xD0 | encode));
9014 emit_int8(imm8);
9015 }
9016 }
9017
rcrq(Register dst,int imm8)9018 void Assembler::rcrq(Register dst, int imm8) {
9019 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9020 int encode = prefixq_and_encode(dst->encoding());
9021 if (imm8 == 1) {
9022 emit_int8((unsigned char)0xD1);
9023 emit_int8((unsigned char)(0xD8 | encode));
9024 } else {
9025 emit_int8((unsigned char)0xC1);
9026 emit_int8((unsigned char)(0xD8 | encode));
9027 emit_int8(imm8);
9028 }
9029 }
9030
rorq(Register dst,int imm8)9031 void Assembler::rorq(Register dst, int imm8) {
9032 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9033 int encode = prefixq_and_encode(dst->encoding());
9034 if (imm8 == 1) {
9035 emit_int8((unsigned char)0xD1);
9036 emit_int8((unsigned char)(0xC8 | encode));
9037 } else {
9038 emit_int8((unsigned char)0xC1);
9039 emit_int8((unsigned char)(0xc8 | encode));
9040 emit_int8(imm8);
9041 }
9042 }
9043
rorxq(Register dst,Register src,int imm8)9044 void Assembler::rorxq(Register dst, Register src, int imm8) {
9045 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
9046 InstructionAttr attributes(AVX_128bit, /* vex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
9047 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
9048 emit_int8((unsigned char)0xF0);
9049 emit_int8((unsigned char)(0xC0 | encode));
9050 emit_int8(imm8);
9051 }
9052
rorxd(Register dst,Register src,int imm8)9053 void Assembler::rorxd(Register dst, Register src, int imm8) {
9054 assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
9055 InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
9056 int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
9057 emit_int8((unsigned char)0xF0);
9058 emit_int8((unsigned char)(0xC0 | encode));
9059 emit_int8(imm8);
9060 }
9061
sarq(Register dst,int imm8)9062 void Assembler::sarq(Register dst, int imm8) {
9063 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9064 int encode = prefixq_and_encode(dst->encoding());
9065 if (imm8 == 1) {
9066 emit_int8((unsigned char)0xD1);
9067 emit_int8((unsigned char)(0xF8 | encode));
9068 } else {
9069 emit_int8((unsigned char)0xC1);
9070 emit_int8((unsigned char)(0xF8 | encode));
9071 emit_int8(imm8);
9072 }
9073 }
9074
sarq(Register dst)9075 void Assembler::sarq(Register dst) {
9076 int encode = prefixq_and_encode(dst->encoding());
9077 emit_int8((unsigned char)0xD3);
9078 emit_int8((unsigned char)(0xF8 | encode));
9079 }
9080
sbbq(Address dst,int32_t imm32)9081 void Assembler::sbbq(Address dst, int32_t imm32) {
9082 InstructionMark im(this);
9083 prefixq(dst);
9084 emit_arith_operand(0x81, rbx, dst, imm32);
9085 }
9086
sbbq(Register dst,int32_t imm32)9087 void Assembler::sbbq(Register dst, int32_t imm32) {
9088 (void) prefixq_and_encode(dst->encoding());
9089 emit_arith(0x81, 0xD8, dst, imm32);
9090 }
9091
sbbq(Register dst,Address src)9092 void Assembler::sbbq(Register dst, Address src) {
9093 InstructionMark im(this);
9094 prefixq(src, dst);
9095 emit_int8(0x1B);
9096 emit_operand(dst, src);
9097 }
9098
sbbq(Register dst,Register src)9099 void Assembler::sbbq(Register dst, Register src) {
9100 (void) prefixq_and_encode(dst->encoding(), src->encoding());
9101 emit_arith(0x1B, 0xC0, dst, src);
9102 }
9103
shlq(Register dst,int imm8)9104 void Assembler::shlq(Register dst, int imm8) {
9105 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9106 int encode = prefixq_and_encode(dst->encoding());
9107 if (imm8 == 1) {
9108 emit_int8((unsigned char)0xD1);
9109 emit_int8((unsigned char)(0xE0 | encode));
9110 } else {
9111 emit_int8((unsigned char)0xC1);
9112 emit_int8((unsigned char)(0xE0 | encode));
9113 emit_int8(imm8);
9114 }
9115 }
9116
shlq(Register dst)9117 void Assembler::shlq(Register dst) {
9118 int encode = prefixq_and_encode(dst->encoding());
9119 emit_int8((unsigned char)0xD3);
9120 emit_int8((unsigned char)(0xE0 | encode));
9121 }
9122
shrq(Register dst,int imm8)9123 void Assembler::shrq(Register dst, int imm8) {
9124 assert(isShiftCount(imm8 >> 1), "illegal shift count");
9125 int encode = prefixq_and_encode(dst->encoding());
9126 emit_int8((unsigned char)0xC1);
9127 emit_int8((unsigned char)(0xE8 | encode));
9128 emit_int8(imm8);
9129 }
9130
shrq(Register dst)9131 void Assembler::shrq(Register dst) {
9132 int encode = prefixq_and_encode(dst->encoding());
9133 emit_int8((unsigned char)0xD3);
9134 emit_int8(0xE8 | encode);
9135 }
9136
subq(Address dst,int32_t imm32)9137 void Assembler::subq(Address dst, int32_t imm32) {
9138 InstructionMark im(this);
9139 prefixq(dst);
9140 emit_arith_operand(0x81, rbp, dst, imm32);
9141 }
9142
subq(Address dst,Register src)9143 void Assembler::subq(Address dst, Register src) {
9144 InstructionMark im(this);
9145 prefixq(dst, src);
9146 emit_int8(0x29);
9147 emit_operand(src, dst);
9148 }
9149
subq(Register dst,int32_t imm32)9150 void Assembler::subq(Register dst, int32_t imm32) {
9151 (void) prefixq_and_encode(dst->encoding());
9152 emit_arith(0x81, 0xE8, dst, imm32);
9153 }
9154
9155 // Force generation of a 4 byte immediate value even if it fits into 8bit
subq_imm32(Register dst,int32_t imm32)9156 void Assembler::subq_imm32(Register dst, int32_t imm32) {
9157 (void) prefixq_and_encode(dst->encoding());
9158 emit_arith_imm32(0x81, 0xE8, dst, imm32);
9159 }
9160
subq(Register dst,Address src)9161 void Assembler::subq(Register dst, Address src) {
9162 InstructionMark im(this);
9163 prefixq(src, dst);
9164 emit_int8(0x2B);
9165 emit_operand(dst, src);
9166 }
9167
subq(Register dst,Register src)9168 void Assembler::subq(Register dst, Register src) {
9169 (void) prefixq_and_encode(dst->encoding(), src->encoding());
9170 emit_arith(0x2B, 0xC0, dst, src);
9171 }
9172
testq(Register dst,int32_t imm32)9173 void Assembler::testq(Register dst, int32_t imm32) {
9174 // not using emit_arith because test
9175 // doesn't support sign-extension of
9176 // 8bit operands
9177 int encode = dst->encoding();
9178 if (encode == 0) {
9179 prefix(REX_W);
9180 emit_int8((unsigned char)0xA9);
9181 } else {
9182 encode = prefixq_and_encode(encode);
9183 emit_int8((unsigned char)0xF7);
9184 emit_int8((unsigned char)(0xC0 | encode));
9185 }
9186 emit_int32(imm32);
9187 }
9188
testq(Register dst,Register src)9189 void Assembler::testq(Register dst, Register src) {
9190 (void) prefixq_and_encode(dst->encoding(), src->encoding());
9191 emit_arith(0x85, 0xC0, dst, src);
9192 }
9193
testq(Register dst,Address src)9194 void Assembler::testq(Register dst, Address src) {
9195 InstructionMark im(this);
9196 prefixq(src, dst);
9197 emit_int8((unsigned char)0x85);
9198 emit_operand(dst, src);
9199 }
9200
xaddq(Address dst,Register src)9201 void Assembler::xaddq(Address dst, Register src) {
9202 InstructionMark im(this);
9203 prefixq(dst, src);
9204 emit_int8(0x0F);
9205 emit_int8((unsigned char)0xC1);
9206 emit_operand(src, dst);
9207 }
9208
xchgq(Register dst,Address src)9209 void Assembler::xchgq(Register dst, Address src) {
9210 InstructionMark im(this);
9211 prefixq(src, dst);
9212 emit_int8((unsigned char)0x87);
9213 emit_operand(dst, src);
9214 }
9215
xchgq(Register dst,Register src)9216 void Assembler::xchgq(Register dst, Register src) {
9217 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
9218 emit_int8((unsigned char)0x87);
9219 emit_int8((unsigned char)(0xc0 | encode));
9220 }
9221
xorq(Register dst,Register src)9222 void Assembler::xorq(Register dst, Register src) {
9223 (void) prefixq_and_encode(dst->encoding(), src->encoding());
9224 emit_arith(0x33, 0xC0, dst, src);
9225 }
9226
xorq(Register dst,Address src)9227 void Assembler::xorq(Register dst, Address src) {
9228 InstructionMark im(this);
9229 prefixq(src, dst);
9230 emit_int8(0x33);
9231 emit_operand(dst, src);
9232 }
9233
9234 #endif // !LP64
9235