/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/powerOfTwo.hpp"
#include "utilities/virtualizationSupport.hpp"

#include OS_HEADER_INLINE(os)

int VM_Version::_cpu;
int VM_Version::_model;
int VM_Version::_stepping;
bool VM_Version::_has_intel_jcc_erratum;
VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, };

// Address of instruction which causes SEGV
address VM_Version::_cpuinfo_segv_addr = 0;
// Address of instruction after the one which causes SEGV
address VM_Version::_cpuinfo_cont_addr = 0;

static BufferBlob* stub_blob;
static const int stub_size = 1100;

extern "C" {
  typedef void (*get_cpu_info_stub_t)(void*);
}
static get_cpu_info_stub_t get_cpu_info_stub = NULL;


class VM_Version_StubGenerator: public StubCodeGenerator {
 public:

  VM_Version_StubGenerator(CodeBuffer *c) : StubCodeGenerator(c) {}

  address generate_get_cpu_info() {
    // Flags to test CPU type.
    const uint32_t HS_EFL_AC = 0x40000;
    const uint32_t HS_EFL_ID = 0x200000;
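    // EFLAGS.AC (bit 18) first appeared on the 486, and EFLAGS.ID (bit 21)
    // can be toggled only on CPUs that implement cpuid, so a bit that
    // refuses to flip identifies a 386 or a cpuid-less 486 respectively.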
    // Values for when we don't have a CPUID instruction.
    const int      CPU_FAMILY_SHIFT = 8;
    const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT);
    const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT);
    bool use_evex = FLAG_IS_DEFAULT(UseAVX) || (UseAVX > 2);

    Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
    Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, ext_cpuid8, done, wrapup;
    Label legacy_setup, save_restore_except, legacy_save_restore, start_simd_check;

    StubCodeMark mark(this, "VM_Version", "get_cpu_info_stub");
#   define __ _masm->

    address start = __ pc();

    //
    // void get_cpu_info(VM_Version::CpuidInfo* cpuid_info);
    //
    // LP64: rcx and rdx are first and second argument registers on windows
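    //
    // A sketch of the logic the generated stub implements (hypothetical
    // helper names, for orientation only):
    //
    //   void get_cpu_info(CpuidInfo* info) {
    //     if (!can_toggle_eflags(HS_EFL_AC)) { info->family = 3; return; } // 386
    //     if (!can_toggle_eflags(HS_EFL_ID)) { info->family = 4; return; } // 486, no cpuid
    //     // store cpuid leaves 0x0, 0xB, 0x4, 0x1, 0x7 and 0x8000_00xx,
    //     // read XCR0, then probe whether the OS preserves YMM/ZMM state
    //     // across a signal.
    //   }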

    __ push(rbp);
#ifdef _LP64
    __ mov(rbp, c_rarg0); // cpuid_info address
#else
    __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
    __ push(rbx);
    __ push(rsi);
    __ pushf(); // preserve flags
    __ pop(rax);
    __ push(rax);
    __ mov(rcx, rax);
    //
    // if we are unable to change the AC flag, we have a 386
    //
    __ xorl(rax, HS_EFL_AC);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rax, rcx);
    __ jccb(Assembler::notEqual, detect_486);

    __ movl(rax, CPU_FAMILY_386);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // If we are unable to change the ID flag, we have a 486 which does
    // not support the "cpuid" instruction.
    //
    __ bind(detect_486);
    __ mov(rax, rcx);
    __ xorl(rax, HS_EFL_ID);
    __ push(rax);
    __ popf();
    __ pushf();
    __ pop(rax);
    __ cmpptr(rcx, rax);
    __ jccb(Assembler::notEqual, detect_586);

    __ bind(cpu486);
    __ movl(rax, CPU_FAMILY_486);
    __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
    __ jmp(done);

    //
    // At this point, we have a chip which supports the "cpuid" instruction
    //
    __ bind(detect_586);
    __ xorl(rax, rax);
    __ cpuid();
    __ orl(rax, rax);
    __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                        // value of at least 1, we give up and
                                        // assume a 486
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
    __ jccb(Assembler::belowEqual, std_cpuid4);

    //
    // cpuid(0xB) Processor Topology
    //
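    // Leaf 0xB takes a sub-leaf in rcx selecting a topology level; the stub
    // probes levels 0, 1 and 2 (thread, core, package). A level is invalid
    // when eax[4:0] (the shift width) and ebx[15:0] (the logical processor
    // count) are both zero, which is what the checks below test.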
    __ movl(rax, 0xb);
    __ xorl(rcx, rcx);   // Threads level
    __ cpuid();

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 1);     // Cores level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    __ movl(rax, 0xb);
    __ movl(rcx, 2);     // Packages level
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid topology level
    __ orl(rax, rbx);    // eax[4:0] | ebx[15:0] == 0 indicates invalid level
    __ andl(rax, 0xffff);
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid4);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // cpuid(0x4) Deterministic cache params
    //
    __ bind(std_cpuid4);
    __ movl(rax, 4);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
    __ jccb(Assembler::greater, std_cpuid1);

    __ xorl(rcx, rcx);   // L1 cache
    __ cpuid();
    __ push(rax);
    __ andl(rax, 0x1f);  // Determine if valid cache parameters used
    __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
    __ pop(rax);
    __ jccb(Assembler::equal, std_cpuid1);

    __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Standard cpuid(0x1)
    //
    __ bind(std_cpuid1);
    __ movl(rax, 1);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ andl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, sef_cpuid); // jump if AVX is not supported

    //
    // XCR0, XFEATURE_ENABLED_MASK register
    //
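    // xgetbv with rcx == 0 returns XCR0 in edx:eax. The bits tested later:
    // bit 1 = SSE (xmm state), bit 2 = AVX (ymm state), bits 5-7 = opmask,
    // upper halves of ZMM0-15, and ZMM16-31 state for AVX-512.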
    __ xorl(rcx, rcx);   // zero for XCR0 register
    __ xgetbv();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rdx);

    //
    // cpuid(0x7) Structured Extended Features
    //
    __ bind(sef_cpuid);
    __ movl(rax, 7);
    __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
    __ jccb(Assembler::greater, ext_cpuid);

    __ xorl(rcx, rcx);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi, 12), rdx);

    //
    // Extended cpuid(0x80000000)
    //
    __ bind(ext_cpuid);
    __ movl(rax, 0x80000000);
    __ cpuid();
    __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
    __ jcc(Assembler::belowEqual, done);
    __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
    __ jcc(Assembler::belowEqual, ext_cpuid1);
    __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid5);
    __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid7);
    __ cmpl(rax, 0x80000008);     // Is cpuid(0x80000009 and above) supported?
    __ jccb(Assembler::belowEqual, ext_cpuid8);
    __ cmpl(rax, 0x8000001E);     // Is cpuid(0x8000001E) supported?
    __ jccb(Assembler::below, ext_cpuid8);
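    // The comparisons above dispatch on the maximum supported extended leaf;
    // the blocks below are laid out from highest leaf to lowest so that each
    // one falls through into the next, storing every leaf the CPU reports.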
    //
    // Extended cpuid(0x8000001E)
    //
    __ movl(rax, 0x8000001E);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1E_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000008)
    //
    __ bind(ext_cpuid8);
    __ movl(rax, 0x80000008);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000007)
    //
    __ bind(ext_cpuid7);
    __ movl(rax, 0x80000007);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000005)
    //
    __ bind(ext_cpuid5);
    __ movl(rax, 0x80000005);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Extended cpuid(0x80000001)
    //
    __ bind(ext_cpuid1);
    __ movl(rax, 0x80000001);
    __ cpuid();
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
    __ movl(Address(rsi, 0), rax);
    __ movl(Address(rsi, 4), rbx);
    __ movl(Address(rsi, 8), rcx);
    __ movl(Address(rsi,12), rdx);

    //
    // Check if OS has enabled XGETBV instruction to access XCR0
    // (OSXSAVE feature flag) and CPU supports AVX
    //
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ movl(rcx, 0x18000000); // cpuid1 bits osxsave | avx
    __ andl(rcx, Address(rsi, 8)); // cpuid1 bits osxsave | avx
    __ cmpl(rcx, 0x18000000);
    __ jccb(Assembler::notEqual, done); // jump if AVX is not supported

    __ movl(rax, 0x6);
    __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits sse | ymm
    __ cmpl(rax, 0x6);
    __ jccb(Assembler::equal, start_simd_check); // proceed to SIMD check if AVX is OS-enabled

    // we need to bridge farther than imm8, so we use this island as a thunk
    __ bind(done);
    __ jmp(wrapup);

    __ bind(start_simd_check);
    //
    // Some OSs have a bug when upper 128/256bits of YMM/ZMM
    // registers are not restored after a signal processing.
    // Generate SEGV here (reference through NULL)
    // and check upper YMM/ZMM bits after it.
    //
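    // The faulting pc is published via set_cpuinfo_segv_addr() below so the
    // platform signal handler can recognize this intentional fault and
    // resume at _cpuinfo_cont_addr; whatever survived in the registers is
    // then compared against the test pattern written before the fault.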
    intx saved_useavx = UseAVX;
    intx saved_usesse = UseSSE;

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4)); // sef_cpuid7 ebx bit 16: avx512f
      __ cmpl(rax, 0x10000);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jccb(Assembler::notEqual, legacy_setup); // jump if EVEX is not supported

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_setup);
      }
      // EVEX setup: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
      UseAVX = 3;
      UseSSE = 2;
#ifdef _WINDOWS
      // xmm6-xmm15 are not preserved by caller on windows
      // https://msdn.microsoft.com/en-us/library/9z1stfyw.aspx
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm8, Assembler::AVX_512bit);
      __ subptr(rsp, 64);
      __ evmovdqul(Address(rsp, 0), xmm31, Assembler::AVX_512bit);
#endif // _LP64
#endif // _WINDOWS

      // load value into all 64 bytes of zmm7 register
      __ movl(rcx, VM_Version::ymm_test_value());
      __ movdl(xmm0, rcx);
      __ vpbroadcastd(xmm0, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm7, xmm0, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(xmm8, xmm0, Assembler::AVX_512bit);
      __ evmovdqul(xmm31, xmm0, Assembler::AVX_512bit);
#endif
      VM_Version::clean_cpuFeatures();
      __ jmp(save_restore_except);
    }

    __ bind(legacy_setup);
    // AVX setup
    VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
    UseAVX = 1;
    UseSSE = 2;
#ifdef _WINDOWS
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm7);
#ifdef _LP64
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm8);
    __ subptr(rsp, 32);
    __ vmovdqu(Address(rsp, 0), xmm15);
#endif // _LP64
#endif // _WINDOWS

    // load value into all 32 bytes of ymm7 register
    __ movl(rcx, VM_Version::ymm_test_value());

    __ movdl(xmm0, rcx);
    __ pshufd(xmm0, xmm0, 0x00);
    __ vinsertf128_high(xmm0, xmm0);
    __ vmovdqu(xmm7, xmm0);
#ifdef _LP64
    __ vmovdqu(xmm8, xmm0);
    __ vmovdqu(xmm15, xmm0);
#endif
    VM_Version::clean_cpuFeatures();

    __ bind(save_restore_except);
    __ xorl(rsi, rsi);
    VM_Version::set_cpuinfo_segv_addr(__ pc());
    // Generate SEGV
    __ movl(rax, Address(rsi, 0));

    VM_Version::set_cpuinfo_cont_addr(__ pc());
    // Returns here after signal. Save xmm0 to check it later.

    // If UseAVX is uninitialized or is set by the user to include EVEX
    if (use_evex) {
      // check _cpuid_info.sef_cpuid7_ebx.bits.avx512f
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
      __ movl(rax, 0x10000);
      __ andl(rax, Address(rsi, 4));
      __ cmpl(rax, 0x10000);
      __ jcc(Assembler::notEqual, legacy_save_restore);
      // check _cpuid_info.xem_xcr0_eax.bits.opmask
      // check _cpuid_info.xem_xcr0_eax.bits.zmm512
      // check _cpuid_info.xem_xcr0_eax.bits.zmm32
      __ movl(rax, 0xE0);
      __ andl(rax, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset()))); // xcr0 bits opmask | zmm512 | zmm32
      __ cmpl(rax, 0xE0);
      __ jcc(Assembler::notEqual, legacy_save_restore);

      if (FLAG_IS_DEFAULT(UseAVX)) {
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(rax, Address(rsi, 0));
        __ cmpl(rax, 0x50654);              // If it is Skylake
        __ jcc(Assembler::equal, legacy_save_restore);
      }
      // EVEX check: run in lowest evex mode
      VM_Version::set_evex_cpuFeatures(); // Enable temporary to pass asserts
      UseAVX = 3;
      UseSSE = 2;
      __ lea(rsi, Address(rbp, in_bytes(VM_Version::zmm_save_offset())));
      __ evmovdqul(Address(rsi, 0), xmm0, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 64), xmm7, Assembler::AVX_512bit);
#ifdef _LP64
      __ evmovdqul(Address(rsi, 128), xmm8, Assembler::AVX_512bit);
      __ evmovdqul(Address(rsi, 192), xmm31, Assembler::AVX_512bit);
#endif

#ifdef _WINDOWS
#ifdef _LP64
      __ evmovdqul(xmm31, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
      __ evmovdqul(xmm8, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _LP64
      __ evmovdqul(xmm7, Address(rsp, 0), Assembler::AVX_512bit);
      __ addptr(rsp, 64);
#endif // _WINDOWS
      generate_vzeroupper(wrapup);
      VM_Version::clean_cpuFeatures();
      UseAVX = saved_useavx;
      UseSSE = saved_usesse;
      __ jmp(wrapup);
    }

    __ bind(legacy_save_restore);
    // AVX check
    VM_Version::set_avx_cpuFeatures(); // Enable temporary to pass asserts
    UseAVX = 1;
    UseSSE = 2;
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::ymm_save_offset())));
    __ vmovdqu(Address(rsi, 0), xmm0);
    __ vmovdqu(Address(rsi, 32), xmm7);
#ifdef _LP64
    __ vmovdqu(Address(rsi, 64), xmm8);
    __ vmovdqu(Address(rsi, 96), xmm15);
#endif

#ifdef _WINDOWS
#ifdef _LP64
    __ vmovdqu(xmm15, Address(rsp, 0));
    __ addptr(rsp, 32);
    __ vmovdqu(xmm8, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _LP64
    __ vmovdqu(xmm7, Address(rsp, 0));
    __ addptr(rsp, 32);
#endif // _WINDOWS
    generate_vzeroupper(wrapup);
    VM_Version::clean_cpuFeatures();
    UseAVX = saved_useavx;
    UseSSE = saved_usesse;

    __ bind(wrapup);
    __ popf();
    __ pop(rsi);
    __ pop(rbx);
    __ pop(rbp);
    __ ret(0);

#   undef __

    return start;
  };

  void generate_vzeroupper(Label& L_wrapup) {
#   define __ _masm->
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
    __ cmpl(Address(rsi, 4), 0x756e6547);  // 'uneG'
    __ jcc(Assembler::notEqual, L_wrapup);
    __ movl(rcx, 0x0FFF0FF0);
    __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
    __ andl(rcx, Address(rsi, 0));
    __ cmpl(rcx, 0x00050670);              // If it is Xeon Phi 3200/5200/7200
    __ jcc(Assembler::equal, L_wrapup);
    __ cmpl(rcx, 0x00080650);              // If it is Future Xeon Phi
    __ jcc(Assembler::equal, L_wrapup);
    // vzeroupper() will use a pre-computed instruction sequence that we
    // can't compute until after we've determined CPU capabilities. Use
    // uncached variant here directly to be able to bootstrap correctly
    __ vzeroupper_uncached();
#   undef __
  }
};

void VM_Version::get_processor_features() {

  _cpu = 4; // 486 by default
  _model = 0;
  _stepping = 0;
  _features = 0;
  _logical_processors_per_package = 1;
  // i486 internal cache is both I&D and has a 16-byte line size
  _L1_data_cache_line_size = 16;

  // Get raw processor info

  get_cpu_info_stub(&_cpuid_info);

  assert_is_initialized();
  _cpu = extended_cpu_family();
  _model = extended_cpu_model();
  _stepping = cpu_stepping();
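
  // extended_cpu_family()/extended_cpu_model() fold the extended family and
  // model fields of cpuid leaf 1 into the base values (roughly: family +=
  // ext_family when family == 0xf, model |= ext_model << 4 for family
  // 6/0xf), so _cpu and _model match what tools like /proc/cpuinfo report.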

  if (cpu_family() > 4) { // it supports CPUID
    _features = feature_flags();
    // Logical processors are only available on P4s and above,
    // and only if hyperthreading is available.
    _logical_processors_per_package = logical_processor_count();
    _L1_data_cache_line_size = L1_line_size();
  }

  _supports_cx8 = supports_cmpxchg8();
  // xchg and xadd instructions
  _supports_atomic_getset4 = true;
  _supports_atomic_getadd4 = true;
  LP64_ONLY(_supports_atomic_getset8 = true);
  LP64_ONLY(_supports_atomic_getadd8 = true);

#ifdef _LP64
  // OS should support SSE for x64 and hardware should support at least SSE2.
  if (!VM_Version::supports_sse2()) {
    vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
  }
  // On 64-bit, SSE2 is the required minimum.
  if (UseSSE < 2) UseSSE = 2;
#endif

#ifdef AMD64
  // flush_icache_stub has to be generated first.
  // That is why Icache line size is hard coded in ICache class,
  // see icache_x86.hpp. It is also the reason why we can't use
  // clflush instruction in 32-bit VM since it could be running
  // on CPU which does not support it.
  //
  // The only thing we can do is to verify that flushed
  // ICache::line_size has correct value.
  guarantee(_cpuid_info.std_cpuid1_edx.bits.clflush != 0, "clflush is not supported");
  // clflush_size is size in quadwords (8 bytes).
  guarantee(_cpuid_info.std_cpuid1_ebx.bits.clflush_size == 8, "such clflush size is not supported");
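  // Example: CPUID.1:EBX[15:8] reports the clflush line size in 8-byte
  // units, so the required value of 8 means an 8 * 8 = 64-byte cache line,
  // the line size ICache assumes on x86_64.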
#endif

#ifdef _LP64
  // assigning this field effectively enables Unsafe.writebackMemory()
  // by initing UnsafeConstant.DATA_CACHE_LINE_FLUSH_SIZE to non-zero
  // that is only implemented on x86_64 and only if the OS plays ball
  if (os::supports_map_sync()) {
    // publish data cache line flush size to generic field, otherwise
    // let it default to zero thereby disabling writeback
    _data_cache_line_flush_size = _cpuid_info.std_cpuid1_ebx.bits.clflush_size * 8;
  }
#endif
  // If the OS doesn't support SSE, we can't use this feature even if the HW does
  if (!os::supports_sse())
    _features &= ~(CPU_SSE|CPU_SSE2|CPU_SSE3|CPU_SSSE3|CPU_SSE4A|CPU_SSE4_1|CPU_SSE4_2);

  if (UseSSE < 4) {
    _features &= ~CPU_SSE4_1;
    _features &= ~CPU_SSE4_2;
  }

  if (UseSSE < 3) {
    _features &= ~CPU_SSE3;
    _features &= ~CPU_SSSE3;
    _features &= ~CPU_SSE4A;
  }

  if (UseSSE < 2)
    _features &= ~CPU_SSE2;

  if (UseSSE < 1)
    _features &= ~CPU_SSE;

  // Since AVX instructions are slower than SSE on some ZX CPUs, force UseAVX=0.
  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7))) {
    UseAVX = 0;
  }

  // first try initial setting and detect what we can support
  int use_avx_limit = 0;
  if (UseAVX > 0) {
    if (UseAVX > 2 && supports_evex()) {
      use_avx_limit = 3;
    } else if (UseAVX > 1 && supports_avx2()) {
      use_avx_limit = 2;
    } else if (UseAVX > 0 && supports_avx()) {
      use_avx_limit = 1;
    } else {
      use_avx_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseAVX)) {
    // Don't use AVX-512 on older Skylakes unless explicitly requested.
    if (use_avx_limit > 2 && is_intel_skylake() && _stepping < 5) {
      FLAG_SET_DEFAULT(UseAVX, 2);
    } else {
      FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
    }
  }
  if (UseAVX > use_avx_limit) {
    warning("UseAVX=%d is not supported on this CPU, setting it to UseAVX=%d", (int) UseAVX, use_avx_limit);
    FLAG_SET_DEFAULT(UseAVX, use_avx_limit);
  } else if (UseAVX < 0) {
    warning("UseAVX=%d is not valid, setting it to UseAVX=0", (int) UseAVX);
    FLAG_SET_DEFAULT(UseAVX, 0);
  }

  if (UseAVX < 3) {
    _features &= ~CPU_AVX512F;
    _features &= ~CPU_AVX512DQ;
    _features &= ~CPU_AVX512CD;
    _features &= ~CPU_AVX512BW;
    _features &= ~CPU_AVX512VL;
    _features &= ~CPU_AVX512_VPOPCNTDQ;
    _features &= ~CPU_AVX512_VPCLMULQDQ;
    _features &= ~CPU_AVX512_VAES;
    _features &= ~CPU_AVX512_VNNI;
    _features &= ~CPU_AVX512_VBMI;
    _features &= ~CPU_AVX512_VBMI2;
  }

  if (UseAVX < 2)
    _features &= ~CPU_AVX2;

  if (UseAVX < 1) {
    _features &= ~CPU_AVX;
    _features &= ~CPU_VZEROUPPER;
  }

  if (logical_processors_per_package() == 1) {
    // An HT processor could be installed on a system which doesn't support HT.
    _features &= ~CPU_HT;
  }

  if (is_intel()) { // Intel cpus specific settings
    if (is_knights_family()) {
      _features &= ~CPU_VZEROUPPER;
    }
  }

  if (FLAG_IS_DEFAULT(IntelJccErratumMitigation)) {
    _has_intel_jcc_erratum = compute_has_intel_jcc_erratum();
  } else {
    _has_intel_jcc_erratum = IntelJccErratumMitigation;
  }

  char buf[512];
  int res = jio_snprintf(buf, sizeof(buf),
              "(%u cores per cpu, %u threads per core) family %d model %d stepping %d microcode 0x%x"
              "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s%s%s%s%s" "%s%s%s%s%s%s",

              cores_per_cpu(), threads_per_core(),
              cpu_family(), _model, _stepping, os::cpu_microcode_revision(),

              (supports_cmov() ? ", cmov" : ""),
              (supports_cmpxchg8() ? ", cx8" : ""),
              (supports_fxsr() ? ", fxsr" : ""),
              (supports_mmx() ? ", mmx" : ""),
              (supports_sse() ? ", sse" : ""),
              (supports_sse2() ? ", sse2" : ""),
              (supports_sse3() ? ", sse3" : ""),
              (supports_ssse3()? ", ssse3": ""),
              (supports_sse4_1() ? ", sse4.1" : ""),
              (supports_sse4_2() ? ", sse4.2" : ""),

              (supports_popcnt() ? ", popcnt" : ""),
              (supports_vzeroupper() ? ", vzeroupper" : ""),
              (supports_avx() ? ", avx" : ""),
              (supports_avx2() ? ", avx2" : ""),
              (supports_aes() ? ", aes" : ""),
              (supports_clmul() ? ", clmul" : ""),
              (supports_erms() ? ", erms" : ""),
              (supports_rtm() ? ", rtm" : ""),
              (supports_3dnow_prefetch() ? ", 3dnowpref" : ""),
              (supports_lzcnt() ? ", lzcnt": ""),

              (supports_sse4a() ? ", sse4a": ""),
              (supports_ht() ? ", ht": ""),
              (supports_tsc() ? ", tsc": ""),
              (supports_tscinv_bit() ? ", tscinvbit": ""),
              (supports_tscinv() ? ", tscinv": ""),
              (supports_bmi1() ? ", bmi1" : ""),
              (supports_bmi2() ? ", bmi2" : ""),
              (supports_adx() ? ", adx" : ""),
              (supports_evex() ? ", avx512f" : ""),
              (supports_avx512dq() ? ", avx512dq" : ""),

              (supports_avx512pf() ? ", avx512pf" : ""),
              (supports_avx512er() ? ", avx512er" : ""),
              (supports_avx512cd() ? ", avx512cd" : ""),
              (supports_avx512bw() ? ", avx512bw" : ""),
              (supports_avx512vl() ? ", avx512vl" : ""),
              (supports_avx512_vpopcntdq() ? ", avx512_vpopcntdq" : ""),
              (supports_avx512_vpclmulqdq() ? ", avx512_vpclmulqdq" : ""),
              (supports_avx512_vbmi() ? ", avx512_vbmi" : ""),
              (supports_avx512_vbmi2() ? ", avx512_vbmi2" : ""),
              (supports_avx512_vaes() ? ", avx512_vaes" : ""),

              (supports_avx512_vnni() ? ", avx512_vnni" : ""),
              (supports_sha() ? ", sha" : ""),
              (supports_fma() ? ", fma" : ""),
              (supports_clflush() ? ", clflush" : ""),
              (supports_clflushopt() ? ", clflushopt" : ""),
              (supports_clwb() ? ", clwb" : ""));

  assert(res > 0, "not enough temporary space allocated"); // increase 'buf' size

  _features_string = os::strdup(buf);

  // UseSSE is set to the smaller of what hardware supports and what
  // the command line requires.  I.e., you cannot set UseSSE to 2 on
  // older Pentiums which do not support it.
  int use_sse_limit = 0;
  if (UseSSE > 0) {
    if (UseSSE > 3 && supports_sse4_1()) {
      use_sse_limit = 4;
    } else if (UseSSE > 2 && supports_sse3()) {
      use_sse_limit = 3;
    } else if (UseSSE > 1 && supports_sse2()) {
      use_sse_limit = 2;
    } else if (UseSSE > 0 && supports_sse()) {
      use_sse_limit = 1;
    } else {
      use_sse_limit = 0;
    }
  }
  if (FLAG_IS_DEFAULT(UseSSE)) {
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE > use_sse_limit) {
    warning("UseSSE=%d is not supported on this CPU, setting it to UseSSE=%d", (int) UseSSE, use_sse_limit);
    FLAG_SET_DEFAULT(UseSSE, use_sse_limit);
  } else if (UseSSE < 0) {
    warning("UseSSE=%d is not valid, setting it to UseSSE=0", (int) UseSSE);
    FLAG_SET_DEFAULT(UseSSE, 0);
  }

  // Use AES instructions if available.
  if (supports_aes()) {
    if (FLAG_IS_DEFAULT(UseAES)) {
      FLAG_SET_DEFAULT(UseAES, true);
    }
    if (!UseAES) {
      if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
        warning("AES intrinsics require UseAES flag to be enabled. Intrinsics will be disabled.");
      }
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    } else {
      if (UseSSE > 2) {
        if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          FLAG_SET_DEFAULT(UseAESIntrinsics, true);
        }
      } else {
        // The AES intrinsic stubs require AES instruction support (of course)
        // but also require sse3 mode or higher for the instructions they use.
        if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
          warning("X86 AES intrinsics require SSE3 instructions or higher. Intrinsics will be disabled.");
        }
        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
      }

      // --AES-CTR begins--
      if (!UseAESIntrinsics) {
        if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
          warning("AES-CTR intrinsics require UseAESIntrinsics flag to be enabled. Intrinsics will be disabled.");
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      } else {
        if (supports_sse4_1()) {
          if (FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            FLAG_SET_DEFAULT(UseAESCTRIntrinsics, true);
          }
        } else {
          // The AES-CTR intrinsic stubs require AES instruction support (of course)
          // but also require sse4.1 mode or higher for the instructions they use.
          if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
            warning("X86 AES-CTR intrinsics require SSE4.1 instructions or higher. Intrinsics will be disabled.");
          }
          FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
        }
      }
      // --AES-CTR ends--
    }
  } else if (UseAES || UseAESIntrinsics || UseAESCTRIntrinsics) {
    if (UseAES && !FLAG_IS_DEFAULT(UseAES)) {
      warning("AES instructions are not available on this CPU");
      FLAG_SET_DEFAULT(UseAES, false);
    }
    if (UseAESIntrinsics && !FLAG_IS_DEFAULT(UseAESIntrinsics)) {
      warning("AES intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    }
    if (UseAESCTRIntrinsics && !FLAG_IS_DEFAULT(UseAESCTRIntrinsics)) {
      warning("AES-CTR intrinsics are not available on this CPU");
      FLAG_SET_DEFAULT(UseAESCTRIntrinsics, false);
    }
  }

  // Use CLMUL instructions if available.
  if (supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCLMUL)) {
      UseCLMUL = true;
    }
  } else if (UseCLMUL) {
    if (!FLAG_IS_DEFAULT(UseCLMUL))
      warning("CLMUL instructions not available on this CPU (AVX may also be required)");
    FLAG_SET_DEFAULT(UseCLMUL, false);
  }

  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) {
      UseCRC32Intrinsics = true;
    }
  } else if (UseCRC32Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
902 warning("CRC32 Intrinsics requires CLMUL instructions (not available on this CPU)");
    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
  }

  if (supports_sse4_2() && supports_clmul()) {
    if (FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      UseCRC32CIntrinsics = true;
    }
  } else if (UseCRC32CIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseCRC32CIntrinsics)) {
      warning("CRC32C intrinsics are not available on this CPU");
    }
    FLAG_SET_DEFAULT(UseCRC32CIntrinsics, false);
  }
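
  // Rough background: CRC32C (the Castagnoli polynomial) maps directly onto
  // the SSE4.2 crc32 instruction, and the CLMUL requirement covers the
  // carry-less folding the stub uses to process large buffers several words
  // at a time.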

  // GHASH/GCM intrinsics
  if (UseCLMUL && (UseSSE > 2)) {
    if (FLAG_IS_DEFAULT(UseGHASHIntrinsics)) {
      UseGHASHIntrinsics = true;
    }
  } else if (UseGHASHIntrinsics) {
    if (!FLAG_IS_DEFAULT(UseGHASHIntrinsics))
924 warning("GHASH intrinsic requires CLMUL and SSE2 instructions on this CPU");
    FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
  }

  // Base64 Intrinsics (Check the condition for which the intrinsic will be active)
  if ((UseAVX > 2) && supports_avx512vl() && supports_avx512bw()) {
    if (FLAG_IS_DEFAULT(UseBASE64Intrinsics)) {
      UseBASE64Intrinsics = true;
    }
  } else if (UseBASE64Intrinsics) {
    if (!FLAG_IS_DEFAULT(UseBASE64Intrinsics))
      warning("Base64 intrinsic requires EVEX instructions on this CPU");
    FLAG_SET_DEFAULT(UseBASE64Intrinsics, false);
  }

  if (supports_fma() && UseSSE >= 2) { // Check UseSSE since FMA code uses SSE instructions
    if (FLAG_IS_DEFAULT(UseFMA)) {
      UseFMA = true;
    }
  } else if (UseFMA) {
    warning("FMA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseFMA, false);
  }

  if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
    if (FLAG_IS_DEFAULT(UseSHA)) {
      UseSHA = true;
    }
  } else if (UseSHA) {
    warning("SHA instructions are not available on this CPU");
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (supports_sha() && supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
    }
  } else if (UseSHA1Intrinsics) {
    warning("Intrinsics for SHA-1 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
  }

  if (supports_sse4_1() && UseSHA) {
    if (FLAG_IS_DEFAULT(UseSHA256Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA256Intrinsics, true);
    }
  } else if (UseSHA256Intrinsics) {
    warning("Intrinsics for SHA-224 and SHA-256 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
  }

#ifdef _LP64
  // These are only supported on 64-bit
  if (UseSHA && supports_avx2() && supports_bmi2()) {
    if (FLAG_IS_DEFAULT(UseSHA512Intrinsics)) {
      FLAG_SET_DEFAULT(UseSHA512Intrinsics, true);
    }
  } else
#endif
  if (UseSHA512Intrinsics) {
    warning("Intrinsics for SHA-384 and SHA-512 crypto hash functions not available on this CPU.");
    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
  }

  if (!(UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics)) {
    FLAG_SET_DEFAULT(UseSHA, false);
  }

  if (UseAdler32Intrinsics) {
    warning("Adler32Intrinsics not available on this CPU.");
    FLAG_SET_DEFAULT(UseAdler32Intrinsics, false);
  }

  if (!supports_rtm() && UseRTMLocking) {
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    // VM_Version_init() is executed after UseBiasedLocking is used
    // in Thread::allocate().
    vm_exit_during_initialization("RTM instructions are not available on this CPU");
  }

#if INCLUDE_RTM_OPT
  if (UseRTMLocking) {
    if (is_client_compilation_mode_vm()) {
      // Only C2 does RTM locking optimization.
      // Can't continue because UseRTMLocking affects UseBiasedLocking flag
      // setting during arguments processing. See use_biased_locking().
      vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
    }
    if (is_intel_family_core()) {
      if ((_model == CPU_MODEL_HASWELL_E3) ||
          (_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
          (_model == CPU_MODEL_BROADWELL  && _stepping < 4)) {
        // currently a collision between SKL and HSW_E3
        if (!UnlockExperimentalVMOptions && UseAVX < 3) {
          vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this "
                                        "platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
        } else {
          warning("UseRTMLocking is only available as experimental option on this platform.");
        }
      }
    }
    if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
      // RTM locking should be used only for applications with
      // high lock contention. For now we do not use it by default.
      vm_exit_during_initialization("UseRTMLocking flag should be only set on command line");
    }
  } else { // !UseRTMLocking
    if (UseRTMForStackLocks) {
      if (!FLAG_IS_DEFAULT(UseRTMForStackLocks)) {
        warning("UseRTMForStackLocks flag should be off when UseRTMLocking flag is off");
      }
      FLAG_SET_DEFAULT(UseRTMForStackLocks, false);
    }
    if (UseRTMDeopt) {
      FLAG_SET_DEFAULT(UseRTMDeopt, false);
    }
    if (PrintPreciseRTMLockingStatistics) {
      FLAG_SET_DEFAULT(PrintPreciseRTMLockingStatistics, false);
    }
  }
#else
  if (UseRTMLocking) {
    // Only C2 does RTM locking optimization.
    // Can't continue because UseRTMLocking affects UseBiasedLocking flag
    // setting during arguments processing. See use_biased_locking().
    vm_exit_during_initialization("RTM locking optimization is not supported in this VM");
  }
#endif

#ifdef COMPILER2
  if (UseFPUForSpilling) {
    if (UseSSE < 2) {
      // Only supported with SSE2+
      FLAG_SET_DEFAULT(UseFPUForSpilling, false);
    }
  }
#endif

#if COMPILER2_OR_JVMCI
  int max_vector_size = 0;
  if (UseSSE < 2) {
    // Vectors (in XMM) are only supported with SSE2+
    // UseSSE is always at least 2 on x64.
    max_vector_size = 0;
  } else if (UseAVX == 0 || !os_supports_avx_vectors()) {
    // 16 byte vectors (in XMM) are supported with SSE2+
    max_vector_size = 16;
  } else if (UseAVX == 1 || UseAVX == 2) {
    // 32 bytes vectors (in YMM) are only supported with AVX+
    max_vector_size = 32;
  } else if (UseAVX > 2) {
    // 64 bytes vectors (in ZMM) are only supported with AVX 3
    max_vector_size = 64;
  }

#ifdef _LP64
  int min_vector_size = 4; // We require MaxVectorSize to be at least 4 on 64bit
#else
  int min_vector_size = 0;
#endif

  if (!FLAG_IS_DEFAULT(MaxVectorSize)) {
    if (MaxVectorSize < min_vector_size) {
      warning("MaxVectorSize must be at least %i on this platform", min_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, min_vector_size);
    }
    if (MaxVectorSize > max_vector_size) {
      warning("MaxVectorSize must be at most %i on this platform", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
    if (!is_power_of_2(MaxVectorSize)) {
      warning("MaxVectorSize must be a power of 2, setting to default: %i", max_vector_size);
      FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
    }
  } else {
    // If default, use highest supported configuration
    FLAG_SET_DEFAULT(MaxVectorSize, max_vector_size);
  }

#if defined(COMPILER2) && defined(ASSERT)
  if (MaxVectorSize > 0) {
    if (supports_avx() && PrintMiscellaneous && Verbose && TraceNewVectors) {
      tty->print_cr("State of YMM registers after signal handle:");
      int nreg = 2 LP64_ONLY(+2);
      const char* ymm_name[4] = {"0", "7", "8", "15"};
      for (int i = 0; i < nreg; i++) {
        tty->print("YMM%s:", ymm_name[i]);
        for (int j = 7; j >= 0; j--) {
          tty->print(" %x", _cpuid_info.ymm_save[i*8 + j]);
        }
        tty->cr();
      }
    }
  }
#endif // COMPILER2 && ASSERT

  if (!FLAG_IS_DEFAULT(AVX3Threshold)) {
    if (!is_power_of_2(AVX3Threshold)) {
      warning("AVX3Threshold must be a power of 2");
      FLAG_SET_DEFAULT(AVX3Threshold, 4096);
    }
  }
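
  // Roughly, AVX3Threshold is the input size below which the copy/fill
  // stubs stay on 32-byte (AVX2) operations even when AVX-512 is available,
  // in part to avoid the frequency penalty of waking the 512-bit units for
  // small inputs; see the stub generators for the exact uses.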

#ifdef _LP64
  if (FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
    UseMultiplyToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
    UseSquareToLenIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
    UseMulAddIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
    UseMontgomeryMultiplyIntrinsic = true;
  }
  if (FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
    UseMontgomerySquareIntrinsic = true;
  }
#else
  if (UseMultiplyToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMultiplyToLenIntrinsic)) {
      warning("multiplyToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMultiplyToLenIntrinsic, false);
  }
  if (UseMontgomeryMultiplyIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomeryMultiplyIntrinsic)) {
      warning("montgomeryMultiply intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomeryMultiplyIntrinsic, false);
  }
  if (UseMontgomerySquareIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMontgomerySquareIntrinsic)) {
      warning("montgomerySquare intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMontgomerySquareIntrinsic, false);
  }
  if (UseSquareToLenIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseSquareToLenIntrinsic)) {
      warning("squareToLen intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseSquareToLenIntrinsic, false);
  }
  if (UseMulAddIntrinsic) {
    if (!FLAG_IS_DEFAULT(UseMulAddIntrinsic)) {
      warning("mulAdd intrinsic is not available in 32-bit VM");
    }
    FLAG_SET_DEFAULT(UseMulAddIntrinsic, false);
  }
#endif // _LP64
#endif // COMPILER2_OR_JVMCI

  // On new cpus instructions which update whole XMM register should be used
  // to prevent partial register stall due to dependencies on high half.
  //
  // UseXmmLoadAndClearUpper == true  --> movsd(xmm, mem)
  // UseXmmLoadAndClearUpper == false --> movlpd(xmm, mem)
  // UseXmmRegToRegMoveAll == true  --> movaps(xmm, xmm), movapd(xmm, xmm).
  // UseXmmRegToRegMoveAll == false --> movss(xmm, xmm),  movsd(xmm, xmm).
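  //
  // movsd(xmm, mem) writes the whole register (upper half zeroed) and so
  // starts a fresh dependency chain, while movlpd(xmm, mem) and the
  // register-to-register movss/movsd merge into the destination's old upper
  // bits and can stall on its previous producer.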


  if (is_zx()) { // ZX cpus specific settings
    if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
      UseStoreImmI16 = false; // don't use it on ZX cpus
    }
    if ((cpu_family() == 6) || (cpu_family() == 7)) {
      if (FLAG_IS_DEFAULT(UseAddressNop)) {
        // Use it on all ZX cpus
        UseAddressNop = true;
      }
    }
    if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
      UseXmmLoadAndClearUpper = true; // use movsd on all ZX cpus
    }
    if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
      if (supports_sse3()) {
        UseXmmRegToRegMoveAll = true; // use movaps, movapd on new ZX cpus
      } else {
        UseXmmRegToRegMoveAll = false;
      }
    }
    if (((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse3()) { // new ZX cpus
#ifdef COMPILER2
      if (FLAG_IS_DEFAULT(MaxLoopPad)) {
        // For new ZX cpus do the next optimization:
        // don't align the beginning of a loop if there are enough instructions
        // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
        // in current fetch line (OptoLoopAlignment) or the padding
        // is big (> MaxLoopPad).
        // Set MaxLoopPad to 11 for new ZX cpus to reduce number of
        // generated NOP instructions. 11 is the largest size of one
        // address NOP instruction '0F 1F' (see Assembler::nop(i)).
        MaxLoopPad = 11;
      }
#endif // COMPILER2
      if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
        UseXMMForArrayCopy = true; // use SSE2 movq on new ZX cpus
      }
      if (supports_sse4_2()) { // new ZX cpus
        if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
          UseUnalignedLoadStores = true; // use movdqu on newest ZX cpus
        }
      }
      if (supports_sse4_2()) {
        if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
          FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
        }
      } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1235 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1236 }
1237 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1238 }
1239 }
1240
1241 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1242 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1243 }
1244 }
1245
1246 if (is_amd_family()) { // AMD cpus specific settings
1247 if (supports_sse2() && FLAG_IS_DEFAULT(UseAddressNop)) {
1248 // Use it on new AMD cpus starting from Opteron.
1249 UseAddressNop = true;
1250 }
1251 if (supports_sse2() && FLAG_IS_DEFAULT(UseNewLongLShift)) {
1252 // Use it on new AMD cpus starting from Opteron.
1253 UseNewLongLShift = true;
1254 }
1255 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1256 if (supports_sse4a()) {
1257 UseXmmLoadAndClearUpper = true; // use movsd only on '10h' Opteron
1258 } else {
1259 UseXmmLoadAndClearUpper = false;
1260 }
1261 }
1262 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1263 if (supports_sse4a()) {
1264 UseXmmRegToRegMoveAll = true; // use movaps, movapd only on '10h'
1265 } else {
1266 UseXmmRegToRegMoveAll = false;
1267 }
1268 }
1269 if (FLAG_IS_DEFAULT(UseXmmI2F)) {
1270 if (supports_sse4a()) {
1271 UseXmmI2F = true;
1272 } else {
1273 UseXmmI2F = false;
1274 }
1275 }
1276 if (FLAG_IS_DEFAULT(UseXmmI2D)) {
1277 if (supports_sse4a()) {
1278 UseXmmI2D = true;
1279 } else {
1280 UseXmmI2D = false;
1281 }
1282 }
1283 if (supports_sse4_2()) {
1284 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1285 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1286 }
1287 } else {
      if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1289 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1290 }
1291 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1292 }
1293
1294 // some defaults for AMD family 15h
1295 if (cpu_family() == 0x15) {
1296 // On family 15h processors default is no sw prefetch
1297 if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1298 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
1299 }
1300 // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
1301 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
1302 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1303 }
1304 // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
1305 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1306 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1307 }
1308 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1309 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1310 }
1311 }
1312
1313 #ifdef COMPILER2
1314 if (cpu_family() < 0x17 && MaxVectorSize > 16) {
1315 // Limit vectors size to 16 bytes on AMD cpus < 17h.
1316 FLAG_SET_DEFAULT(MaxVectorSize, 16);
1317 }
1318 #endif // COMPILER2
1319
1320 // Some defaults for AMD family 17h || Hygon family 18h
1321 if (cpu_family() == 0x17 || cpu_family() == 0x18) {
1322 // On family 17h processors use XMM and UnalignedLoadStores for Array Copy
1323 if (supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1324 FLAG_SET_DEFAULT(UseXMMForArrayCopy, true);
1325 }
1326 if (supports_sse2() && FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1327 FLAG_SET_DEFAULT(UseUnalignedLoadStores, true);
1328 }
1329 #ifdef COMPILER2
1330 if (supports_sse4_2() && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
1331 FLAG_SET_DEFAULT(UseFPUForSpilling, true);
1332 }
1333 #endif
1334 }
1335 }
1336
1337 if (is_intel()) { // Intel cpus specific settings
1338 if (FLAG_IS_DEFAULT(UseStoreImmI16)) {
1339 UseStoreImmI16 = false; // don't use it on Intel cpus
1340 }
1341 if (cpu_family() == 6 || cpu_family() == 15) {
1342 if (FLAG_IS_DEFAULT(UseAddressNop)) {
1343 // Use it on all Intel cpus starting from PentiumPro
1344 UseAddressNop = true;
1345 }
1346 }
1347 if (FLAG_IS_DEFAULT(UseXmmLoadAndClearUpper)) {
1348 UseXmmLoadAndClearUpper = true; // use movsd on all Intel cpus
1349 }
1350 if (FLAG_IS_DEFAULT(UseXmmRegToRegMoveAll)) {
1351 if (supports_sse3()) {
1352 UseXmmRegToRegMoveAll = true; // use movaps, movapd on new Intel cpus
1353 } else {
1354 UseXmmRegToRegMoveAll = false;
1355 }
1356 }
1357 if (cpu_family() == 6 && supports_sse3()) { // New Intel cpus
1358 #ifdef COMPILER2
1359 if (FLAG_IS_DEFAULT(MaxLoopPad)) {
1360 // For new Intel cpus do the next optimization:
1361 // don't align the beginning of a loop if there are enough instructions
1362 // left (NumberOfLoopInstrToAlign defined in c2_globals.hpp)
1363 // in current fetch line (OptoLoopAlignment) or the padding
1364 // is big (> MaxLoopPad).
1365 // Set MaxLoopPad to 11 for new Intel cpus to reduce number of
1366 // generated NOP instructions. 11 is the largest size of one
1367 // address NOP instruction '0F 1F' (see Assembler::nop(i)).
1368 MaxLoopPad = 11;
1369 }
1370 #endif // COMPILER2
1371 if (FLAG_IS_DEFAULT(UseXMMForArrayCopy)) {
1372 UseXMMForArrayCopy = true; // use SSE2 movq on new Intel cpus
1373 }
1374 if ((supports_sse4_2() && supports_ht()) || supports_avx()) { // Newest Intel cpus
1375 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1376 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1377 }
1378 }
1379 if (supports_sse4_2()) {
1380 if (FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1381 FLAG_SET_DEFAULT(UseSSE42Intrinsics, true);
1382 }
1383 } else {
        if (UseSSE42Intrinsics && !FLAG_IS_DEFAULT(UseSSE42Intrinsics)) {
1385 warning("SSE4.2 intrinsics require SSE4.2 instructions or higher. Intrinsics will be disabled.");
1386 }
1387 FLAG_SET_DEFAULT(UseSSE42Intrinsics, false);
1388 }
1389 }
1390 if (is_atom_family() || is_knights_family()) {
1391 #ifdef COMPILER2
1392 if (FLAG_IS_DEFAULT(OptoScheduling)) {
1393 OptoScheduling = true;
1394 }
1395 #endif
1396 if (supports_sse4_2()) { // Silvermont
1397 if (FLAG_IS_DEFAULT(UseUnalignedLoadStores)) {
1398 UseUnalignedLoadStores = true; // use movdqu on newest Intel cpus
1399 }
1400 }
1401 if (FLAG_IS_DEFAULT(UseIncDec)) {
1402 FLAG_SET_DEFAULT(UseIncDec, false);
1403 }
1404 }
1405 if (FLAG_IS_DEFAULT(AllocatePrefetchInstr) && supports_3dnow_prefetch()) {
1406 FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
1407 }
1408 }
1409
1410 #ifdef _LP64
1411 if (UseSSE42Intrinsics) {
1412 if (FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1413 UseVectorizedMismatchIntrinsic = true;
1414 }
1415 } else if (UseVectorizedMismatchIntrinsic) {
1416 if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic))
1417 warning("vectorizedMismatch intrinsics are not available on this CPU");
1418 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1419 }
1420 #else
1421 if (UseVectorizedMismatchIntrinsic) {
1422 if (!FLAG_IS_DEFAULT(UseVectorizedMismatchIntrinsic)) {
1423 warning("vectorizedMismatch intrinsic is not available in 32-bit VM");
1424 }
1425 FLAG_SET_DEFAULT(UseVectorizedMismatchIntrinsic, false);
1426 }
1427 #endif // _LP64
1428
  // Use count leading zeros instruction if available.
  if (supports_lzcnt()) {
    if (FLAG_IS_DEFAULT(UseCountLeadingZerosInstruction)) {
      UseCountLeadingZerosInstruction = true;
    }
  } else if (UseCountLeadingZerosInstruction) {
    warning("lzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountLeadingZerosInstruction, false);
  }

  // Use count trailing zeros instruction if available
  if (supports_bmi1()) {
    // tzcnt does not require VEX prefix
    if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
      if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
        // Don't use tzcnt if BMI1 is switched off on command line.
        UseCountTrailingZerosInstruction = false;
      } else {
        UseCountTrailingZerosInstruction = true;
      }
    }
  } else if (UseCountTrailingZerosInstruction) {
    warning("tzcnt instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
  }
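
  // Unlike bsf/bsr, lzcnt/tzcnt define the all-zero input case (the result
  // is the operand width, e.g. lzcnt of 0 is 32 for a 32-bit operand), so
  // the JIT can emit them for numberOfLeading/TrailingZeros without a
  // separate zero check.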

  // BMI instructions (except tzcnt) use an encoding with VEX prefix.
  // VEX prefix is generated only when AVX > 0.
  if (supports_bmi1() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
      UseBMI1Instructions = true;
    }
  } else if (UseBMI1Instructions) {
    warning("BMI1 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI1Instructions, false);
  }

  if (supports_bmi2() && supports_avx()) {
    if (FLAG_IS_DEFAULT(UseBMI2Instructions)) {
      UseBMI2Instructions = true;
    }
  } else if (UseBMI2Instructions) {
    warning("BMI2 instructions are not available on this CPU (AVX is also required)");
    FLAG_SET_DEFAULT(UseBMI2Instructions, false);
  }

  // Use population count instruction if available.
  if (supports_popcnt()) {
    if (FLAG_IS_DEFAULT(UsePopCountInstruction)) {
      UsePopCountInstruction = true;
    }
  } else if (UsePopCountInstruction) {
    warning("POPCNT instruction is not available on this CPU");
    FLAG_SET_DEFAULT(UsePopCountInstruction, false);
  }

  // Use fast-string operations if available.
  if (supports_erms()) {
    if (FLAG_IS_DEFAULT(UseFastStosb)) {
      UseFastStosb = true;
    }
  } else if (UseFastStosb) {
    warning("fast-string operations are not available on this CPU");
    FLAG_SET_DEFAULT(UseFastStosb, false);
  }

  // Use XMM/YMM MOVDQU instruction for Object Initialization
  if (!UseFastStosb && UseSSE >= 2 && UseUnalignedLoadStores) {
    if (FLAG_IS_DEFAULT(UseXMMForObjInit)) {
      UseXMMForObjInit = true;
    }
  } else if (UseXMMForObjInit) {
    warning("UseXMMForObjInit requires SSE2 and unaligned load/stores. Feature is switched off.");
    FLAG_SET_DEFAULT(UseXMMForObjInit, false);
  }

#ifdef COMPILER2
  if (FLAG_IS_DEFAULT(AlignVector)) {
    // Modern processors allow misaligned memory operations for vectors.
    AlignVector = !UseUnalignedLoadStores;
  }
#endif // COMPILER2

  if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
    if (AllocatePrefetchInstr == 3 && !supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 0);
    } else if (!supports_sse() && supports_3dnow_prefetch()) {
      FLAG_SET_DEFAULT(AllocatePrefetchInstr, 3);
    }
  }

  // Allocation prefetch settings
  intx cache_line_size = prefetch_data_size();
  if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize) &&
      (cache_line_size > AllocatePrefetchStepSize)) {
    FLAG_SET_DEFAULT(AllocatePrefetchStepSize, cache_line_size);
  }

  if ((AllocatePrefetchDistance == 0) && (AllocatePrefetchStyle != 0)) {
    assert(!FLAG_IS_DEFAULT(AllocatePrefetchDistance), "default value should not be 0");
    if (!FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
1530 warning("AllocatePrefetchDistance is set to 0 which disable prefetching. Ignoring AllocatePrefetchStyle flag.");
    }
    FLAG_SET_DEFAULT(AllocatePrefetchStyle, 0);
  }

  if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
    bool use_watermark_prefetch = (AllocatePrefetchStyle == 2);
    FLAG_SET_DEFAULT(AllocatePrefetchDistance, allocate_prefetch_distance(use_watermark_prefetch));
  }

  if (is_intel() && cpu_family() == 6 && supports_sse3()) {
    if (FLAG_IS_DEFAULT(AllocatePrefetchLines) &&
        supports_sse4_2() && supports_ht()) { // Nehalem based cpus
      FLAG_SET_DEFAULT(AllocatePrefetchLines, 4);
    }
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling) && supports_sse4_2()) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

  if (is_zx() && ((cpu_family() == 6) || (cpu_family() == 7)) && supports_sse4_2()) {
#ifdef COMPILER2
    if (FLAG_IS_DEFAULT(UseFPUForSpilling)) {
      FLAG_SET_DEFAULT(UseFPUForSpilling, true);
    }
#endif
  }

#ifdef _LP64
  // Prefetch settings

  // Prefetch interval for gc copy/scan == 9 dcache lines.  Derived from
  // 50-warehouse specjbb runs on a 2-way 1.8ghz opteron using a 4gb heap.
  // Tested intervals from 128 to 2048 in increments of 64 == one cache line.
  // 256 bytes (4 dcache lines) was the nearest runner-up to 576.

  // gc copy/scan is disabled if prefetchw isn't supported, because
  // Prefetch::write emits an inlined prefetchw on Linux.
  // Do not use the 3dnow prefetchw instruction.  It isn't supported on em64t.
  // The used prefetcht0 instruction works for both amd64 and em64t.

  if (FLAG_IS_DEFAULT(PrefetchCopyIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchCopyIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchScanIntervalInBytes)) {
    FLAG_SET_DEFAULT(PrefetchScanIntervalInBytes, 576);
  }
  if (FLAG_IS_DEFAULT(PrefetchFieldsAhead)) {
    FLAG_SET_DEFAULT(PrefetchFieldsAhead, 1);
  }
#endif

  if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
     (cache_line_size > ContendedPaddingWidth))
    ContendedPaddingWidth = cache_line_size;

  // This machine allows unaligned memory accesses
  if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
    FLAG_SET_DEFAULT(UseUnalignedAccesses, true);
  }

#ifndef PRODUCT
  if (log_is_enabled(Info, os, cpu)) {
    LogStream ls(Log(os, cpu)::info());
    outputStream* log = &ls;
    log->print_cr("Logical CPUs per core: %u",
                  logical_processors_per_package());
    log->print_cr("L1 data cache line size: %u", L1_data_cache_line_size());
    log->print("UseSSE=%d", (int) UseSSE);
    if (UseAVX > 0) {
      log->print(" UseAVX=%d", (int) UseAVX);
    }
    if (UseAES) {
      log->print(" UseAES=1");
    }
#ifdef COMPILER2
    if (MaxVectorSize > 0) {
      log->print(" MaxVectorSize=%d", (int) MaxVectorSize);
    }
#endif
    log->cr();
    log->print("Allocation");
    if (AllocatePrefetchStyle <= 0 || (UseSSE == 0 && !supports_3dnow_prefetch())) {
      log->print_cr(": no prefetching");
    } else {
      log->print(" prefetching: ");
      if (UseSSE == 0 && supports_3dnow_prefetch()) {
        log->print("PREFETCHW");
      } else if (UseSSE >= 1) {
        if (AllocatePrefetchInstr == 0) {
          log->print("PREFETCHNTA");
        } else if (AllocatePrefetchInstr == 1) {
          log->print("PREFETCHT0");
        } else if (AllocatePrefetchInstr == 2) {
          log->print("PREFETCHT2");
        } else if (AllocatePrefetchInstr == 3) {
          log->print("PREFETCHW");
        }
      }
      if (AllocatePrefetchLines > 1) {
        log->print_cr(" at distance %d, %d lines of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchLines, (int) AllocatePrefetchStepSize);
      } else {
        log->print_cr(" at distance %d, one line of %d bytes", (int) AllocatePrefetchDistance, (int) AllocatePrefetchStepSize);
      }
    }

    if (PrefetchCopyIntervalInBytes > 0) {
      log->print_cr("PrefetchCopyIntervalInBytes %d", (int) PrefetchCopyIntervalInBytes);
    }
    if (PrefetchScanIntervalInBytes > 0) {
      log->print_cr("PrefetchScanIntervalInBytes %d", (int) PrefetchScanIntervalInBytes);
    }
    if (PrefetchFieldsAhead > 0) {
      log->print_cr("PrefetchFieldsAhead %d", (int) PrefetchFieldsAhead);
    }
    if (ContendedPaddingWidth > 0) {
      log->print_cr("ContendedPaddingWidth %d", (int) ContendedPaddingWidth);
    }
  }
#endif // !PRODUCT
}

void VM_Version::print_platform_virtualization_info(outputStream* st) {
  VirtualizationType vrt = VM_Version::get_detected_virtualization();
  if (vrt == XenHVM) {
    st->print_cr("Xen hardware-assisted virtualization detected");
  } else if (vrt == KVM) {
    st->print_cr("KVM virtualization detected");
  } else if (vrt == VMWare) {
    st->print_cr("VMWare virtualization detected");
    VirtualizationSupport::print_virtualization_info(st);
  } else if (vrt == HyperV) {
    st->print_cr("HyperV virtualization detected");
  }
}

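// Runs CPUID with the given leaf index and stores EAX..EDX into regs[0..3].
// Hypervisors advertise themselves through synthetic CPUID leaves starting
// at 0x40000000, which is the range check_virtualizations() probes below.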
void VM_Version::check_virt_cpuid(uint32_t idx, uint32_t *regs) {
  // TODO support 32 bit
#if defined(_LP64)
#if defined(_MSC_VER)
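  // 64-bit MSVC has no inline assembly, so emit a small CPUID stub into a
  // resource-allocated CodeBuffer at runtime and call it.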
  // Allocate space for the code
  const int code_size = 100;
  ResourceMark rm;
  CodeBuffer cb("detect_virt", code_size, 0);
  MacroAssembler* a = new MacroAssembler(&cb);
  address code = a->pc();
  void (*test)(uint32_t idx, uint32_t *regs) = (void(*)(uint32_t idx, uint32_t *regs))code;

  a->movq(r9, rbx); // save nonvolatile register

  // next line would not work on 32-bit
  a->movq(rax, c_rarg0 /* rcx */);
  a->movq(r8, c_rarg1 /* rdx */);
  a->cpuid();
  a->movl(Address(r8, 0), rax);
  a->movl(Address(r8, 4), rbx);
  a->movl(Address(r8, 8), rcx);
  a->movl(Address(r8, 12), rdx);

  a->movq(rbx, r9); // restore nonvolatile register
  a->ret(0);

  uint32_t *code_end = (uint32_t *)a->pc();
  a->flush();

  // execute code
  (*test)(idx, regs);
#elif defined(__GNUC__)
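  // GCC/Clang: "+a" (idx) passes the leaf in EAX and receives the clobbered
  // value back, "S" (regs) passes the output buffer in RSI/ESI, and
  // EBX/ECX/EDX plus memory are declared clobbered.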
  __asm__ volatile (
    " cpuid;"
    " mov %%eax,(%1);"
    " mov %%ebx,4(%1);"
    " mov %%ecx,8(%1);"
    " mov %%edx,12(%1);"
    : "+a" (idx)
    : "S" (regs)
    : "ebx", "ecx", "edx", "memory" );
#endif
#endif
}


bool VM_Version::use_biased_locking() {
#if INCLUDE_RTM_OPT
  // RTM locking is most useful when there is high lock contention and
  // low data contention. With high lock contention the lock is usually
  // inflated and biased locking is not suitable for that case.
  // RTM locking code requires that biased locking is off.
  // Note: we can't switch off UseBiasedLocking in get_processor_features()
  // because it is used by Thread::allocate() which is called before
  // VM_Version::initialize().
  if (UseRTMLocking && UseBiasedLocking) {
    if (FLAG_IS_DEFAULT(UseBiasedLocking)) {
      FLAG_SET_DEFAULT(UseBiasedLocking, false);
    } else {
      warning("Biased locking is not supported with RTM locking; ignoring UseBiasedLocking flag.");
      UseBiasedLocking = false;
    }
  }
#endif
  return UseBiasedLocking;
}

bool VM_Version::compute_has_intel_jcc_erratum() {
  if (!is_intel_family_core()) {
    // Only Intel CPUs are affected.
    return false;
  }
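  // On affected parts, a microcode update turns off the decoded-instruction
  // cache for any 32-byte region that a jump crosses or ends on, costing
  // performance; flagging the erratum lets the assembler pad such branches
  // away from those boundaries.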
  // The following table of affected CPUs is based on the following document released by Intel:
  // https://www.intel.com/content/dam/support/us/en/documents/processors/mitigations-jump-conditional-code-erratum.pdf
  switch (_model) {
  case 0x8E:
    // 06_8EH | 9 | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Amber Lake Y
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake U
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake U 23e
    // 06_8EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake Y
    // 06_8EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake U43e
    // 06_8EH | B | 8th Generation Intel® Core™ Processors based on microarchitecture code name Whiskey Lake U
    // 06_8EH | C | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Amber Lake Y
    // 06_8EH | C | 10th Generation Intel® Core™ Processor Family based on microarchitecture code name Comet Lake U42
    // 06_8EH | C | 8th Generation Intel® Core™ Processors based on microarchitecture code name Whiskey Lake U
    return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xC;
  case 0x4E:
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processors based on microarchitecture code name Skylake U
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake U23e
    // 06_4E  | 3 | 6th Generation Intel® Core™ Processors based on microarchitecture code name Skylake Y
    return _stepping == 0x3;
  case 0x55:
    // 06_55H | 4 | Intel® Xeon® Processor D Family based on microarchitecture code name Skylake D, Bakerville
    // 06_55H | 4 | Intel® Xeon® Scalable Processors based on microarchitecture code name Skylake Server
    // 06_55H | 4 | Intel® Xeon® Processor W Family based on microarchitecture code name Skylake W
    // 06_55H | 4 | Intel® Core™ X-series Processors based on microarchitecture code name Skylake X
    // 06_55H | 4 | Intel® Xeon® Processor E3 v5 Family based on microarchitecture code name Skylake Xeon E3
    // 06_55  | 7 | 2nd Generation Intel® Xeon® Scalable Processors based on microarchitecture code name Cascade Lake (server)
    return _stepping == 0x4 || _stepping == 0x7;
  case 0x5E:
    // 06_5E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake H
    // 06_5E  | 3 | 6th Generation Intel® Core™ Processor Family based on microarchitecture code name Skylake S
    return _stepping == 0x3;
  case 0x9E:
    // 06_9EH | 9 | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake G
    // 06_9EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake H
    // 06_9EH | 9 | 7th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake S
    // 06_9EH | 9 | Intel® Core™ X-series Processors based on microarchitecture code name Kaby Lake X
    // 06_9EH | 9 | Intel® Xeon® Processor E3 v6 Family Kaby Lake Xeon E3
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake H
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S
    // 06_9EH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (6+2) x/KBP
    // 06_9EH | A | Intel® Xeon® Processor E Family based on microarchitecture code name Coffee Lake S (6+2)
    // 06_9EH | A | Intel® Xeon® Processor E Family based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | B | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | B | Intel® Celeron® Processor G Series based on microarchitecture code name Coffee Lake S (4+2)
    // 06_9EH | D | 9th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake H (8+2)
    // 06_9EH | D | 9th Generation Intel® Core™ Processor Family based on microarchitecture code name Coffee Lake S (8+2)
    return _stepping == 0x9 || _stepping == 0xA || _stepping == 0xB || _stepping == 0xD;
  case 0xA6:
    // 06_A6H | 0 | 10th Generation Intel® Core™ Processor Family based on microarchitecture code name Comet Lake U62
    return _stepping == 0x0;
  case 0xAE:
    // 06_AEH | A | 8th Generation Intel® Core™ Processor Family based on microarchitecture code name Kaby Lake Refresh U (4+2)
    return _stepping == 0xA;
  default:
    // If we are running on another Intel machine not recognized in the table, we are okay.
    return false;
  }
}

// On Xen, the cpuid instruction returns
//  eax / registers[0]: Version of Xen
//  ebx / registers[1]: chars 'XenV'
//  ecx / registers[2]: chars 'MMXe'
//  edx / registers[3]: chars 'nVMM'
//
// On KVM / VMWare / MS Hyper-V, the cpuid instruction returns
//  ebx / registers[1]: chars 'KVMK' / 'VMwa' / 'Micr'
//  ecx / registers[2]: chars 'VMKV' / 'reVM' / 'osof'
//  edx / registers[3]: chars 'M'    / 'ware' / 't Hv'
//
// More information:
//  https://kb.vmware.com/s/article/1009458
//
void VM_Version::check_virtualizations() {
#if defined(_LP64)
  uint32_t registers[4];
  char signature[13];
  uint32_t base;
  signature[12] = '\0';
  memset((void*)registers, 0, 4*sizeof(uint32_t));

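  // Hypervisor CPUID leaves start at 0x40000000. Additional signature blocks
  // may appear at 0x100 intervals (e.g. with nested or Hyper-V-compatible
  // hypervisors), so scan the whole range; the last signature found wins.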
  for (base = 0x40000000; base < 0x40010000; base += 0x100) {
    check_virt_cpuid(base, registers);

    *(uint32_t *)(signature + 0) = registers[1];
    *(uint32_t *)(signature + 4) = registers[2];
    *(uint32_t *)(signature + 8) = registers[3];

    if (strncmp("VMwareVMware", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = VMWare;
      // check for extended metrics from guestlib
      VirtualizationSupport::initialize();
    }

    if (strncmp("Microsoft Hv", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = HyperV;
    }

    if (strncmp("KVMKVMKVM", signature, 9) == 0) {
      Abstract_VM_Version::_detected_virtualization = KVM;
    }

    if (strncmp("XenVMMXenVMM", signature, 12) == 0) {
      Abstract_VM_Version::_detected_virtualization = XenHVM;
    }
  }
#endif
}

void VM_Version::initialize() {
  ResourceMark rm;
  // Making this stub must be FIRST use of assembler

  stub_blob = BufferBlob::create("get_cpu_info_stub", stub_size);
  if (stub_blob == NULL) {
    vm_exit_during_initialization("Unable to allocate get_cpu_info_stub");
  }
  CodeBuffer c(stub_blob);
  VM_Version_StubGenerator g(&c);
  get_cpu_info_stub = CAST_TO_FN_PTR(get_cpu_info_stub_t,
                                     g.generate_get_cpu_info());

  get_processor_features();

  LP64_ONLY(Assembler::precompute_instructions();)

  if (cpu_family() > 4) { // it supports CPUID
    check_virtualizations();
  }
}