1 /* 2 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #ifndef CPU_X86_VM_VERSION_X86_HPP 26 #define CPU_X86_VM_VERSION_X86_HPP 27 28 #include "memory/universe.hpp" 29 #include "runtime/abstract_vm_version.hpp" 30 #include "utilities/macros.hpp" 31 32 class VM_Version : public Abstract_VM_Version { 33 friend class VMStructs; 34 friend class JVMCIVMStructs; 35 36 public: 37 // cpuid result register layouts. These are all unions of a uint32_t 38 // (in case anyone wants access to the register as a whole) and a bitfield. 
  // cpuid leaf 1, EAX: processor signature (stepping/model/family/type).
  union StdCpuid1Eax {
    uint32_t value;
    struct {
      uint32_t stepping   : 4,
               model      : 4,
               family     : 4,
               proc_type  : 2,
                          : 2,
               ext_model  : 4,
               ext_family : 8,
                          : 4;
    } bits;
  };

  // cpuid leaf 1, EBX: brand/clflush/APIC info.
  union StdCpuid1Ebx { // example, unused
    uint32_t value;
    struct {
      uint32_t brand_id        : 8,
               clflush_size    : 8,
               threads_per_cpu : 8,
               apic_id         : 8;
    } bits;
  };

  // cpuid leaf 1, ECX: SSE3..AVX and related feature bits.
  union StdCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t sse3     : 1,
               clmul    : 1,
                        : 1,
               monitor  : 1,
                        : 1,
               vmx      : 1,
                        : 1,
               est      : 1,
                        : 1,
               ssse3    : 1,
               cid      : 1,
                        : 1,
               fma      : 1,
               cmpxchg16: 1,
                        : 4,
               dca      : 1,
               sse4_1   : 1,
               sse4_2   : 1,
                        : 2,
               popcnt   : 1,
                        : 1,
               aes      : 1,
                        : 1,
               osxsave  : 1,
               avx      : 1,
                        : 2,
               hv       : 1; // running under a hypervisor
    } bits;
  };

  // cpuid leaf 1, EDX: legacy feature bits (TSC, CMOV, MMX, SSE/SSE2, HT, ...).
  union StdCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t          : 4,
               tsc      : 1,
                        : 3,
               cmpxchg8 : 1,
                        : 6,
               cmov     : 1,
                        : 3,
               clflush  : 1,
                        : 3,
               mmx      : 1,
               fxsr     : 1,
               sse      : 1,
               sse2     : 1,
                        : 1,
               ht       : 1,
                        : 3;
    } bits;
  };

  // cpuid leaf 4 (deterministic cache parameters), EAX.
  union DcpCpuid4Eax {
    uint32_t value;
    struct {
      uint32_t cache_type    : 5,
                             : 21,
               cores_per_cpu : 6;
    } bits;
  };

  // cpuid leaf 4, EBX: L1 cache geometry.
  union DcpCpuid4Ebx {
    uint32_t value;
    struct {
      uint32_t L1_line_size  : 12,
               partitions    : 10,
               associativity : 10;
    } bits;
  };

  // cpuid leaf 0xB (processor topology), EBX: logical cpus at this level.
  union TplCpuidBEbx {
    uint32_t value;
    struct {
      uint32_t logical_cpus : 16,
                            : 16;
    } bits;
  };

  // cpuid leaf 0x80000001, ECX (extended features; lzcnt_intel and lzcnt
  // name the same bit position used differently per vendor).
  union ExtCpuid1Ecx {
    uint32_t value;
    struct {
      uint32_t LahfSahf    : 1,
               CmpLegacy   : 1,
                           : 3,
               lzcnt_intel : 1,
               lzcnt       : 1,
               sse4a       : 1,
               misalignsse : 1,
               prefetchw   : 1,
                           : 22;
    } bits;
  };

  // cpuid leaf 0x80000001, EDX (AMD extended features incl. 3DNow!).
  union ExtCpuid1Edx {
    uint32_t value;
    struct {
      uint32_t           : 22,
               mmx_amd   : 1,
               mmx       : 1,
               fxsr      : 1,
                         : 4,
               long_mode : 1,
               tdnow2    : 1,
               tdnow     : 1;
    } bits;
  };

  // cpuid leaf 0x80000005 (AMD L1 cache descriptors), ECX/EDX layout.
  union ExtCpuid5Ex {
    uint32_t value;
    struct {
      uint32_t L1_line_size : 8,
               L1_tag_lines : 8,
               L1_assoc     : 8,
               L1_size      : 8;
    } bits;
  };

  // cpuid leaf 0x80000007, EDX: invariant TSC bit.
  union ExtCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t                : 8,
               tsc_invariance : 1,
                              : 23;
    } bits;
  };

  // cpuid leaf 0x80000008, ECX: core count (AMD).
  union ExtCpuid8Ecx {
    uint32_t value;
    struct {
      uint32_t cores_per_cpu : 8,
                             : 24;
    } bits;
  };

  // cpuid leaf 7 (structured extended features), EAX: raw value only.
  union SefCpuid7Eax {
    uint32_t value;
  };

  // cpuid leaf 7, EBX: BMI/AVX2/AVX-512/SHA/CLWB feature bits.
  union SefCpuid7Ebx {
    uint32_t value;
    struct {
      uint32_t fsgsbase   : 1,
                          : 2,
               bmi1       : 1,
                          : 1,
               avx2       : 1,
                          : 2,
               bmi2       : 1,
               erms       : 1,
                          : 1,
               rtm        : 1,
                          : 4,
               avx512f    : 1,
               avx512dq   : 1,
                          : 1,
               adx        : 1,
                          : 3,
               clflushopt : 1,
               clwb       : 1,
                          : 1,
               avx512pf   : 1,
               avx512er   : 1,
               avx512cd   : 1,
               sha        : 1,
               avx512bw   : 1,
               avx512vl   : 1;
    } bits;
  };

  // cpuid leaf 7, ECX: AVX-512 VBMI/VNNI/VAES and related bits.
  union SefCpuid7Ecx {
    uint32_t value;
    struct {
      uint32_t prefetchwt1       : 1,
               avx512_vbmi       : 1,
               umip              : 1,
               pku               : 1,
               ospke             : 1,
                                 : 1,
               avx512_vbmi2      : 1,
                                 : 1,
               gfni              : 1,
               vaes              : 1,
               avx512_vpclmulqdq : 1,
               avx512_vnni       : 1,
               avx512_bitalg     : 1,
                                 : 1,
               avx512_vpopcntdq  : 1,
                                 : 17;
    } bits;
  };

  // cpuid leaf 7, EDX: AVX-512 4VNNIW/4FMAPS bits.
  union SefCpuid7Edx {
    uint32_t value;
    struct {
      uint32_t               : 2,
               avx512_4vnniw : 1,
               avx512_4fmaps : 1,
                             : 28;
    } bits;
  };

  // cpuid leaf 0x8000001E, EBX: SMT thread count (AMD family 17h+).
  union ExtCpuid1EEbx {
    uint32_t value;
    struct {
      uint32_t                  : 8,
               threads_per_core : 8,
                                : 16;
    } bits;
  };

  // Extended control register XCR0, EAX: OS-enabled state components.
  union XemXcr0Eax {
    uint32_t value;
    struct {
      uint32_t x87     : 1,
               sse     : 1,
               ymm     : 1,
               bndregs : 1,
               bndcsr  : 1,
               opmask  : 1,
               zmm512  : 1,
               zmm32   : 1,
                       : 24;
    } bits;
  };

protected:
  // Decoded processor signature, filled in by initialization code.
  static int _cpu;       // family
  static int _model;     // model
  static int _stepping;  // stepping

  // True when this CPU is affected by the Intel JCC erratum (SKX102);
  // computed by compute_has_intel_jcc_erratum().
  static bool _has_intel_jcc_erratum;

  static address _cpuinfo_segv_addr; // address of instruction which causes SEGV
  static address _cpuinfo_cont_addr; // address of instruction after the one which causes SEGV
the one which causes SEGV 301 302 enum Feature_Flag : uint64_t { 303 CPU_CX8 = (1ULL << 0), // next bits are from cpuid 1 (EDX) 304 CPU_CMOV = (1ULL << 1), 305 CPU_FXSR = (1ULL << 2), 306 CPU_HT = (1ULL << 3), 307 308 CPU_MMX = (1ULL << 4), 309 CPU_3DNOW_PREFETCH = (1ULL << 5), // Processor supports 3dnow prefetch and prefetchw instructions 310 // may not necessarily support other 3dnow instructions 311 CPU_SSE = (1ULL << 6), 312 CPU_SSE2 = (1ULL << 7), 313 314 CPU_SSE3 = (1ULL << 8), // SSE3 comes from cpuid 1 (ECX) 315 CPU_SSSE3 = (1ULL << 9), 316 CPU_SSE4A = (1ULL << 10), 317 CPU_SSE4_1 = (1ULL << 11), 318 319 CPU_SSE4_2 = (1ULL << 12), 320 CPU_POPCNT = (1ULL << 13), 321 CPU_LZCNT = (1ULL << 14), 322 CPU_TSC = (1ULL << 15), 323 324 CPU_TSCINV_BIT = (1ULL << 16), 325 CPU_TSCINV = (1ULL << 17), 326 CPU_AVX = (1ULL << 18), 327 CPU_AVX2 = (1ULL << 19), 328 329 CPU_AES = (1ULL << 20), 330 CPU_ERMS = (1ULL << 21), // enhanced 'rep movsb/stosb' instructions 331 CPU_CLMUL = (1ULL << 22), // carryless multiply for CRC 332 CPU_BMI1 = (1ULL << 23), 333 334 CPU_BMI2 = (1ULL << 24), 335 CPU_RTM = (1ULL << 25), // Restricted Transactional Memory instructions 336 CPU_ADX = (1ULL << 26), 337 CPU_AVX512F = (1ULL << 27), // AVX 512bit foundation instructions 338 339 CPU_AVX512DQ = (1ULL << 28), 340 CPU_AVX512PF = (1ULL << 29), 341 CPU_AVX512ER = (1ULL << 30), 342 CPU_AVX512CD = (1ULL << 31), 343 344 CPU_AVX512BW = (1ULL << 32), // Byte and word vector instructions 345 CPU_AVX512VL = (1ULL << 33), // EVEX instructions with smaller vector length 346 CPU_SHA = (1ULL << 34), // SHA instructions 347 CPU_FMA = (1ULL << 35), // FMA instructions 348 349 CPU_VZEROUPPER = (1ULL << 36), // Vzeroupper instruction 350 CPU_AVX512_VPOPCNTDQ = (1ULL << 37), // Vector popcount 351 CPU_AVX512_VPCLMULQDQ = (1ULL << 38), // Vector carryless multiplication 352 CPU_AVX512_VAES = (1ULL << 39), // Vector AES instruction 353 354 CPU_AVX512_VNNI = (1ULL << 40), // Vector Neural Network Instructions 355 
CPU_FLUSH = (1ULL << 41), // flush instruction 356 CPU_FLUSHOPT = (1ULL << 42), // flusopth instruction 357 CPU_CLWB = (1ULL << 43), // clwb instruction 358 359 CPU_AVX512_VBMI2 = (1ULL << 44), // VBMI2 shift left double instructions 360 CPU_AVX512_VBMI = (1ULL << 45), // Vector BMI instructions 361 CPU_HV = (1ULL << 46), // Hypervisor instructions 362 363 CPU_MAX_FEATURE = CPU_HV 364 }; 365 366 #define FEATURES_NAMES \ 367 "cx8", "cmov", "fxsr", "ht", \ 368 "mmx", "3dnowpref", "sse", "sse2", \ 369 "sse3", "ssse3", "sse4a", "sse4.1", \ 370 "sse4.2", "popcnt", "lzcnt", "tsc", \ 371 "tscinvbit", "tscinv", "avx", "avx2", \ 372 "aes", "erms", "clmul", "bmi1", \ 373 "bmi2", "rtm", "adx", "avx512f", \ 374 "avx512dq", "avx512pf", "avx512er", "avx512cd", \ 375 "avx512bw", "avx512vl", "sha", "fma", \ 376 "vzeroupper", "avx512_vpopcntdq", "avx512_vpclmulqdq", "avx512_vaes", \ 377 "avx512_vnni", "clflush", "clflushopt", "clwb", \ 378 "avx512_vmbi2", "avx512_vmbi", "hv" 379 380 static const char* _features_names[]; 381 382 // NB! When adding new CPU feature detection consider updating vmStructs_x86.hpp, vmStructs_jvmci.hpp, and VM_Version::get_processor_features(). 383 384 enum Extended_Family { 385 // AMD 386 CPU_FAMILY_AMD_11H = 0x11, 387 // ZX 388 CPU_FAMILY_ZX_CORE_F6 = 6, 389 CPU_FAMILY_ZX_CORE_F7 = 7, 390 // Intel 391 CPU_FAMILY_INTEL_CORE = 6, 392 CPU_MODEL_NEHALEM = 0x1e, 393 CPU_MODEL_NEHALEM_EP = 0x1a, 394 CPU_MODEL_NEHALEM_EX = 0x2e, 395 CPU_MODEL_WESTMERE = 0x25, 396 CPU_MODEL_WESTMERE_EP = 0x2c, 397 CPU_MODEL_WESTMERE_EX = 0x2f, 398 CPU_MODEL_SANDYBRIDGE = 0x2a, 399 CPU_MODEL_SANDYBRIDGE_EP = 0x2d, 400 CPU_MODEL_IVYBRIDGE_EP = 0x3a, 401 CPU_MODEL_HASWELL_E3 = 0x3c, 402 CPU_MODEL_HASWELL_E7 = 0x3f, 403 CPU_MODEL_BROADWELL = 0x3d, 404 CPU_MODEL_SKYLAKE = 0x55 405 }; 406 407 // cpuid information block. All info derived from executing cpuid with 408 // various function numbers is stored here. 
  struct CpuidInfo {
    // cpuid function 0
    uint32_t std_max_function;
    uint32_t std_vendor_name_0;
    uint32_t std_vendor_name_1;
    uint32_t std_vendor_name_2;

    // cpuid function 1
    StdCpuid1Eax std_cpuid1_eax;
    StdCpuid1Ebx std_cpuid1_ebx;
    StdCpuid1Ecx std_cpuid1_ecx;
    StdCpuid1Edx std_cpuid1_edx;

    // cpuid function 4 (deterministic cache parameters)
    DcpCpuid4Eax dcp_cpuid4_eax;
    DcpCpuid4Ebx dcp_cpuid4_ebx;
    uint32_t     dcp_cpuid4_ecx; // unused currently
    uint32_t     dcp_cpuid4_edx; // unused currently

    // cpuid function 7 (structured extended features)
    SefCpuid7Eax sef_cpuid7_eax;
    SefCpuid7Ebx sef_cpuid7_ebx;
    SefCpuid7Ecx sef_cpuid7_ecx;
    SefCpuid7Edx sef_cpuid7_edx;

    // cpuid function 0xB (processor topology)
    // ecx = 0
    uint32_t     tpl_cpuidB0_eax;
    TplCpuidBEbx tpl_cpuidB0_ebx;
    uint32_t     tpl_cpuidB0_ecx; // unused currently
    uint32_t     tpl_cpuidB0_edx; // unused currently

    // ecx = 1
    uint32_t     tpl_cpuidB1_eax;
    TplCpuidBEbx tpl_cpuidB1_ebx;
    uint32_t     tpl_cpuidB1_ecx; // unused currently
    uint32_t     tpl_cpuidB1_edx; // unused currently

    // ecx = 2
    uint32_t     tpl_cpuidB2_eax;
    TplCpuidBEbx tpl_cpuidB2_ebx;
    uint32_t     tpl_cpuidB2_ecx; // unused currently
    uint32_t     tpl_cpuidB2_edx; // unused currently

    // cpuid function 0x80000000 // example, unused
    uint32_t ext_max_function;
    uint32_t ext_vendor_name_0;
    uint32_t ext_vendor_name_1;
    uint32_t ext_vendor_name_2;

    // cpuid function 0x80000001
    uint32_t     ext_cpuid1_eax; // reserved
    uint32_t     ext_cpuid1_ebx; // reserved
    ExtCpuid1Ecx ext_cpuid1_ecx;
    ExtCpuid1Edx ext_cpuid1_edx;

    // cpuid functions 0x80000002 thru 0x80000004: example, unused
    uint32_t proc_name_0, proc_name_1, proc_name_2, proc_name_3;
    uint32_t proc_name_4, proc_name_5, proc_name_6, proc_name_7;
    uint32_t proc_name_8, proc_name_9, proc_name_10,proc_name_11;

    // cpuid function 0x80000005 // AMD L1, Intel reserved
    uint32_t    ext_cpuid5_eax; // unused currently
    uint32_t    ext_cpuid5_ebx; // reserved
    ExtCpuid5Ex ext_cpuid5_ecx; // L1 data cache info (AMD)
    ExtCpuid5Ex ext_cpuid5_edx; // L1 instruction cache info (AMD)

    // cpuid function 0x80000007
    uint32_t     ext_cpuid7_eax; // reserved
    uint32_t     ext_cpuid7_ebx; // reserved
    uint32_t     ext_cpuid7_ecx; // reserved
    ExtCpuid7Edx ext_cpuid7_edx; // tscinv

    // cpuid function 0x80000008
    uint32_t     ext_cpuid8_eax; // unused currently
    uint32_t     ext_cpuid8_ebx; // reserved
    ExtCpuid8Ecx ext_cpuid8_ecx;
    uint32_t     ext_cpuid8_edx; // reserved

    // cpuid function 0x8000001E // AMD 17h
    uint32_t      ext_cpuid1E_eax;
    ExtCpuid1EEbx ext_cpuid1E_ebx; // threads per core (AMD17h)
    uint32_t      ext_cpuid1E_ecx;
    uint32_t      ext_cpuid1E_edx; // unused currently

    // extended control register XCR0 (the XFEATURE_ENABLED_MASK register)
    XemXcr0Eax xem_xcr0_eax;
    uint32_t   xem_xcr0_edx; // reserved

    // Space to save ymm registers after signal handle
    int ymm_save[8*4]; // Save ymm0, ymm7, ymm8, ymm15

    // Space to save zmm registers after signal handle
    int zmm_save[16*4]; // Save zmm0, zmm7, zmm8, zmm31
  };

  // The actual cpuid info block
  static CpuidInfo _cpuid_info;

  // Extractors and predicates

  // Family plus the extended family field (cpuid 1 EAX).
  static uint32_t extended_cpu_family() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.family;
    result += _cpuid_info.std_cpuid1_eax.bits.ext_family;
    return result;
  }

  // Model with the extended model field folded into bits [7:4] (cpuid 1 EAX).
  static uint32_t extended_cpu_model() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.model;
    result |= _cpuid_info.std_cpuid1_eax.bits.ext_model << 4;
    return result;
  }

  // Stepping field from cpuid 1 EAX.
  static uint32_t cpu_stepping() {
    uint32_t result = _cpuid_info.std_cpuid1_eax.bits.stepping;
    return result;
  }

  static uint logical_processor_count() {
    uint result = threads_per_core();
    return result;
  }

  static bool compute_has_intel_jcc_erratum();

  // Decode the raw cpuid data in _cpuid_info into a CPU_* feature bit mask.
  // AVX/AVX-512 bits are only reported when the OS has enabled the matching
  // state components in XCR0 (OSXSAVE gating).
  static uint64_t feature_flags() {
    uint64_t result = 0;
    if (_cpuid_info.std_cpuid1_edx.bits.cmpxchg8 != 0)
      result |= CPU_CX8;
    if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
      result |= CPU_CMOV;
    if (_cpuid_info.std_cpuid1_edx.bits.clflush != 0)
      result |= CPU_FLUSH;
#ifdef _LP64
    // clflush should always be available on x86_64
    // if not we are in real trouble because we rely on it
    // to flush the code cache.
    assert ((result & CPU_FLUSH) != 0, "clflush should be available");
#endif
    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
      result |= CPU_FXSR;
    // HT flag is set for multi-core processors also.
    if (threads_per_core() > 1)
      result |= CPU_HT;
    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd_family() &&
        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
      result |= CPU_MMX;
    if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
      result |= CPU_SSE;
    if (_cpuid_info.std_cpuid1_edx.bits.sse2 != 0)
      result |= CPU_SSE2;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse3 != 0)
      result |= CPU_SSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.ssse3 != 0)
      result |= CPU_SSSE3;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_1 != 0)
      result |= CPU_SSE4_1;
    if (_cpuid_info.std_cpuid1_ecx.bits.sse4_2 != 0)
      result |= CPU_SSE4_2;
    if (_cpuid_info.std_cpuid1_ecx.bits.popcnt != 0)
      result |= CPU_POPCNT;
    // AVX requires both the cpuid bit and OS support (OSXSAVE + XCR0 sse/ymm).
    if (_cpuid_info.std_cpuid1_ecx.bits.avx != 0 &&
        _cpuid_info.std_cpuid1_ecx.bits.osxsave != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.sse != 0 &&
        _cpuid_info.xem_xcr0_eax.bits.ymm != 0) {
      result |= CPU_AVX;
      result |= CPU_VZEROUPPER;
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx2 != 0)
        result |= CPU_AVX2;
      // AVX-512 additionally requires opmask/zmm state enabled in XCR0.
      if (_cpuid_info.sef_cpuid7_ebx.bits.avx512f != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.opmask != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm512 != 0 &&
          _cpuid_info.xem_xcr0_eax.bits.zmm32 != 0) {
        result |= CPU_AVX512F;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512cd != 0)
          result |= CPU_AVX512CD;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512dq != 0)
          result |= CPU_AVX512DQ;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512pf != 0)
          result |= CPU_AVX512PF;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512er != 0)
          result |= CPU_AVX512ER;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512bw != 0)
          result |= CPU_AVX512BW;
        if (_cpuid_info.sef_cpuid7_ebx.bits.avx512vl != 0)
          result |= CPU_AVX512VL;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpopcntdq != 0)
          result |= CPU_AVX512_VPOPCNTDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vpclmulqdq != 0)
          result |= CPU_AVX512_VPCLMULQDQ;
        if (_cpuid_info.sef_cpuid7_ecx.bits.vaes != 0)
          result |= CPU_AVX512_VAES;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vnni != 0)
          result |= CPU_AVX512_VNNI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi != 0)
          result |= CPU_AVX512_VBMI;
        if (_cpuid_info.sef_cpuid7_ecx.bits.avx512_vbmi2 != 0)
          result |= CPU_AVX512_VBMI2;
      }
    }
    if (_cpuid_info.std_cpuid1_ecx.bits.hv != 0)
      result |= CPU_HV;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi1 != 0)
      result |= CPU_BMI1;
    if (_cpuid_info.std_cpuid1_edx.bits.tsc != 0)
      result |= CPU_TSC;
    if (_cpuid_info.ext_cpuid7_edx.bits.tsc_invariance != 0)
      result |= CPU_TSCINV_BIT;
    if (_cpuid_info.std_cpuid1_ecx.bits.aes != 0)
      result |= CPU_AES;
    if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0)
      result |= CPU_ERMS;
    if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0)
      result |= CPU_CLMUL;
    if (_cpuid_info.sef_cpuid7_ebx.bits.rtm != 0)
      result |= CPU_RTM;
    if (_cpuid_info.sef_cpuid7_ebx.bits.adx != 0)
      result |= CPU_ADX;
    if (_cpuid_info.sef_cpuid7_ebx.bits.bmi2 != 0)
      result |= CPU_BMI2;
    if (_cpuid_info.sef_cpuid7_ebx.bits.sha != 0)
      result |= CPU_SHA;
    if (_cpuid_info.std_cpuid1_ecx.bits.fma != 0)
      result |= CPU_FMA;
    if (_cpuid_info.sef_cpuid7_ebx.bits.clflushopt != 0)
      result |= CPU_FLUSHOPT;

    // AMD|Hygon features.
    if (is_amd_family()) {
      if ((_cpuid_info.ext_cpuid1_edx.bits.tdnow != 0) ||
          (_cpuid_info.ext_cpuid1_ecx.bits.prefetchw != 0))
        result |= CPU_3DNOW_PREFETCH;
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt != 0)
        result |= CPU_LZCNT;
      if (_cpuid_info.ext_cpuid1_ecx.bits.sse4a != 0)
        result |= CPU_SSE4A;
    }

    // Intel features.
    if (is_intel()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
        result |= CPU_LZCNT;
      // for Intel, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
      if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
      if (_cpuid_info.sef_cpuid7_ebx.bits.clwb != 0) {
        result |= CPU_CLWB;
      }
    }

    // ZX features.
    if (is_zx()) {
      if (_cpuid_info.ext_cpuid1_ecx.bits.lzcnt_intel != 0)
        result |= CPU_LZCNT;
      // for ZX, ecx.bits.misalignsse bit (bit 8) indicates support for prefetchw
      if (_cpuid_info.ext_cpuid1_ecx.bits.misalignsse != 0) {
        result |= CPU_3DNOW_PREFETCH;
      }
    }

    // Composite features.
    if (supports_tscinv_bit() &&
        ((is_amd_family() && !is_amd_Barcelona()) ||
         is_intel_tsc_synched_at_init())) {
      result |= CPU_TSCINV;
    }

    return result;
  }

  // True when the OS demonstrably preserved the full ymm (or zmm) register
  // state across the signal taken during cpu info collection: the saved
  // register images must all still contain ymm_test_value().
  static bool os_supports_avx_vectors() {
    bool retVal = false;
    int nreg = 2 LP64_ONLY(+2);
    if (supports_evex()) {
      // Verify that OS save/restore all bits of EVEX registers
      // during signal processing.
      retVal = true;
      for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
        if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
          retVal = false;
          break;
        }
      }
    } else if (supports_avx()) {
      // Verify that OS save/restore all bits of AVX registers
      // during signal processing.
      retVal = true;
      for (int i = 0; i < 8 * nreg; i++) { // 32 bytes per ymm register
        if (_cpuid_info.ymm_save[i] != ymm_test_value()) {
          retVal = false;
          break;
        }
      }
      // zmm_save will be set on a EVEX enabled machine even if we choose AVX code gen
      if (retVal == false) {
        // Verify that OS save/restore all bits of EVEX registers
        // during signal processing.
        retVal = true;
        for (int i = 0; i < 16 * nreg; i++) { // 64 bytes per zmm register
          if (_cpuid_info.zmm_save[i] != ymm_test_value()) {
            retVal = false;
            break;
          }
        }
      }
    }
    return retVal;
  }

  static void get_processor_features();

public:
  // Offsets for cpuid asm stub (the stub stores raw cpuid output directly
  // into _cpuid_info at these offsets).
  static ByteSize std_cpuid0_offset() { return byte_offset_of(CpuidInfo, std_max_function); }
  static ByteSize std_cpuid1_offset() { return byte_offset_of(CpuidInfo, std_cpuid1_eax); }
  static ByteSize dcp_cpuid4_offset() { return byte_offset_of(CpuidInfo, dcp_cpuid4_eax); }
  static ByteSize sef_cpuid7_offset() { return byte_offset_of(CpuidInfo, sef_cpuid7_eax); }
  static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); }
  static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); }
  static ByteSize ext_cpuid7_offset() { return byte_offset_of(CpuidInfo, ext_cpuid7_eax); }
  static ByteSize ext_cpuid8_offset() { return byte_offset_of(CpuidInfo, ext_cpuid8_eax); }
  static ByteSize ext_cpuid1E_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1E_eax); }
  static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); }
  static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); }
  static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); }
  static ByteSize xem_xcr0_offset() { return byte_offset_of(CpuidInfo, xem_xcr0_eax); }
  static ByteSize ymm_save_offset() { return byte_offset_of(CpuidInfo, ymm_save); }
  static ByteSize zmm_save_offset() { return byte_offset_of(CpuidInfo, zmm_save); }

  // The value used to check ymm register after signal handle
  static int ymm_test_value()    { return 0xCAFEBABE; }

  static void get_cpu_info_wrapper();
  static void set_cpuinfo_segv_addr(address pc) { _cpuinfo_segv_addr = pc; }
  static bool  is_cpuinfo_segv_addr(address pc) { return _cpuinfo_segv_addr == pc; }
  static void set_cpuinfo_cont_addr(address pc) { _cpuinfo_cont_addr = pc; }
  static address  cpuinfo_cont_addr()           { return _cpuinfo_cont_addr; }

  // Used by tests/stubs to force a restricted feature set.
  static void clean_cpuFeatures()   { _features = 0; }
  static void set_avx_cpuFeatures() { _features = (CPU_SSE | CPU_SSE2 | CPU_AVX | CPU_VZEROUPPER ); }
  static void set_evex_cpuFeatures() { _features = (CPU_AVX512F | CPU_SSE | CPU_SSE2 | CPU_VZEROUPPER ); }


  // Initialization
  static void initialize();

  // Override Abstract_VM_Version implementation
  static void print_platform_virtualization_info(outputStream*);

  // Override Abstract_VM_Version implementation
  static bool use_biased_locking();

  // Asserts
  static void assert_is_initialized() {
    assert(_cpuid_info.std_cpuid1_eax.bits.family != 0, "VM_Version not initialized");
  }

  //
  // Processor family:
  //       3   -  386
  //       4   -  486
  //       5   -  Pentium
  //       6   -  PentiumPro, Pentium II, Celeron, Xeon, Pentium III, Athlon,
  //              Pentium M, Core Solo, Core Duo, Core2 Duo
  //    family 6 model:   9,        13,       14,        15
  //       0x0f   -  Pentium 4, Opteron
  //
  // Note: The cpu family should be used to select between
  //       instruction sequences which are valid on all Intel
  //       processors.  Use the feature test functions below to
  //       determine whether a particular instruction is supported.
  //
  static int cpu_family()        { return _cpu;}
  static bool is_P6()            { return cpu_family() >= 6; }
  // Vendor identification: compare the first dword of the cpuid leaf 0
  // vendor string against the known vendor signatures.
  static bool is_amd()           { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x68747541; } // 'htuA'
  static bool is_hygon()         { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x6F677948; } // 'ogyH'
  static bool is_amd_family()    { return is_amd() || is_hygon(); }
  static bool is_intel()         { assert_is_initialized(); return _cpuid_info.std_vendor_name_0 == 0x756e6547; } // 'uneG'
  static bool is_zx()            { assert_is_initialized(); return (_cpuid_info.std_vendor_name_0 == 0x746e6543) || (_cpuid_info.std_vendor_name_0 == 0x68532020); } // 'tneC'||'hS '
  static bool is_atom_family()   { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x36) || (extended_cpu_model() == 0x37) || (extended_cpu_model() == 0x4D))); } //Silvermont and Centerton
  static bool is_knights_family() { return ((cpu_family() == 0x06) && ((extended_cpu_model() == 0x57) || (extended_cpu_model() == 0x85))); } // Xeon Phi 3200/5200/7200 and Future Xeon Phi

  // True when cpuid leaf 0xB reported a usable topology.
  static bool supports_processor_topology() {
    return (_cpuid_info.std_max_function >= 0xB) &&
           // eax[4:0] | ebx[0:15] == 0 indicates invalid topology level.
           // Some cpus have max cpuid >= 0xB but do not support processor topology.
           (((_cpuid_info.tpl_cpuidB0_eax & 0x1f) | _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus) != 0);
  }

  // Physical cores per package, derived from leaf 0xB topology when
  // available, otherwise from the vendor-specific cache/core leaves.
  static uint cores_per_cpu() {
    uint result = 1;
    if (is_intel()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    } else if (is_amd_family()) {
      result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1);
    } else if (is_zx()) {
      bool supports_topology = supports_processor_topology();
      if (supports_topology) {
        result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus /
                 _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
      }
      if (!supports_topology || result == 0) {
        result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1);
      }
    }
    return result;
  }

  // Hardware threads per core; never returns 0.
  static uint threads_per_core() {
    uint result = 1;
    if (is_intel() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (is_zx() && supports_processor_topology()) {
      result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus;
    } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) {
      if (cpu_family() >= 0x17) {
        result = _cpuid_info.ext_cpuid1E_ebx.bits.threads_per_core + 1;
      } else {
        result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu /
                 cores_per_cpu();
      }
    }
    return (result == 0 ? 1 : result);
  }

  // L1 data cache line size in bytes; clamped to at least 32.
  static intx L1_line_size() {
    intx result = 0;
    if (is_intel()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    } else if (is_amd_family()) {
      result = _cpuid_info.ext_cpuid5_ecx.bits.L1_line_size;
    } else if (is_zx()) {
      result = (_cpuid_info.dcp_cpuid4_ebx.bits.L1_line_size + 1);
    }
    if (result < 32) // not defined ?
      result = 32;   // 32 bytes by default on x86 and other x64
    return result;
  }

  static intx prefetch_data_size() {
    return L1_line_size();
  }

  //
  // Feature identification
  //
  static bool supports_cpuid() { return _features  != 0; }
  static bool supports_cmpxchg8()  { return (_features & CPU_CX8) != 0; }
  static bool supports_cmov()    { return (_features & CPU_CMOV) != 0; }
  static bool supports_fxsr()    { return (_features & CPU_FXSR) != 0; }
  static bool supports_ht()      { return (_features & CPU_HT) != 0; }
  static bool supports_mmx()     { return (_features & CPU_MMX) != 0; }
  static bool supports_sse()     { return (_features & CPU_SSE) != 0; }
  static bool supports_sse2()    { return (_features & CPU_SSE2) != 0; }
  static bool supports_sse3()    { return (_features & CPU_SSE3) != 0; }
  static bool supports_ssse3()   { return (_features & CPU_SSSE3)!= 0; }
  static bool supports_sse4_1()  { return (_features & CPU_SSE4_1) != 0; }
  static bool supports_sse4_2()  { return (_features & CPU_SSE4_2) != 0; }
  static bool supports_popcnt()  { return (_features & CPU_POPCNT) != 0; }
  static bool supports_avx()     { return (_features & CPU_AVX) != 0; }
  static bool supports_avx2()    { return (_features & CPU_AVX2) != 0; }
  static bool supports_tsc()     { return (_features & CPU_TSC) != 0; }
  static bool supports_aes()     { return (_features & CPU_AES) != 0; }
  static bool supports_erms()    { return (_features & CPU_ERMS) != 0; }
  static bool supports_clmul()   { return (_features & CPU_CLMUL) != 0; }
  static bool supports_rtm()     { return (_features & CPU_RTM) != 0; }
  static bool supports_bmi1()    { return (_features & CPU_BMI1) != 0; }
  static bool supports_bmi2()    { return (_features & CPU_BMI2) != 0; }
  static bool supports_adx()     { return (_features & CPU_ADX) != 0; }
  static bool supports_evex()    { return (_features & CPU_AVX512F) != 0; }
  static bool supports_avx512dq() { return (_features & CPU_AVX512DQ) != 0; }
  static bool supports_avx512pf() { return (_features & CPU_AVX512PF) != 0; }
  static bool supports_avx512er() { return (_features & CPU_AVX512ER) != 0; }
  static bool supports_avx512cd() { return (_features & CPU_AVX512CD) != 0; }
  static bool supports_avx512bw() { return (_features & CPU_AVX512BW) != 0; }
  static bool supports_avx512vl() { return (_features & CPU_AVX512VL) != 0; }
  // Composite AVX-512 capability checks.
  static bool supports_avx512vlbw() { return (supports_evex() && supports_avx512bw() && supports_avx512vl()); }
  static bool supports_avx512vldq() { return (supports_evex() && supports_avx512dq() && supports_avx512vl()); }
  static bool supports_avx512vlbwdq() { return (supports_evex() && supports_avx512vl() &&
                                                supports_avx512bw() && supports_avx512dq()); }
  static bool supports_avx512novl() { return (supports_evex() && !supports_avx512vl()); }
  static bool supports_avx512nobw() { return (supports_evex() && !supports_avx512bw()); }
  static bool supports_avx256only() { return (supports_avx2() && !supports_evex()); }
  static bool supports_avxonly()    { return ((supports_avx2() || supports_avx()) && !supports_evex()); }
  static bool supports_sha()        { return (_features & CPU_SHA) != 0; }
  // FMA is only usable together with AVX.
  static bool supports_fma()        { return (_features & CPU_FMA) != 0 && supports_avx(); }
  static bool supports_vzeroupper() { return (_features & CPU_VZEROUPPER) != 0; }
  static bool supports_avx512_vpopcntdq()  { return (_features & CPU_AVX512_VPOPCNTDQ) != 0; }
  static bool supports_avx512_vpclmulqdq() { return (_features & CPU_AVX512_VPCLMULQDQ) != 0; }
  static bool supports_avx512_vaes()  { return (_features & CPU_AVX512_VAES) != 0; }
  static bool supports_avx512_vnni()  { return (_features & CPU_AVX512_VNNI) != 0; }
  static bool supports_avx512_vbmi()  { return (_features & CPU_AVX512_VBMI) != 0; }
  static bool supports_avx512_vbmi2() { return (_features & CPU_AVX512_VBMI2) != 0; }
  static bool supports_hv()           { return (_features & CPU_HV) != 0; }

  // Intel features
  static bool is_intel_family_core() { return is_intel() &&
                                       extended_cpu_family() == CPU_FAMILY_INTEL_CORE; }

  static bool is_intel_skylake() { return is_intel_family_core() &&
                                   extended_cpu_model() == CPU_MODEL_SKYLAKE; }

  // True for Intel models known to synchronize TSCs at initialization
  // (EP variants up to 2 sockets).
  static bool is_intel_tsc_synched_at_init()  {
    if (is_intel_family_core()) {
      uint32_t ext_model = extended_cpu_model();
      if (ext_model == CPU_MODEL_NEHALEM_EP     ||
          ext_model == CPU_MODEL_WESTMERE_EP    ||
          ext_model == CPU_MODEL_SANDYBRIDGE_EP ||
          ext_model == CPU_MODEL_IVYBRIDGE_EP) {
        // <= 2-socket invariant tsc support. EX versions are usually used
        // in > 2-socket systems and likely don't synchronize tscs at
        // initialization.
        // Code that uses tsc values must be prepared for them to arbitrarily
        // jump forward or backward.
        return true;
      }
    }
    return false;
  }

  // This checks if the JVM is potentially affected by an erratum on Intel CPUs (SKX102)
  // that causes unpredictable behaviour when jcc crosses 64 byte boundaries. Its microcode
  // mitigation causes regressions when jumps or fused conditional branches cross or end at
  // 32 byte boundaries.
  static bool has_intel_jcc_erratum() { return _has_intel_jcc_erratum; }

  // AMD features
  static bool supports_3dnow_prefetch()    { return (_features & CPU_3DNOW_PREFETCH) != 0; }
  static bool supports_lzcnt()    { return (_features & CPU_LZCNT) != 0; }
  static bool supports_sse4a()    { return (_features & CPU_SSE4A) != 0; }

  static bool is_amd_Barcelona()  { return is_amd() &&
                                    extended_cpu_family() == CPU_FAMILY_AMD_11H; }

  // Intel and AMD newer cores support fast timestamps well
  static bool supports_tscinv_bit() {
    return (_features & CPU_TSCINV_BIT) != 0;
  }
  static bool supports_tscinv() {
    return (_features & CPU_TSCINV) != 0;
  }

  // Intel Core and newer cpus have fast IDIV instruction (excluding Atom).
has_fast_idiv()964 static bool has_fast_idiv() { return is_intel() && cpu_family() == 6 && 965 supports_sse3() && _model != 0x1C; } 966 supports_compare_and_exchange()967 static bool supports_compare_and_exchange() { return true; } 968 allocate_prefetch_distance(bool use_watermark_prefetch)969 static intx allocate_prefetch_distance(bool use_watermark_prefetch) { 970 // Hardware prefetching (distance/size in bytes): 971 // Pentium 3 - 64 / 32 972 // Pentium 4 - 256 / 128 973 // Athlon - 64 / 32 ???? 974 // Opteron - 128 / 64 only when 2 sequential cache lines accessed 975 // Core - 128 / 64 976 // 977 // Software prefetching (distance in bytes / instruction with best score): 978 // Pentium 3 - 128 / prefetchnta 979 // Pentium 4 - 512 / prefetchnta 980 // Athlon - 128 / prefetchnta 981 // Opteron - 256 / prefetchnta 982 // Core - 256 / prefetchnta 983 // It will be used only when AllocatePrefetchStyle > 0 984 985 if (is_amd_family()) { // AMD | Hygon 986 if (supports_sse2()) { 987 return 256; // Opteron 988 } else { 989 return 128; // Athlon 990 } 991 } else { // Intel 992 if (supports_sse3() && cpu_family() == 6) { 993 if (supports_sse4_2() && supports_ht()) { // Nehalem based cpus 994 return 192; 995 } else if (use_watermark_prefetch) { // watermark prefetching on Core 996 #ifdef _LP64 997 return 384; 998 #else 999 return 320; 1000 #endif 1001 } 1002 } 1003 if (supports_sse2()) { 1004 if (cpu_family() == 6) { 1005 return 256; // Pentium M, Core, Core2 1006 } else { 1007 return 512; // Pentium 4 1008 } 1009 } else { 1010 return 128; // Pentium 3 (and all other old CPUs) 1011 } 1012 } 1013 } 1014 1015 // SSE2 and later processors implement a 'pause' instruction 1016 // that can be used for efficient implementation of 1017 // the intrinsic for java.lang.Thread.onSpinWait() supports_on_spin_wait()1018 static bool supports_on_spin_wait() { return supports_sse2(); } 1019 1020 // x86_64 supports fast class initialization checks for static methods. 
supports_fast_class_init_checks()1021 static bool supports_fast_class_init_checks() { 1022 return LP64_ONLY(true) NOT_LP64(false); // not implemented on x86_32 1023 } 1024 supports_stack_watermark_barrier()1025 constexpr static bool supports_stack_watermark_barrier() { 1026 return true; 1027 } 1028 1029 // there are several insns to force cache line sync to memory which 1030 // we can use to ensure mapped non-volatile memory is up to date with 1031 // pending in-cache changes. 1032 // 1033 // 64 bit cpus always support clflush which writes back and evicts 1034 // on 32 bit cpus support is recorded via a feature flag 1035 // 1036 // clflushopt is optional and acts like clflush except it does 1037 // not synchronize with other memory ops. it needs a preceding 1038 // and trailing StoreStore fence 1039 // 1040 // clwb is an optional intel-specific instruction which 1041 // writes back without evicting the line. it also does not 1042 // synchronize with other memory ops. so, it needs preceding 1043 // and trailing StoreStore fences. 1044 1045 #ifdef _LP64 supports_clflush()1046 static bool supports_clflush() { 1047 // clflush should always be available on x86_64 1048 // if not we are in real trouble because we rely on it 1049 // to flush the code cache. 1050 // Unfortunately, Assembler::clflush is currently called as part 1051 // of generation of the code cache flush routine. This happens 1052 // under Universe::init before the processor features are set 1053 // up. Assembler::flush calls this routine to check that clflush 1054 // is allowed. So, we give the caller a free pass if Universe init 1055 // is still in progress. 
1056 assert ((!Universe::is_fully_initialized() || (_features & CPU_FLUSH) != 0), "clflush should be available"); 1057 return true; 1058 } 1059 #else supports_clflush()1060 static bool supports_clflush() { return ((_features & CPU_FLUSH) != 0); } 1061 #endif // _LP64 1062 // Note: CPU_FLUSHOPT and CPU_CLWB bits should always be zero for 32-bit supports_clflushopt()1063 static bool supports_clflushopt() { return ((_features & CPU_FLUSHOPT) != 0); } supports_clwb()1064 static bool supports_clwb() { return ((_features & CPU_CLWB) != 0); } 1065 1066 #ifdef __APPLE__ 1067 // Is the CPU running emulated (for example macOS Rosetta running x86_64 code on M1 ARM (aarch64) 1068 static bool is_cpu_emulated(); 1069 #endif 1070 1071 // support functions for virtualization detection 1072 private: 1073 static void check_virtualizations(); 1074 }; 1075 1076 #endif // CPU_X86_VM_VERSION_X86_HPP 1077