/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/pcpu.h>
#include <sys/systm.h>
#include <sys/sysctl.h>

#include <machine/clock.h>
#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/segments.h>
#include <machine/specialreg.h>

#include <machine/vmm.h>

#include "vmm_host.h"
#include "vmm_ktr.h"
#include "vmm_util.h"
#include "x86.h"

SYSCTL_DECL(_hw_vmm);
static SYSCTL_NODE(_hw_vmm, OID_AUTO, topology, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    NULL);

#define	CPUID_VM_HIGH		0x40000000

static const char bhyve_id[12] = "bhyve bhyve ";

static uint64_t bhyve_xcpuids;
SYSCTL_ULONG(_hw_vmm, OID_AUTO, bhyve_xcpuids, CTLFLAG_RW, &bhyve_xcpuids, 0,
    "Number of times an unknown cpuid leaf was accessed");

static int cpuid_leaf_b = 1;
SYSCTL_INT(_hw_vmm_topology, OID_AUTO, cpuid_leaf_b, CTLFLAG_RDTUN,
    &cpuid_leaf_b, 0, NULL);

/*
 * Round up to the next power of two, if necessary, and then take log2.
 * Returns -1 if argument is zero.
 */
static __inline int
log2(u_int x)
{

	return (fls(x << (1 - powerof2(x))) - 1);
}

int
x86_emulate_cpuid(struct vcpu *vcpu, uint64_t *rax, uint64_t *rbx,
    uint64_t *rcx, uint64_t *rdx)
{
	struct vm *vm = vcpu_vm(vcpu);
	int vcpu_id = vcpu_vcpuid(vcpu);
	const struct xsave_limits *limits;
	uint64_t cr4;
	int error, enable_invpcid, enable_rdpid, enable_rdtscp, level,
	    width, x2apic_id;
	unsigned int func, regs[4], logical_cpus, param;
	enum x2apic_state x2apic_state;
	uint16_t cores, maxcpus, sockets, threads;

	/*
	 * The function of CPUID is controlled through the provided value of
	 * %eax (and secondarily %ecx, for certain leaf data).
	 */
	func = (uint32_t)*rax;
	param = (uint32_t)*rcx;

	VCPU_CTR2(vm, vcpu_id, "cpuid %#x,%#x", func, param);

	/*
	 * Requests for invalid CPUID levels should map to the highest
	 * available level instead.
	 */
	if (cpu_exthigh != 0 && func >= 0x80000000) {
		if (func > cpu_exthigh)
			func = cpu_exthigh;
	} else if (func >= 0x40000000) {
		if (func > CPUID_VM_HIGH)
			func = CPUID_VM_HIGH;
	} else if (func > cpu_high) {
		func = cpu_high;
	}

	/*
	 * In general the approach used for CPU topology is to
	 * advertise a flat topology where all CPUs are packages with
	 * no multi-core or SMT.
	 */
	switch (func) {
		/*
		 * Pass these through to the guest
		 */
	case CPUID_0000_0000:
	case CPUID_0000_0002:
	case CPUID_0000_0003:
	case CPUID_8000_0000:
	case CPUID_8000_0002:
	case CPUID_8000_0003:
	case CPUID_8000_0004:
	case CPUID_8000_0006:
		cpuid_count(func, param, regs);
		break;
	case CPUID_8000_0008:
		cpuid_count(func, param, regs);
		if (vmm_is_svm()) {
			/*
			 * As on Intel (0000_0007:0, EDX), mask out
			 * unsupported or unsafe AMD extended features
			 * (8000_0008 EBX).
			 */
			regs[1] &= (AMDFEID_CLZERO | AMDFEID_IRPERF |
			    AMDFEID_XSAVEERPTR);

			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			/*
			 * Here, width is ApicIdCoreIdSize, present on
			 * at least Family 15h and newer.  It
			 * represents the "number of bits in the
			 * initial apicid that indicate thread id
			 * within a package."
			 *
			 * Our topo_probe_amd() uses it for
			 * pkg_id_shift and other OSes may rely on it.
			 */
			width = MIN(0xF, log2(threads * cores));
			if (width < 0x4)
				width = 0;
			logical_cpus = MIN(0xFF, threads * cores - 1);
			regs[2] = (width << AMDID_COREID_SIZE_SHIFT) | logical_cpus;
		}
		break;

	case CPUID_8000_0001:
		cpuid_count(func, param, regs);

		/*
		 * Hide SVM from guest.
		 */
		regs[2] &= ~AMDID2_SVM;

		/*
		 * Don't advertise extended performance counter MSRs
		 * to the guest.
		 */
		regs[2] &= ~AMDID2_PCXC;
		regs[2] &= ~AMDID2_PNXC;
		regs[2] &= ~AMDID2_PTSCEL2I;

		/*
		 * Don't advertise Instruction Based Sampling feature.
		 */
		regs[2] &= ~AMDID2_IBS;

		/* NodeID MSR not available */
		regs[2] &= ~AMDID2_NODE_ID;

		/* Don't advertise the OS visible workaround feature */
		regs[2] &= ~AMDID2_OSVW;

		/* Hide mwaitx/monitorx capability from the guest */
		regs[2] &= ~AMDID2_MWAITX;

		/* Advertise RDTSCP if it is enabled. */
		error = vm_get_capability(vcpu,
		    VM_CAP_RDTSCP, &enable_rdtscp);
		if (error == 0 && enable_rdtscp)
			regs[3] |= AMDID_RDTSCP;
		else
			regs[3] &= ~AMDID_RDTSCP;
		break;

	case CPUID_8000_0007:
		/*
		 * AMD uses this leaf to advertise the processor's
		 * power monitoring and RAS capabilities.  These
		 * features are hardware-specific and exposing
		 * them to a guest doesn't make a lot of sense.
		 *
		 * Intel uses this leaf only to advertise the
		 * "Invariant TSC" feature with all other bits
		 * being reserved (set to zero).
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/*
		 * "Invariant TSC" can be advertised to the guest if:
		 * - host TSC frequency is invariant
		 * - host TSCs are synchronized across physical cpus
		 *
		 * XXX This still falls short because the vcpu
		 * can observe the TSC moving backwards as it
		 * migrates across physical cpus.  But at least
		 * it should discourage the guest from using the
		 * TSC to keep track of time.
		 */
		if (tsc_is_invariant && smp_tsc)
			regs[3] |= AMDPM_TSC_INVARIANT;
		break;

	case CPUID_8000_001D:
		/* AMD Cache topology, like 0000_0004 for Intel. */
		if (!vmm_is_svm())
			goto default_leaf;

		/*
		 * Similar to Intel, generate a fictitious cache
		 * topology for the guest with L3 shared by the
		 * package, and L1 and L2 local to a core.
		 */
		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		switch (param) {
		case 0:
			logical_cpus = threads;
			level = 1;
			func = 1;	/* data cache */
			break;
		case 1:
			logical_cpus = threads;
			level = 2;
			func = 3;	/* unified cache */
			break;
		case 2:
			logical_cpus = threads * cores;
			level = 3;
			func = 3;	/* unified cache */
			break;
		default:
			logical_cpus = 0;
			level = 0;
			func = 0;
			break;
		}

		logical_cpus = MIN(0xfff, logical_cpus - 1);
		regs[0] = (logical_cpus << 14) | (1 << 8) |
		    (level << 5) | func;
		regs[1] = (func > 0) ? (CACHE_LINE_SIZE - 1) : 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_8000_001E:
		/*
		 * AMD Family 16h+ and Hygon Family 18h additional
		 * identifiers.
		 */
		if (!vmm_is_svm() || CPUID_TO_FAMILY(cpu_id) < 0x16)
			goto default_leaf;

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		regs[0] = vcpu_id;
		threads = MIN(0xFF, threads - 1);
		regs[1] = (threads << 8) |
		    (vcpu_id >> log2(threads + 1));
		/*
		 * XXX Bhyve topology cannot yet represent >1 node per
		 * processor.
		 */
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0001:
		do_cpuid(1, regs);

		error = vm_get_x2apic_state(vcpu, &x2apic_state);
		if (error) {
			panic("x86_emulate_cpuid: error %d "
			    "fetching x2apic state", error);
		}

		/*
		 * Override the APIC ID only in ebx
		 */
		regs[1] &= ~(CPUID_LOCAL_APIC_ID);
		regs[1] |= (vcpu_id << CPUID_0000_0001_APICID_SHIFT);

		/*
		 * Don't expose VMX, SpeedStep, TME or SMX capability.
		 * Advertise x2APIC capability and Hypervisor guest.
		 */
		regs[2] &= ~(CPUID2_VMX | CPUID2_EST | CPUID2_TM2);
		regs[2] &= ~(CPUID2_SMX);

		regs[2] |= CPUID2_HV;

		if (x2apic_state != X2APIC_DISABLED)
			regs[2] |= CPUID2_X2APIC;
		else
			regs[2] &= ~CPUID2_X2APIC;

		/*
		 * Only advertise CPUID2_XSAVE in the guest if
		 * the host is using XSAVE.
		 */
		if (!(regs[2] & CPUID2_OSXSAVE))
			regs[2] &= ~CPUID2_XSAVE;

		/*
		 * If CPUID2_XSAVE is being advertised and the
		 * guest has set CR4_XSAVE, set
		 * CPUID2_OSXSAVE.
		 */
		regs[2] &= ~CPUID2_OSXSAVE;
		if (regs[2] & CPUID2_XSAVE) {
			error = vm_get_register(vcpu,
			    VM_REG_GUEST_CR4, &cr4);
			if (error)
				panic("x86_emulate_cpuid: error %d "
				    "fetching %%cr4", error);
			if (cr4 & CR4_XSAVE)
				regs[2] |= CPUID2_OSXSAVE;
		}

		/*
		 * Hide monitor/mwait until we know how to deal with
		 * these instructions.
		 */
		regs[2] &= ~CPUID2_MON;

		/*
		 * Hide the performance and debug features.
		 */
		regs[2] &= ~CPUID2_PDCM;

		/*
		 * No TSC deadline support in the APIC yet
		 */
		regs[2] &= ~CPUID2_TSCDLT;

		/*
		 * Hide thermal monitoring
		 */
		regs[3] &= ~(CPUID_ACPI | CPUID_TM);

		/*
		 * Hide the debug store capability.
		 */
		regs[3] &= ~CPUID_DS;

		/*
		 * Advertise the Machine Check and MTRR capability.
		 *
		 * Some guest OSes (e.g. Windows) will not boot if
		 * these features are absent.
		 */
		regs[3] |= (CPUID_MCA | CPUID_MCE | CPUID_MTRR);

		vm_get_topology(vm, &sockets, &cores, &threads,
		    &maxcpus);
		logical_cpus = threads * cores;
		regs[1] &= ~CPUID_HTT_CORES;
		regs[1] |= (logical_cpus & 0xff) << 16;
		regs[3] |= CPUID_HTT;
		break;

	case CPUID_0000_0004:
		cpuid_count(func, param, regs);

		if (regs[0] || regs[1] || regs[2] || regs[3]) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			regs[0] &= 0x3ff;
			regs[0] |= (cores - 1) << 26;
			/*
			 * Cache topology:
			 * - L1 and L2 are shared only by the logical
			 *   processors in a single core.
			 * - L3 and above are shared by all logical
			 *   processors in the package.
			 */
			logical_cpus = threads;
			level = (regs[0] >> 5) & 0x7;
			if (level >= 3)
				logical_cpus *= cores;
			regs[0] |= (logical_cpus - 1) << 14;
		}
		break;

	case CPUID_0000_0007:
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;

		/* leaf 0 */
		if (param == 0) {
			cpuid_count(func, param, regs);

			/* Only leaf 0 is supported */
			regs[0] = 0;

			/*
			 * Expose known-safe features.
			 */
			regs[1] &= CPUID_STDEXT_FSGSBASE |
			    CPUID_STDEXT_BMI1 | CPUID_STDEXT_HLE |
			    CPUID_STDEXT_AVX2 | CPUID_STDEXT_SMEP |
			    CPUID_STDEXT_BMI2 |
			    CPUID_STDEXT_ERMS | CPUID_STDEXT_RTM |
			    CPUID_STDEXT_AVX512F |
			    CPUID_STDEXT_AVX512DQ |
			    CPUID_STDEXT_RDSEED |
			    CPUID_STDEXT_SMAP |
			    CPUID_STDEXT_AVX512PF |
			    CPUID_STDEXT_AVX512ER |
			    CPUID_STDEXT_AVX512CD | CPUID_STDEXT_SHA |
			    CPUID_STDEXT_AVX512BW |
			    CPUID_STDEXT_AVX512VL;
			regs[2] &= CPUID_STDEXT2_VAES |
			    CPUID_STDEXT2_VPCLMULQDQ;
			regs[3] &= CPUID_STDEXT3_MD_CLEAR;

			/* Advertise RDPID if it is enabled. */
			error = vm_get_capability(vcpu, VM_CAP_RDPID,
			    &enable_rdpid);
			if (error == 0 && enable_rdpid)
				regs[2] |= CPUID_STDEXT2_RDPID;

			/* Advertise INVPCID if it is enabled. */
			error = vm_get_capability(vcpu,
			    VM_CAP_ENABLE_INVPCID, &enable_invpcid);
			if (error == 0 && enable_invpcid)
				regs[1] |= CPUID_STDEXT_INVPCID;
		}
		break;

	case CPUID_0000_0006:
		regs[0] = CPUTPM1_ARAT;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000A:
		/*
		 * Handle the access, but report 0 for
		 * all options
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_000B:
		/*
		 * Intel processor topology enumeration
		 */
		if (vmm_is_intel()) {
			vm_get_topology(vm, &sockets, &cores, &threads,
			    &maxcpus);
			if (param == 0) {
				logical_cpus = threads;
				width = log2(logical_cpus);
				level = CPUID_TYPE_SMT;
				x2apic_id = vcpu_id;
			}

			if (param == 1) {
				logical_cpus = threads * cores;
				width = log2(logical_cpus);
				level = CPUID_TYPE_CORE;
				x2apic_id = vcpu_id;
			}

			if (!cpuid_leaf_b || param >= 2) {
				width = 0;
				logical_cpus = 0;
				level = 0;
				x2apic_id = 0;
			}

			regs[0] = width & 0x1f;
			regs[1] = logical_cpus & 0xffff;
			regs[2] = (level << 8) | (param & 0xff);
			regs[3] = x2apic_id;
		} else {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
		}
		break;

	case CPUID_0000_000D:
		limits = vmm_get_xsave_limits();
		if (!limits->xsave_enabled) {
			regs[0] = 0;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		}

		cpuid_count(func, param, regs);
		switch (param) {
		case 0:
			/*
			 * Only permit the guest to use bits
			 * that are active in the host in
			 * %xcr0.  Also, claim that the
			 * maximum save area size is
			 * equivalent to the host's current
			 * save area size.  Since this runs
			 * "inside" of vmrun(), it runs with
			 * the guest's xcr0, so the current
			 * save area size is correct as-is.
			 */
			regs[0] &= limits->xcr0_allowed;
			regs[2] = limits->xsave_max_size;
			regs[3] &= (limits->xcr0_allowed >> 32);
			break;
		case 1:
			/* Only permit XSAVEOPT. */
			regs[0] &= CPUID_EXTSTATE_XSAVEOPT;
			regs[1] = 0;
			regs[2] = 0;
			regs[3] = 0;
			break;
		default:
			/*
			 * If the leaf is for a permitted feature,
			 * pass through as-is, otherwise return
			 * all zeroes.
			 */
			if (!(limits->xcr0_allowed & (1ul << param))) {
				regs[0] = 0;
				regs[1] = 0;
				regs[2] = 0;
				regs[3] = 0;
			}
			break;
		}
		break;

	case CPUID_0000_000F:
	case CPUID_0000_0010:
		/*
		 * Do not report any Resource Director Technology
		 * capabilities.  Exposing control of cache or memory
		 * controller resource partitioning to the guest is not
		 * at all sensible.
		 *
		 * This is already hidden at a high level by masking of
		 * leaf 0x7.  Even still, a guest may look here for
		 * detailed capability information.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case CPUID_0000_0015:
		/*
		 * Don't report CPU TSC/Crystal ratio and clock
		 * values since guests may use these to derive the
		 * local APIC frequency.
		 */
		regs[0] = 0;
		regs[1] = 0;
		regs[2] = 0;
		regs[3] = 0;
		break;

	case 0x40000000:
		regs[0] = CPUID_VM_HIGH;
		bcopy(bhyve_id, &regs[1], 4);
		bcopy(bhyve_id + 4, &regs[2], 4);
		bcopy(bhyve_id + 8, &regs[3], 4);
		break;

	default:
default_leaf:
		/*
		 * The leaf value has already been clamped so
		 * simply pass this through, keeping count of
		 * how many unhandled leaf values have been seen.
		 */
		atomic_add_long(&bhyve_xcpuids, 1);
		cpuid_count(func, param, regs);
		break;
	}

	/*
	 * CPUID clears the upper 32-bits of the long-mode registers.
	 */
	*rax = regs[0];
	*rbx = regs[1];
	*rcx = regs[2];
	*rdx = regs[3];

	return (1);
}

bool
vm_cpuid_capability(struct vcpu *vcpu, enum vm_cpuid_capability cap)
{
	bool rv;

	KASSERT(cap > 0 && cap < VCC_LAST, ("%s: invalid vm_cpu_capability %d",
	    __func__, cap));

	/*
	 * Simply passthrough the capabilities of the host cpu for now.
	 */
	rv = false;
	switch (cap) {
	case VCC_NO_EXECUTE:
		if (amd_feature & AMDID_NX)
			rv = true;
		break;
	case VCC_FFXSR:
		if (amd_feature & AMDID_FFXSR)
			rv = true;
		break;
	case VCC_TCE:
		if (amd_feature2 & AMDID2_TCE)
			rv = true;
		break;
	default:
		panic("%s: unknown vm_cpu_capability %d", __func__, cap);
	}
	return (rv);
}

int
vm_rdmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t *val)
{
	switch (num) {
	case MSR_MTRRcap:
		*val = MTRR_CAP_WC | MTRR_CAP_FIXED | VMM_MTRR_VAR_MAX;
		break;
	case MSR_MTRRdefType:
		*val = mtrr->def_type;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		*val = mtrr->fixed4k[num - MSR_MTRR4kBase];
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		*val = mtrr->fixed16k[num - MSR_MTRR16kBase];
		break;
	case MSR_MTRR64kBase:
		*val = mtrr->fixed64k;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			*val = mtrr->var[offset / 2].base;
		} else {
			*val = mtrr->var[offset / 2].mask;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}

int
vm_wrmtrr(struct vm_mtrr *mtrr, u_int num, uint64_t val)
{
	switch (num) {
	case MSR_MTRRcap:
		/* MTRRCAP is read only */
		return (-1);
	case MSR_MTRRdefType:
		if (val & ~VMM_MTRR_DEF_MASK) {
			/* generate #GP on writes to reserved fields */
			return (-1);
		}
		mtrr->def_type = val;
		break;
	case MSR_MTRR4kBase ... MSR_MTRR4kBase + 7:
		mtrr->fixed4k[num - MSR_MTRR4kBase] = val;
		break;
	case MSR_MTRR16kBase ... MSR_MTRR16kBase + 1:
		mtrr->fixed16k[num - MSR_MTRR16kBase] = val;
		break;
	case MSR_MTRR64kBase:
		mtrr->fixed64k = val;
		break;
	case MSR_MTRRVarBase ... MSR_MTRRVarBase + (VMM_MTRR_VAR_MAX * 2) - 1: {
		u_int offset = num - MSR_MTRRVarBase;
		if (offset % 2 == 0) {
			if (val & ~VMM_MTRR_PHYSBASE_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].base = val;
		} else {
			if (val & ~VMM_MTRR_PHYSMASK_MASK) {
				/* generate #GP on writes to reserved fields */
				return (-1);
			}
			mtrr->var[offset / 2].mask = val;
		}
		break;
	}
	default:
		return (-1);
	}

	return (0);
}