1 /*- 2 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org> 3 * Copyright (c) 2017 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * Portions of this software were developed by Konstantin Belousov 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/kernel.h> 36 #include <sys/systm.h> 37 #include <sys/malloc.h> 38 #include <sys/memrange.h> 39 #include <sys/smp.h> 40 #include <sys/sysctl.h> 41 42 #include <vm/vm.h> 43 #include <vm/vm_param.h> 44 #include <vm/pmap.h> 45 46 #include <machine/cputypes.h> 47 #include <machine/md_var.h> 48 #include <machine/specialreg.h> 49 50 /* 51 * Pentium Pro+ memory range operations 52 * 53 * This code will probably be impenetrable without reference to the 54 * Intel Pentium Pro documentation or x86-64 programmers manual vol 2. 55 */ 56 57 static char *mem_owner_bios = "BIOS"; 58 59 #define MR686_FIXMTRR (1<<0) 60 61 #define mrwithin(mr, a) \ 62 (((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len))) 63 #define mroverlap(mra, mrb) \ 64 (mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base)) 65 66 #define mrvalid(base, len) \ 67 ((!(base & ((1 << 12) - 1))) && /* base is multiple of 4k */ \ 68 ((len) >= (1 << 12)) && /* length is >= 4k */ \ 69 powerof2((len)) && /* ... and power of two */ \ 70 !((base) & ((len) - 1))) /* range is not discontiuous */ 71 72 #define mrcopyflags(curr, new) \ 73 (((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK)) 74 75 static int mtrrs_disabled; 76 SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RDTUN, 77 &mtrrs_disabled, 0, 78 "Disable MTRRs."); 79 80 static void x86_mrinit(struct mem_range_softc *sc); 81 static int x86_mrset(struct mem_range_softc *sc, 82 struct mem_range_desc *mrd, int *arg); 83 static void x86_mrAPinit(struct mem_range_softc *sc); 84 static void x86_mrreinit(struct mem_range_softc *sc); 85 86 static struct mem_range_ops x86_mrops = { 87 x86_mrinit, 88 x86_mrset, 89 x86_mrAPinit, 90 x86_mrreinit 91 }; 92 93 /* XXX for AP startup hook */ 94 static u_int64_t mtrrcap, mtrrdef; 95 96 /* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. 
 */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd);
static void x86_mrfetch(struct mem_range_softc *sc);
static int x86_mtrrtype(int flags);
static int x86_mrt2mtrr(int flags, int oldval);
static int x86_mtrrconflict(int flag1, int flag2);
static void x86_mrstore(struct mem_range_softc *sc);
static void x86_mrstoreone(void *arg);
static struct mem_range_desc *x86_mtrrfixsearch(struct mem_range_softc *sc,
	    u_int64_t addr);
static int x86_mrsetlow(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd, int *arg);
static int x86_mrsetvariable(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd, int *arg);

/*
 * ia32 MTRR type to memory range type conversion.  Indexed by the raw
 * MTRR memory-type encoding; entries 2 and 3 are reserved encodings
 * and map to MDF_UNKNOWN.
 */
static int x86_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

#define MTRRTOMRTLEN nitems(x86_mtrrtomrt)

/*
 * Translate a raw MTRR memory type value to an MDF_* attribute.
 * Out-of-range encodings yield MDF_UNKNOWN rather than indexing past
 * the table.
 */
static int
x86_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (x86_mtrrtomrt[val]);
}

/*
 * x86 MTRR conflicts.  Writeback and uncachable may overlap.
 *
 * Returns non-zero when two attribute sets may not overlap: anything
 * involving MDF_UNKNOWN conflicts; identical types never conflict;
 * the writeback/uncacheable pairing is explicitly tolerated.
 */
static int
x86_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	/* Linear scan for a descriptor with identical base and length. */
	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Ensure that the direct map region does not contain any mappings
 * that span MTRRs of different types.  However, the fixed MTRRs can
 * be ignored, because a large page mapping the first 1 MB of physical
 * memory is a special case that the processor handles.  Invalidate
 * any old TLB entries that might hold inconsistent memory type
 * information.
 */
static void
x86_mr_split_dmap(struct mem_range_softc *sc __unused)
{
#ifdef __amd64__
	struct mem_range_desc *mrd;
	int i;

	/* Skip over the fixed-range descriptors, if present. */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		/* Demote only for active ranges that are not bogus. */
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
#endif
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 *
 * Updates sc->mr_desc in place; descriptors that are active but
 * unowned are attributed to the BIOS.
 */
static void
x86_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/*
	 * Get fixed-range MTRRs.  Each fixed-range MSR packs eight
	 * one-byte type fields; the inner loops peel one byte per
	 * descriptor, shifting the MSR value right by 8 each step.
	 */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    x86_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/*
	 * Get remainder which must be variable MTRRs.  Each variable
	 * range uses a pair of MSRs: base/type, then mask/valid.
	 */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    x86_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		/* The valid bit in the mask MSR governs MDF_ACTIVE. */
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfff)) + 1;
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags, or -1 if the
 * attribute has no MTRR encoding.
 */
static int
x86_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (x86_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == x86_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

/*
 * Convert MDF_* flags to an 8-bit MTRR type field; when the flags have
 * no encoding, keep the type currently programmed in the hardware.
 */
static int
x86_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = x86_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * Must be called with interrupts enabled.
 */
static void
x86_mrstore(struct mem_range_softc *sc)
{

	/* Run x86_mrstoreone() simultaneously on every CPU. */
	smp_rendezvous(NULL, x86_mrstoreone, NULL, sc);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 *
 * The sequence below (disable PGE, set CD, flush, disable MTRRs,
 * reprogram, flush again, re-enable) is order-dependent; do not
 * reorder these steps.
 */
static void
x86_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	critical_enter();

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flushes caches and TLBs. */
	wbinvd();
	invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/*
	 * Set fixed-range MTRRs.  Rebuild each MSR byte-by-byte from
	 * eight descriptors, preserving the old hardware type for any
	 * descriptor whose flags have no MTRR encoding.
	 */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < MTRR_N64K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < MTRR_N16K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < MTRR_N4K / 8; i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= x86_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; mrd - sc->mr_desc < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= x86_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			/* Mask = physmask with the low len bits cleared. */
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	critical_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr); returns NULL when addr
 * lies outside the fixed-range coverage.
 */
static struct mem_range_desc *
x86_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < MTRR_N64K + MTRR_N16K + MTRR_N4K;
	     i++, mrd++)
		if (addr >= mrd->mr_base &&
		    addr < mrd->mr_base + mrd->mr_len)
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
x86_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check: both endpoints must hit a fixed descriptor. */
	if ((first_md = x86_mtrrfixsearch(sc, mrd->mr_base)) == NULL ||
	    (last_md = x86_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1))
	    == NULL)
		return (EINVAL);

	/*
	 * Check that we aren't doing something risky: refuse to touch
	 * ranges of unknown type unless the caller sets MDF_FORCE.
	 */
	if ((mrd->mr_flags & MDF_FORCE) == 0) {
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}
	}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
486 */ 487 static int 488 x86_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd, 489 int *arg) 490 { 491 struct mem_range_desc *curr_md, *free_md; 492 int i; 493 494 /* 495 * Scan the currently active variable descriptors, look for 496 * one we exactly match (straight takeover) and for possible 497 * accidental overlaps. 498 * 499 * Keep track of the first empty variable descriptor in case 500 * we can't perform a takeover. 501 */ 502 i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0; 503 curr_md = sc->mr_desc + i; 504 free_md = NULL; 505 for (; i < sc->mr_ndesc; i++, curr_md++) { 506 if (curr_md->mr_flags & MDF_ACTIVE) { 507 /* Exact match? */ 508 if (curr_md->mr_base == mrd->mr_base && 509 curr_md->mr_len == mrd->mr_len) { 510 /* Whoops, owned by someone. */ 511 if (curr_md->mr_flags & MDF_BUSY) 512 return (EBUSY); 513 514 /* Check that we aren't doing something risky */ 515 if (!(mrd->mr_flags & MDF_FORCE) && 516 (curr_md->mr_flags & MDF_ATTRMASK) == 517 MDF_UNKNOWN) 518 return (EACCES); 519 520 /* Ok, just hijack this entry. */ 521 free_md = curr_md; 522 break; 523 } 524 525 /* Non-exact overlap? */ 526 if (mroverlap(curr_md, mrd)) { 527 /* Between conflicting region types? */ 528 if (x86_mtrrconflict(curr_md->mr_flags, 529 mrd->mr_flags)) 530 return (EINVAL); 531 } 532 } else if (free_md == NULL) { 533 free_md = curr_md; 534 } 535 } 536 537 /* Got somewhere to put it? */ 538 if (free_md == NULL) 539 return (ENOSPC); 540 541 /* Set up new descriptor. */ 542 free_md->mr_base = mrd->mr_base; 543 free_md->mr_len = mrd->mr_len; 544 free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags); 545 bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner)); 546 return (0); 547 } 548 549 /* 550 * Handle requests to set memory range attributes by manipulating MTRRs. 
 */
static int
x86_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    x86_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

/* Highest physical address covered by the fixed-range MTRRs. */
#define FIXTOP	\
    ((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) != 0 &&
		    mrd->mr_base + mrd->mr_len <= FIXTOP) {
			if ((error = x86_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = x86_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		/* Only inactive, non-fixed, unowned ranges may be removed. */
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

	/* Keep the direct map consistent with the new range layout. */
	x86_mr_split_dmap(sc);

	/* Update the hardware. */
	x86_mrstore(sc);

	/* Refetch to see where we're at. */
	x86_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
x86_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	int i, nmdesc;

	if (sc->mr_desc != NULL)
		/* Already initialized. */
		return;

	nmdesc = 0;
	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			printf("CPU supports MTRRs but not enabled\n");
		return;
	}
	/* Number of variable-range MTRRs reported by the CPU. */
	nmdesc = mtrrcap & MTRR_CAP_VCNT;
	if (bootverbose)
		printf("Pentium Pro MTRR support enabled\n");

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.
	 */
	mtrr_physmask = (((uint64_t)1 << cpu_maxphyaddr) - 1) &
	    ~(uint64_t)0xfff;

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = malloc(nmdesc * sizeof(struct mem_range_desc), M_MEMDESC,
	    M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/*
	 * Populate the fixed MTRR entries' base/length.  The fixed
	 * layout is: 8 x 64K entries below 512K, 16 x 16K entries to
	 * 768K, then 64 x 4K entries up to 1 MB.
	 */
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	x86_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

	x86_mr_split_dmap(sc);
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
694 */ 695 static void 696 x86_mrAPinit(struct mem_range_softc *sc) 697 { 698 699 x86_mrstoreone(sc); 700 wrmsr(MSR_MTRRdefType, mtrrdef); 701 } 702 703 /* 704 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor 705 * list. 706 * 707 * Must be called with interrupts enabled. 708 */ 709 static void 710 x86_mrreinit(struct mem_range_softc *sc) 711 { 712 713 smp_rendezvous(NULL, (void (*)(void *))x86_mrAPinit, NULL, sc); 714 } 715 716 static void 717 x86_mem_drvinit(void *unused) 718 { 719 720 if (mtrrs_disabled) 721 return; 722 if (!(cpu_feature & CPUID_MTRR)) 723 return; 724 mem_range_softc.mr_op = &x86_mrops; 725 x86_mrinit(&mem_range_softc); 726 } 727 SYSINIT(x86memdev, SI_SUB_CPU, SI_ORDER_ANY, x86_mem_drvinit, NULL); 728