/*	$OpenBSD: amd64_mem.c,v 1.14 2018/07/27 21:11:31 kettenis Exp $	*/
/*
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/i386/i386/i686_mem.c,v 1.8 1999/10/12 22:53:05 green Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>

#include <machine/cpufunc.h>
#include <machine/intr.h>
#include <machine/specialreg.h>

/*
 * This code implements a set of MSRs known as MTRRs (Memory Type Range
 * Registers), which define caching modes/behavior for various physical
 * memory ranges.
 */
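/*
 * Two flavors are involved (see the Intel SDM for the authoritative
 * description): fixed-range MTRRs, which carve the low 1MB into 8 x 64K,
 * 16 x 16K and 64 x 4K chunks with one type byte each, and variable-range
 * MTRRs, each a base/mask MSR pair describing one power-of-two-sized,
 * naturally aligned range.
 */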

char *mem_owner_bios = "BIOS";

#define MR_FIXMTRR	(1<<0)

#define mrwithin(mr, a) \
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define mroverlap(mra, mrb) \
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

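/*
 * A range is programmable as a variable MTRR only if its base is 4K
 * aligned, its length is a power of two of at least 4K, and the base is
 * aligned to the length; e.g. base 0xd0000000/len 0x10000000 qualifies,
 * but the same base with len 0x20000000 does not, since the base is not
 * a multiple of the length.
 */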
#define mrvalid(base, len) \
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	((len) >= (1 << 12)) &&		/* length is >= 4k */		\
	powerof2((len)) &&		/* ... and power of two */	\
	!((base) & ((len) - 1)))	/* range is not discontinuous */

#define mrcopyflags(curr, new) (((curr) & ~MDF_ATTRMASK) | \
	((new) & MDF_ATTRMASK))

#define FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + \
	(MTRR_N4K * 0x1000))
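/*
 * 8 x 64K + 16 x 16K + 64 x 4K = 0x100000: the fixed-range MTRRs cover
 * exactly the low 1MB of physical memory.
 */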

void	mrinit(struct mem_range_softc *sc);
int	mrset(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd, int *arg);
void	mrinit_cpu(struct mem_range_softc *sc);
void	mrreload_cpu(struct mem_range_softc *sc);

struct mem_range_ops mrops = {
	mrinit,
	mrset,
	mrinit_cpu,
	mrreload_cpu
};

u_int64_t mtrrcap, mtrrdef;
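/*
 * Mask of the physical address bits a variable MTRR can carry.
 * Defaults to 36 bits (bits 12-35); mrinit() widens it if CPUID
 * reports a larger physical address size.
 */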
u_int64_t mtrrmask = 0x0000000ffffff000ULL;

struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd);
void	mrfetch(struct mem_range_softc *sc);
int	mtrrtype(u_int64_t flags);
int	mrt2mtrr(u_int64_t flags);
int	mtrr2mrt(int val);
int	mtrrconflict(u_int64_t flag1, u_int64_t flag2);
void	mrstore(struct mem_range_softc *sc);
void	mrstoreone(struct mem_range_softc *sc);
struct mem_range_desc *mtrrfixsearch(struct mem_range_softc *sc,
	    u_int64_t addr);
int	mrsetlow(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd, int *arg);
int	mrsetvariable(struct mem_range_softc *sc,
	    struct mem_range_desc *mrd, int *arg);

/* MTRR type to memory range type conversion */
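/*
 * Indexed by the raw MTRR type encoding: 0 = UC, 1 = WC, 2-3 reserved,
 * 4 = WT, 5 = WP, 6 = WB.
 */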
int mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};

int
mtrr2mrt(int val)
{
	if (val < 0 || val >= nitems(mtrrtomrt))
		return MDF_UNKNOWN;
	return mtrrtomrt[val];
}

/*
 * MTRR conflicts.  Writeback and uncacheable may overlap.
 */
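/*
 * Per the SDM, when overlapping variable ranges include UC the UC type
 * wins, so a WB/UC overlap has well-defined semantics.
 */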
int
mtrrconflict(u_int64_t flag1, u_int64_t flag2)
{
	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return 0;
	return 1;
}

/*
 * Look for an exactly-matching range.
 */
struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return(cand);
	return(NULL);
}

/*
 * Fetch the current MTRR settings from the current CPU (all CPUs are
 * assumed to be in sync in the SMP case).  Note that if we are here,
 * we assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
void
mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr, mrt;

	mrd = sc->mr_desc;

	/* We should never be fetching MTRRs from an AP */
	KASSERT(CPU_IS_PRIMARY(curcpu()));

	/* Get fixed-range MTRRs, if the CPU supports them */
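	/*
	 * Each fixed-range MSR packs eight one-byte type fields, the
	 * lowest byte covering the lowest address in the MSR's range.
	 */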
	if (sc->mr_cap & MR_FIXMTRR) {
		msr = MSR_MTRRfix64K_00000;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags &
				    ~MDF_ATTRMASK) | mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix16K_80000;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags &
				    ~MDF_ATTRMASK) | mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}

		msr = MSR_MTRRfix4K_C0000;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrt = mtrr2mrt(msrv & 0xff);
				if (mrt == MDF_UNKNOWN)
					mrt = MDF_UNCACHEABLE;
				mrd->mr_flags = (mrd->mr_flags &
				    ~MDF_ATTRMASK) | mrt | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strlcpy(mrd->mr_owner, mem_owner_bios,
					    sizeof(mrd->mr_owner));
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs */
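	/*
	 * Each variable MTRR is an MSR pair: the base MSR carries the
	 * physical base address with the memory type in its low byte;
	 * the mask MSR carries the length mask with a valid bit at
	 * bit 11.
	 */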
	msr = MSR_MTRRvarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrt = mtrr2mrt(msrv & 0xff);
		if (mrt == MDF_UNKNOWN)
			mrt = MDF_UNCACHEABLE;
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) | mrt;
		mrd->mr_base = msrv & mtrrmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & 0x800) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);
		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & mtrrmask) & mtrrmask) + 0x1000;
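		/*
		 * e.g. for a 128K (0x20000) range the stored mask is
		 * ~0x1ffff, so (~mask & mtrrmask) = 0x1f000 and adding
		 * 0x1000 recovers the 0x20000 length.
		 */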
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;
		/* If unclaimed and active, must be the BIOS */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strlcpy(mrd->mr_owner, mem_owner_bios,
			    sizeof(mrd->mr_owner));
	}
}

/*
 * Return the MTRR memory type matching a region's flags, or -1 if none
 * matches.
 */
int
mtrrtype(u_int64_t flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < nitems(mtrrtomrt); i++) {
		if (mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == mtrrtomrt[i])
			return(i);
	}
	return -1;
}

int
mrt2mtrr(u_int64_t flags)
{
	int val;

	if ((val = mtrrtype(flags)) == -1)
		return 0;	/* fall back to UC */
	return val & 0xff;
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
void
mrstore(struct mem_range_softc *sc)
{
	u_long s;

	s = intr_disable();
#ifdef MULTIPROCESSOR
	x86_broadcast_ipi(X86_IPI_MTRR);
#endif
	mrstoreone(sc);
	intr_restore(s);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than
 * just stuffing one entry; this is simpler (but slower, of course).
 */
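/*
 * The sequence below (drop CR4.PGE, flush caches, set CR0.CD, disable
 * MTRRs, write them, re-enable) follows the MTRR update recipe given
 * in the Intel SDM.
 */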
void
mrstoreone(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;
	u_int cr4save;

	mrd = sc->mr_desc;

	cr4save = rcr4();	/* save cr4 */
	if (cr4save & CR4_PGE)
		lcr4(cr4save & ~CR4_PGE);

	/* Flush caches, then disable caches, then disable MTRRs */
	wbinvd();
	lcr0((rcr0() & ~CR0_NW) | CR0_CD);
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~0x800);

	/* Set fixed-range MTRRs */
	if (sc->mr_cap & MR_FIXMTRR) {
		msr = MSR_MTRRfix64K_00000;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}

		msr = MSR_MTRRfix16K_80000;
		for (i = 0, msrv = 0; i < (MTRR_N16K / 8); i++, msr++) {
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}

		msr = MSR_MTRRfix4K_C0000;
		for (i = 0, msrv = 0; i < (MTRR_N4K / 8); i++, msr++) {
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= mrt2mtrr((mrd + j)->mr_flags);
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs */
	msr = MSR_MTRRvarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrrmask;
			msrv |= mrt2mtrr(mrd->mr_flags);
		} else
			msrv = 0;

		wrmsr(msr, msrv);

		/* mask/active register */
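		/*
		 * Bit 11 (0x800) is the valid bit; the mask selects
		 * which address bits must match the base for a hit.
		 */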
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = 0x800 | (~(mrd->mr_len - 1) & mtrrmask);
		} else
			msrv = 0;

		wrmsr(msr + 1, msrv);
	}

	/* Re-enable caches and MTRRs */
	wrmsr(MSR_MTRRdefType, mtrrdef | 0x800);
	lcr0(rcr0() & ~(CR0_CD | CR0_NW));
	lcr4(cr4save);
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
struct mem_range_desc *
mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc;
	    i < (MTRR_N64K + MTRR_N16K + MTRR_N4K); i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return(mrd);
	return(NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed MTRRs that
 * cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to the
 * next higher/lower boundary to avoid the consumer having to know too much
 * about the mechanisms here.
 */
int
mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* range check */
	if (((first_md = mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = mtrrfixsearch(sc,
	    mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return(EINVAL);

	/* check we aren't doing something risky */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* set flags, clear set-by-firmware flag */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		memcpy(curr_md->mr_owner, mrd->mr_owner,
		    sizeof(mrd->mr_owner));
	}

	return(0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 */
int
mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 * Keep track of the first empty variable descriptor in case we
	 * can't perform a takeover.
	 */
	i = (sc->mr_cap & MR_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {
				/* check we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK)
				    == MDF_UNKNOWN))
					return (EACCES);
				/* Ok, just hijack this entry */
				free_md = curr_md;
				break;
			}
			/* non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* between conflicting region types? */
				if (mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return(EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}
	/* got somewhere to put it? */
	if (free_md == NULL)
		return(ENOSPC);

	/* Set up new descriptor */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	memcpy(free_md->mr_owner, mrd->mr_owner, sizeof(mrd->mr_owner));
	return(0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
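/*
 * Typically reached via the MEMRANGE_SET ioctl on /dev/mem: the caller
 * fills in a mem_range_desc (base, length, attribute flags, owner) and
 * selects MEMRANGE_SET_UPDATE or MEMRANGE_SET_REMOVE via *arg.
 */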
int
mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error = 0;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/* make sure that what's being asked for is possible */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    mtrrtype(mrd->mr_flags) == -1)
			return(EINVAL);

		/* are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = mrsetlow(sc, mrd, arg)) != 0)
				return(error);
		} else {
			/* it's time to play with variable MTRRs */
			if ((error = mrsetvariable(sc, mrd, arg)) != 0)
				return(error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return(ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return(EPERM);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return(EOPNOTSUPP);
	}

	/* update the hardware */
	mrstore(sc);
	return(0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * fetch the initial settings.
 */
void
mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	uint32_t regs[4];
	int nmdesc = 0;
	int i;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled */
	if (!(mtrrdef & MTRRdefType_ENABLE)) {
		printf("mtrr: CPU supports MTRRs but not enabled by BIOS\n");
		return;
	}
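	/* The low byte of MTRRcap (VCNT) is the variable-range count. */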
	nmdesc = mtrrcap & 0xff;
	printf("mtrr: Pentium Pro MTRR support, %d var ranges", nmdesc);

	/* If fixed MTRRs supported and enabled */
	if ((mtrrcap & MTRRcap_FIXED) &&
	    (mtrrdef & MTRRdefType_FIXED_ENABLE)) {
		sc->mr_cap = MR_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
		printf(", %d fixed ranges", MTRR_N64K + MTRR_N16K + MTRR_N4K);
	}

	printf("\n");

	sc->mr_desc = mallocarray(nmdesc, sizeof(struct mem_range_desc),
	    M_MEMDESC, M_WAITOK|M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length */
	if (sc->mr_cap & MR_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}

		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}

		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Fetch the maximum physical address size supported by the
	 * processor via CPUID leaf function 0x80000008.  If that leaf
	 * is not available, stick with the default 36-bit address size.
	 */
	if (curcpu()->ci_pnfeatset >= 0x80000008) {
		CPUID(0x80000008, regs[0], regs[1], regs[2], regs[3]);
		if (regs[0] & 0xff) {
			mtrrmask = (1ULL << (regs[0] & 0xff)) - 1;
			mtrrmask &= ~0x0000000000000fffULL;
		}
	}

	/*
	 * Get current settings, anything set now is considered to have
	 * been set by the firmware.
	 */
	mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}
}

/*
 * Initialise MTRRs on a CPU from the software state.
 */
void
mrinit_cpu(struct mem_range_softc *sc)
{
	mrstoreone(sc);	/* set MTRRs to match BSP */
}

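/*
 * Reload this CPU's MTRRs from the software state; on MP systems this
 * is presumably what each CPU runs in response to the X86_IPI_MTRR
 * broadcast issued by mrstore().
 */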
void
mrreload_cpu(struct mem_range_softc *sc)
{
	u_long s;

	s = intr_disable();
	mrstoreone(sc);	/* set MTRRs to match BSP */
	intr_restore(s);
}