xref: /dragonfly/sys/platform/pc64/x86_64/amd64_mem.c (revision 3f7b7260)
/*-
 * Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <sys/thread2.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <machine/smp.h>

/*
 * amd64 memory range operations
 *
 * This code will probably be impenetrable without reference to the
 * Intel Pentium Pro documentation or the AMD64 Architecture
 * Programmer's Manual, Volume 2.
 */

static char *mem_owner_bios = "BIOS";

#define	MR686_FIXMTRR	(1<<0)

#define	mrwithin(mr, a)							\
	(((a) >= (mr)->mr_base) && ((a) < ((mr)->mr_base + (mr)->mr_len)))
#define	mroverlap(mra, mrb)						\
	(mrwithin(mra, mrb->mr_base) || mrwithin(mrb, mra->mr_base))

#define	mrvalid(base, len) 						\
	((!(base & ((1 << 12) - 1))) &&	/* base is multiple of 4k */	\
	    ((len) >= (1 << 12)) &&	/* length is >= 4k */		\
	    powerof2((len)) &&		/* ... and power of two */	\
	    !((base) & ((len) - 1)))	/* range is not discontinuous */
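
/*
 * Example: mrvalid(0x100000, 0x40000) holds -- the base is 4K-aligned,
 * the length is a power of two >= 4K, and the 1MB base is a multiple of
 * the 256K length.  mrvalid(0x101000, 0x40000) fails the final test:
 * the base is not a multiple of the length, so the range cannot be
 * described by a single MTRR base/mask pair.
 */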

#define	mrcopyflags(curr, new)						\
	(((curr) & ~MDF_ATTRMASK) | ((new) & MDF_ATTRMASK))

static int mtrrs_disabled;
TUNABLE_INT("machdep.disable_mtrrs", &mtrrs_disabled);
SYSCTL_INT(_machdep, OID_AUTO, disable_mtrrs, CTLFLAG_RD,
    &mtrrs_disabled, 0, "Disable amd64 MTRRs.");
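
/*
 * MTRR handling can also be suppressed from the boot loader, e.g.
 * (assuming the usual loader configuration file):
 *
 *	machdep.disable_mtrrs="1"	(in /boot/loader.conf)
 */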

static void	amd64_mrinit(struct mem_range_softc *sc);
static int	amd64_mrset(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static void	amd64_mrAPinit(struct mem_range_softc *sc);
static void	amd64_mrreinit(struct mem_range_softc *sc);

static struct mem_range_ops amd64_mrops = {
	amd64_mrinit,
	amd64_mrset,
	amd64_mrAPinit,
	amd64_mrreinit
};

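/*
 * The machine-independent memory-range layer dispatches through this
 * ops vector once amd64_mem_drvinit() below hooks it into
 * mem_range_softc, presumably via the generic mem_range_attr_get()/
 * mem_range_attr_set() interface declared in <sys/memrange.h>.
 */
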
/* XXX for AP startup hook */
static u_int64_t mtrrcap, mtrrdef;

/* The bitmask for the PhysBase and PhysMask fields of the variable MTRRs. */
static u_int64_t mtrr_physmask;

static struct mem_range_desc *mem_range_match(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd);
static void	amd64_mrfetch(struct mem_range_softc *sc);
static int	amd64_mtrrtype(int flags);
static int	amd64_mrt2mtrr(int flags, int oldval);
static int	amd64_mtrrconflict(int flag1, int flag2);
static void	amd64_mrstore(struct mem_range_softc *sc);
static void	amd64_mrstoreone(void *arg);
static struct mem_range_desc *amd64_mtrrfixsearch(struct mem_range_softc *sc,
		    u_int64_t addr);
static int	amd64_mrsetlow(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);
static int	amd64_mrsetvariable(struct mem_range_softc *sc,
		    struct mem_range_desc *mrd, int *arg);

/* amd64 MTRR type to memory range type conversion */
static int amd64_mtrrtomrt[] = {
	MDF_UNCACHEABLE,
	MDF_WRITECOMBINE,
	MDF_UNKNOWN,
	MDF_UNKNOWN,
	MDF_WRITETHROUGH,
	MDF_WRITEPROTECT,
	MDF_WRITEBACK
};
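
/*
 * The table is indexed by the architectural MTRR memory type encoding:
 * 0 = UC (uncacheable), 1 = WC (write-combining), 2 and 3 = reserved,
 * 4 = WT (write-through), 5 = WP (write-protected), 6 = WB (write-back).
 */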

#define	MTRRTOMRTLEN NELEM(amd64_mtrrtomrt)

static int
amd64_mtrr2mrt(int val)
{

	if (val < 0 || val >= MTRRTOMRTLEN)
		return (MDF_UNKNOWN);
	return (amd64_mtrrtomrt[val]);
}

/*
 * amd64 MTRR conflicts. Writeback and uncacheable may overlap.
 */
static int
amd64_mtrrconflict(int flag1, int flag2)
{

	flag1 &= MDF_ATTRMASK;
	flag2 &= MDF_ATTRMASK;
	if ((flag1 & MDF_UNKNOWN) || (flag2 & MDF_UNKNOWN))
		return (1);
	if (flag1 == flag2 ||
	    (flag1 == MDF_WRITEBACK && flag2 == MDF_UNCACHEABLE) ||
	    (flag2 == MDF_WRITEBACK && flag1 == MDF_UNCACHEABLE))
		return (0);
	return (1);
}
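
/*
 * The WB/UC exception above mirrors the hardware rule for overlapping
 * variable ranges: when an uncacheable range overlaps a range of any
 * other type, the UC attribute wins, so the combination has a
 * well-defined result.
 */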

/*
 * Look for an exactly-matching range.
 */
static struct mem_range_desc *
mem_range_match(struct mem_range_softc *sc, struct mem_range_desc *mrd)
{
	struct mem_range_desc *cand;
	int i;

	for (i = 0, cand = sc->mr_desc; i < sc->mr_ndesc; i++, cand++)
		if ((cand->mr_base == mrd->mr_base) &&
		    (cand->mr_len == mrd->mr_len))
			return (cand);
	return (NULL);
}

/*
 * Fetch the current mtrr settings from the current CPU (assumed to
 * all be in sync in the SMP case).  Note that if we are here, we
 * assume that MTRRs are enabled, and we may or may not have fixed
 * MTRRs.
 */
static void
amd64_mrfetch(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int64_t msrv;
	int i, j, msr;

	mrd = sc->mr_desc;

	/* Get fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = rdmsr(msr);
			for (j = 0; j < 8; j++, mrd++) {
				mrd->mr_flags =
				    (mrd->mr_flags & ~MDF_ATTRMASK) |
				    amd64_mtrr2mrt(msrv & 0xff) | MDF_ACTIVE;
				if (mrd->mr_owner[0] == 0)
					strcpy(mrd->mr_owner, mem_owner_bios);
				msrv = msrv >> 8;
			}
		}
	}

	/* Get remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		msrv = rdmsr(msr);
		mrd->mr_flags = (mrd->mr_flags & ~MDF_ATTRMASK) |
		    amd64_mtrr2mrt(msrv & MTRR_PHYSBASE_TYPE);
		mrd->mr_base = msrv & mtrr_physmask;
		msrv = rdmsr(msr + 1);
		mrd->mr_flags = (msrv & MTRR_PHYSMASK_VALID) ?
		    (mrd->mr_flags | MDF_ACTIVE) :
		    (mrd->mr_flags & ~MDF_ACTIVE);

		/* Compute the range from the mask. Ick. */
		mrd->mr_len = (~(msrv & mtrr_physmask) &
		    (mtrr_physmask | 0xfffL)) + 1;
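		/*
		 * Example with 36 physical address bits
		 * (mtrr_physmask == 0xffffff000): a PhysMask value of
		 * 0xfff000000 inverts to 0xffffff within the mask
		 * bits, and adding 1 gives 0x1000000, i.e. a 16MB
		 * range.
		 */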
		if (!mrvalid(mrd->mr_base, mrd->mr_len))
			mrd->mr_flags |= MDF_BOGUS;

		/* If unclaimed and active, must be the BIOS. */
		if ((mrd->mr_flags & MDF_ACTIVE) && (mrd->mr_owner[0] == 0))
			strcpy(mrd->mr_owner, mem_owner_bios);
	}
}

/*
 * Return the MTRR memory type matching a region's flags
 */
static int
amd64_mtrrtype(int flags)
{
	int i;

	flags &= MDF_ATTRMASK;

	for (i = 0; i < MTRRTOMRTLEN; i++) {
		if (amd64_mtrrtomrt[i] == MDF_UNKNOWN)
			continue;
		if (flags == amd64_mtrrtomrt[i])
			return (i);
	}
	return (-1);
}

static int
amd64_mrt2mtrr(int flags, int oldval)
{
	int val;

	if ((val = amd64_mtrrtype(flags)) == -1)
		return (oldval & 0xff);
	return (val & 0xff);
}

/*
 * Update running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrstore(struct mem_range_softc *sc)
{
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	lwkt_send_ipiq_mask(smp_active_mask, (void *)amd64_mrstoreone, sc);
}

/*
 * Update the current CPU's MTRRs with those represented in the
 * descriptor list.  Note that we do this wholesale rather than just
 * stuffing one entry; this is simpler (but slower, of course).
 */
static void
amd64_mrstoreone(void *arg)
{
	struct mem_range_softc *sc = arg;
	struct mem_range_desc *mrd;
	u_int64_t omsrv, msrv;
	int i, j, msr;
	u_long cr0, cr4;

	mrd = sc->mr_desc;

	crit_enter();
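
	/*
	 * The sequence below follows the MTRR update procedure that
	 * both Intel and AMD document: disable caching (CD=1, NW=0),
	 * flush caches and TLBs, disable MTRRs, rewrite them, flush
	 * again, then re-enable MTRRs and restore CR0/CR4.
	 */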

	/* Disable PGE. */
	cr4 = rcr4();
	load_cr4(cr4 & ~CR4_PGE);

	/* Disable caches (CD = 1, NW = 0). */
	cr0 = rcr0();
	load_cr0((cr0 & ~CR0_NW) | CR0_CD);

	/* Flush caches and TLBs. */
	wbinvd();
	cpu_invltlb();

	/* Disable MTRRs (E = 0). */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) & ~MTRR_DEF_ENABLE);

	/* Set fixed-range MTRRs. */
	if (sc->mr_cap & MR686_FIXMTRR) {
		msr = MSR_MTRR64kBase;
		for (i = 0; i < (MTRR_N64K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR16kBase;
		for (i = 0; i < (MTRR_N16K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
		msr = MSR_MTRR4kBase;
		for (i = 0; i < (MTRR_N4K / 8); i++, msr++) {
			msrv = 0;
			omsrv = rdmsr(msr);
			for (j = 7; j >= 0; j--) {
				msrv = msrv << 8;
				msrv |= amd64_mrt2mtrr((mrd + j)->mr_flags,
				    omsrv >> (j * 8));
			}
			wrmsr(msr, msrv);
			mrd += 8;
		}
	}

	/* Set remainder which must be variable MTRRs. */
	msr = MSR_MTRRVarBase;
	for (; (mrd - sc->mr_desc) < sc->mr_ndesc; msr += 2, mrd++) {
		/* base/type register */
		omsrv = rdmsr(msr);
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = mrd->mr_base & mtrr_physmask;
			msrv |= amd64_mrt2mtrr(mrd->mr_flags, omsrv);
		} else {
			msrv = 0;
		}
		wrmsr(msr, msrv);

		/* mask/active register */
		if (mrd->mr_flags & MDF_ACTIVE) {
			msrv = MTRR_PHYSMASK_VALID |
			    rounddown2(mtrr_physmask, mrd->mr_len);
		} else {
			msrv = 0;
		}
		wrmsr(msr + 1, msrv);
	}

	/* Flush caches and TLBs. */
	wbinvd();
	cpu_invltlb();

	/* Enable MTRRs. */
	wrmsr(MSR_MTRRdefType, rdmsr(MSR_MTRRdefType) | MTRR_DEF_ENABLE);

	/* Restore caches and PGE. */
	load_cr0(cr0);
	load_cr4(cr4);

	crit_exit();
}

/*
 * Hunt for the fixed MTRR referencing (addr)
 */
static struct mem_range_desc *
amd64_mtrrfixsearch(struct mem_range_softc *sc, u_int64_t addr)
{
	struct mem_range_desc *mrd;
	int i;

	for (i = 0, mrd = sc->mr_desc; i < (MTRR_N64K + MTRR_N16K + MTRR_N4K);
	     i++, mrd++)
		if ((addr >= mrd->mr_base) &&
		    (addr < (mrd->mr_base + mrd->mr_len)))
			return (mrd);
	return (NULL);
}

/*
 * Try to satisfy the given range request by manipulating the fixed
 * MTRRs that cover low memory.
 *
 * Note that we try to be generous here; we'll bloat the range out to
 * the next higher/lower boundary to avoid the consumer having to know
 * too much about the mechanisms here.
 *
 * XXX note that this will have to be updated when we start supporting
 * "busy" ranges.
 */
static int
amd64_mrsetlow(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *first_md, *last_md, *curr_md;

	/* Range check. */
	if (((first_md = amd64_mtrrfixsearch(sc, mrd->mr_base)) == NULL) ||
	    ((last_md = amd64_mtrrfixsearch(sc, mrd->mr_base + mrd->mr_len - 1)) == NULL))
		return (EINVAL);

	/* Check that we aren't doing something risky. */
	if (!(mrd->mr_flags & MDF_FORCE))
		for (curr_md = first_md; curr_md <= last_md; curr_md++) {
			if ((curr_md->mr_flags & MDF_ATTRMASK) == MDF_UNKNOWN)
				return (EACCES);
		}

	/* Set flags, clear set-by-firmware flag. */
	for (curr_md = first_md; curr_md <= last_md; curr_md++) {
		curr_md->mr_flags = mrcopyflags(curr_md->mr_flags &
		    ~MDF_FIRMWARE, mrd->mr_flags);
		bcopy(mrd->mr_owner, curr_md->mr_owner, sizeof(mrd->mr_owner));
	}

	return (0);
}

/*
 * Modify/add a variable MTRR to satisfy the request.
 *
 * XXX needs to be updated to properly support "busy" ranges.
 */
static int
amd64_mrsetvariable(struct mem_range_softc *sc, struct mem_range_desc *mrd,
    int *arg)
{
	struct mem_range_desc *curr_md, *free_md;
	int i;

	/*
	 * Scan the currently active variable descriptors, look for
	 * one we exactly match (straight takeover) and for possible
	 * accidental overlaps.
	 *
	 * Keep track of the first empty variable descriptor in case
	 * we can't perform a takeover.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	curr_md = sc->mr_desc + i;
	free_md = NULL;
	for (; i < sc->mr_ndesc; i++, curr_md++) {
		if (curr_md->mr_flags & MDF_ACTIVE) {
			/* Exact match? */
			if ((curr_md->mr_base == mrd->mr_base) &&
			    (curr_md->mr_len == mrd->mr_len)) {

				/* Whoops, owned by someone. */
				if (curr_md->mr_flags & MDF_BUSY)
					return (EBUSY);

				/* Check that we aren't doing something risky */
				if (!(mrd->mr_flags & MDF_FORCE) &&
				    ((curr_md->mr_flags & MDF_ATTRMASK) ==
				    MDF_UNKNOWN))
					return (EACCES);

				/* Ok, just hijack this entry. */
				free_md = curr_md;
				break;
			}

			/* Non-exact overlap? */
			if (mroverlap(curr_md, mrd)) {
				/* Between conflicting region types? */
				if (amd64_mtrrconflict(curr_md->mr_flags,
				    mrd->mr_flags))
					return (EINVAL);
			}
		} else if (free_md == NULL) {
			free_md = curr_md;
		}
	}

	/* Got somewhere to put it? */
	if (free_md == NULL)
		return (ENOSPC);

	/* Set up new descriptor. */
	free_md->mr_base = mrd->mr_base;
	free_md->mr_len = mrd->mr_len;
	free_md->mr_flags = mrcopyflags(MDF_ACTIVE, mrd->mr_flags);
	bcopy(mrd->mr_owner, free_md->mr_owner, sizeof(mrd->mr_owner));
	return (0);
}

/*
 * Handle requests to set memory range attributes by manipulating MTRRs.
 */
static int
amd64_mrset(struct mem_range_softc *sc, struct mem_range_desc *mrd, int *arg)
{
	struct mem_range_desc *targ;
	int error;

	switch (*arg) {
	case MEMRANGE_SET_UPDATE:
		/*
		 * Make sure that what's being asked for is even
		 * possible at all.
		 */
		if (!mrvalid(mrd->mr_base, mrd->mr_len) ||
		    amd64_mtrrtype(mrd->mr_flags) == -1)
			return (EINVAL);

#define	FIXTOP	((MTRR_N64K * 0x10000) + (MTRR_N16K * 0x4000) + (MTRR_N4K * 0x1000))
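
/*
 * FIXTOP is the top of fixed-range coverage: 8 * 64K + 16 * 16K +
 * 64 * 4K = 0x100000, i.e. exactly the first megabyte of physical
 * memory.
 */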

		/* Are the "low memory" conditions applicable? */
		if ((sc->mr_cap & MR686_FIXMTRR) &&
		    ((mrd->mr_base + mrd->mr_len) <= FIXTOP)) {
			if ((error = amd64_mrsetlow(sc, mrd, arg)) != 0)
				return (error);
		} else {
			/* It's time to play with variable MTRRs. */
			if ((error = amd64_mrsetvariable(sc, mrd, arg)) != 0)
				return (error);
		}
		break;

	case MEMRANGE_SET_REMOVE:
		if ((targ = mem_range_match(sc, mrd)) == NULL)
			return (ENOENT);
		if (targ->mr_flags & MDF_FIXACTIVE)
			return (EPERM);
		if (targ->mr_flags & MDF_BUSY)
			return (EBUSY);
		targ->mr_flags &= ~MDF_ACTIVE;
		targ->mr_owner[0] = 0;
		break;

	default:
		return (EOPNOTSUPP);
	}

#if 0
	/* XXX */
	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  The entire
	 * TLB will be invalidated by amd64_mrstore(), so pmap_demote_DMAP()
	 * needn't do it.
	 */
	int i;

	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, FALSE);
	}
#endif

	/* Update the hardware. */
	amd64_mrstore(sc);

	/* Refetch to see where we're at. */
	amd64_mrfetch(sc);
	return (0);
}

/*
 * Work out how many ranges we support, initialise storage for them,
 * and fetch the initial settings.
 */
static void
amd64_mrinit(struct mem_range_softc *sc)
{
	struct mem_range_desc *mrd;
	u_int regs[4];
	int i, nmdesc = 0, pabits;

	mtrrcap = rdmsr(MSR_MTRRcap);
	mtrrdef = rdmsr(MSR_MTRRdefType);

	/* For now, bail out if MTRRs are not enabled. */
	if (!(mtrrdef & MTRR_DEF_ENABLE)) {
		if (bootverbose)
			kprintf("CPU supports MTRRs but they are not enabled\n");
		return;
	}
	nmdesc = mtrrcap & MTRR_CAP_VCNT;

	/*
	 * Determine the size of the PhysMask and PhysBase fields in
	 * the variable range MTRRs.  If the extended CPUID 0x80000008
	 * is present, use that to figure out how many physical
	 * address bits the CPU supports.  Otherwise, default to 36
	 * address bits.
	 */
	if (cpu_exthigh >= 0x80000008) {
		do_cpuid(0x80000008, regs);
		pabits = regs[0] & 0xff;
	} else
		pabits = 36;
	mtrr_physmask = ((1UL << pabits) - 1) & ~0xfffUL;
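
	/*
	 * For example, with 40 physical address bits this yields
	 * 0xfffffff000 (bits 39:12 set), selecting the page-aligned
	 * physical address portion of PhysBase/PhysMask.
	 */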

	/* If fixed MTRRs supported and enabled. */
	if ((mtrrcap & MTRR_CAP_FIXED) && (mtrrdef & MTRR_DEF_FIXED_ENABLE)) {
		sc->mr_cap = MR686_FIXMTRR;
		nmdesc += MTRR_N64K + MTRR_N16K + MTRR_N4K;
	}

	sc->mr_desc = kmalloc(nmdesc * sizeof(struct mem_range_desc),
			      M_MEMDESC, M_WAITOK | M_ZERO);
	sc->mr_ndesc = nmdesc;

	mrd = sc->mr_desc;

	/* Populate the fixed MTRR entries' base/length. */
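	/*
	 * The fixed ranges tile the first megabyte: 64K granularity
	 * from 0 to 0x7ffff, 16K from 0x80000 to 0xbffff, and 4K from
	 * 0xc0000 to 0xfffff.
	 */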
	if (sc->mr_cap & MR686_FIXMTRR) {
		for (i = 0; i < MTRR_N64K; i++, mrd++) {
			mrd->mr_base = i * 0x10000;
			mrd->mr_len = 0x10000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N16K; i++, mrd++) {
			mrd->mr_base = i * 0x4000 + 0x80000;
			mrd->mr_len = 0x4000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
		for (i = 0; i < MTRR_N4K; i++, mrd++) {
			mrd->mr_base = i * 0x1000 + 0xc0000;
			mrd->mr_len = 0x1000;
			mrd->mr_flags = MDF_FIXBASE | MDF_FIXLEN |
			    MDF_FIXACTIVE;
		}
	}

	/*
	 * Get current settings, anything set now is considered to
	 * have been set by the firmware. (XXX has something already
	 * played here?)
	 */
	amd64_mrfetch(sc);
	mrd = sc->mr_desc;
	for (i = 0; i < sc->mr_ndesc; i++, mrd++) {
		if (mrd->mr_flags & MDF_ACTIVE)
			mrd->mr_flags |= MDF_FIRMWARE;
	}

#if 0
	/*
	 * Ensure that the direct map region does not contain any mappings
	 * that span MTRRs of different types.  However, the fixed MTRRs can
	 * be ignored, because a large page mapping the first 1 MB of physical
	 * memory is a special case that the processor handles.  Invalidate
	 * any old TLB entries that might hold inconsistent memory type
	 * information.
	 */
	i = (sc->mr_cap & MR686_FIXMTRR) ? MTRR_N64K + MTRR_N16K + MTRR_N4K : 0;
	mrd = sc->mr_desc + i;
	for (; i < sc->mr_ndesc; i++, mrd++) {
		if ((mrd->mr_flags & (MDF_ACTIVE | MDF_BOGUS)) == MDF_ACTIVE)
			pmap_demote_DMAP(mrd->mr_base, mrd->mr_len, TRUE);
	}
#endif
}

/*
 * Initialise MTRRs on an AP after the BSP has run the init code.
 */
static void
amd64_mrAPinit(struct mem_range_softc *sc)
{
	amd64_mrstoreone(sc);
	wrmsr(MSR_MTRRdefType, mtrrdef);
}

/*
 * Re-initialise running CPU(s) MTRRs to match the ranges in the descriptor
 * list.
 *
 * XXX Must be called with interrupts enabled.
 */
static void
amd64_mrreinit(struct mem_range_softc *sc)
{
	/*
	 * We should use ipi_all_but_self() to call other CPUs into a
	 * locking gate, then call a target function to do this work.
	 * The "proper" solution involves a generalised locking gate
	 * implementation, not ready yet.
	 */
	lwkt_send_ipiq_mask(smp_active_mask, (void *)amd64_mrAPinit, sc);
}

static void
amd64_mem_drvinit(void *unused)
{

	if (mtrrs_disabled)
		return;
	if (!(cpu_feature & CPUID_MTRR))
		return;
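	/*
	 * Restrict attention to family 6, where these MTRRs first
	 * appeared, and family 0xf, which covers CPUs that report an
	 * extended family.
	 */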
	if ((cpu_id & 0xf00) != 0x600 && (cpu_id & 0xf00) != 0xf00)
		return;
	switch (cpu_vendor_id) {
	case CPU_VENDOR_INTEL:
	case CPU_VENDOR_AMD:
	case CPU_VENDOR_CENTAUR:
		break;
	default:
		return;
	}
	mem_range_softc.mr_op = &amd64_mrops;
}
SYSINIT(amd64memdev, SI_SUB_DRIVERS, SI_ORDER_FIRST, amd64_mem_drvinit, NULL);