/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory of lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * Pte related macros
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
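
/*
 * Illustrative sketch, not part of the original header: assuming the usual
 * x86_64 shift values of 39/30/21/12 for PML4SHIFT/PDPSHIFT/PDRSHIFT/
 * PAGE_SHIFT, KVADDR() ORs in the sign-extension bits above bit 47 while
 * UVADDR() leaves them clear, e.g.:
 *
 *	KVADDR(0, 0, 0, 0) == 0xffff800000000000	(kernel half)
 *	UVADDR(1, 2, 3, 4) == (1UL << 39) | (2UL << 30) |
 *			      (3UL << 21) | (4UL << 12)
 *			   == 0x0000008080604000	(user half)
 */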

/*
 * Initial number of kernel page tables.  NKPT is now calculated in the
 * pmap code.
 *
 * Give NKPDPE a generous value, allowing the kernel to map up to 128G
 * (128 PDP slots at 1GB per PDPE).
 */
#define NKPML4E		1		/* number of kernel PML4 slots */
#define NKPDPE		128		/* number of kernel PDP slots */

#define NUPDP_TOTAL	(NPML4EPG/2)		/* total PDP pages */
#define NUPD_TOTAL	(NUPDP_TOTAL*NPDPEPG)	/* total PD pages */
#define NUPT_TOTAL	(NUPD_TOTAL*NPDEPG)	/* total PT pages */

#define NDMPML4E	1		/* number of dmap PML4 slots */
#define NDMPDPE		NPTEPG		/* number of dmap PDPE slots */

/*
 * The *PML4I values control the layout of virtual memory.
 */
#define PML4PML4I	NUPDP_TOTAL	/* Index of recursive pml4 mapping */
#define NUPML4E		NUPDP_TOTAL	/* for vmparam.h */

/*
 * Currently no tests available (see vm/vm_page.c)
 */
#define MD_PAGE_FREEABLE(m)	1

#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _SYS_VKERNEL_H_
#include <sys/vkernel.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

#ifdef _KERNEL

/*
 * XXX
 */
#define vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define vtophys_pte(va)	((pt_entry_t)pmap_kextract(((vm_offset_t)(va))))

#endif

#define pte_load_clear(pte)	atomic_readandclear_long(pte)

/*
 * Pmap stuff
 */
struct pv_entry;
struct vm_page;
struct vm_object;

struct pv_entry_rb_tree;
RB_PROTOTYPE2(pv_entry_rb_tree, pv_entry, pv_entry,
	      pv_entry_compare, vm_offset_t);

struct md_page {
	int pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

struct md_object {
};

/*
 * Each machine-dependent implementation is expected to keep certain
 * statistics.  They may do this any way they so choose, but are expected
 * to return the statistics in the following structure.
 */
struct pmap_statistics {
	long resident_count;	/* # of pages mapped (total) */
	long wired_count;	/* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	struct vm_page		*pm_pdirm;	/* VM page for pg directory */
	vpte_t			pm_pdirpte;	/* pte mapping phys page */
	struct vm_object	*pm_pteobj;	/* Container for pte's */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	RB_HEAD(pv_entry_rb_tree, pv_entry) pm_pvroot;
	int			pm_count;	/* reference count */
	cpulock_t		pm_active_lock;	/* interlock */
	cpumask_t		pm_active;	/* active on cpus */
	vm_pindex_t		pm_pdindex;	/* page dir page in obj */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* pmap ptp hint */
	int			pm_generation;	/* detect pvlist deletions */
	struct spinlock		pm_spin;
};

#define pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define pmap_resident_tlnw_count(pmap)	((pmap)->pm_stats.resident_count - \
					 (pmap)->pm_stats.wired_count)

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap;
#endif
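
/*
 * Illustrative sketch, not part of the original header: the statistics
 * macros above read struct pmap_statistics directly, so a hypothetical
 * caller could report a pmap's footprint as
 *
 *	long resident = pmap_resident_count(&kernel_pmap);
 *	long unwired  = pmap_resident_tlnw_count(&kernel_pmap);
 *
 * where pmap_resident_tlnw_count() is resident_count minus wired_count,
 * i.e. the resident pages that are not wired down.
 */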

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_list;
	RB_ENTRY(pv_entry)	pv_entry;
	struct vm_page	*pv_ptem;	/* VM page for pte */
} *pv_entry_t;

#ifdef _KERNEL

extern caddr_t	CADDR1;
extern pt_entry_t *CMAP1;
extern char	*ptvmmap;		/* poor name! */
extern vm_offset_t clean_sva;
extern vm_offset_t clean_eva;

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
#ifndef __VM_MEMATTR_T_DEFINED__
#define __VM_MEMATTR_T_DEFINED__
typedef char vm_memattr_t;
#endif

void	pmap_bootstrap(vm_paddr_t *, int64_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait(struct vmspace *);

struct vm_page *pmap_use_pt(pmap_t, vm_offset_t);

/*
 * Always returns 0: accessed/dirty bit emulation is not used here.
 */
static __inline int
pmap_emulate_ad_bits(pmap_t pmap)
{
	return 0;
}

#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */