/*	$NetBSD: pmap.h,v 1.53 2002/04/12 21:52:48 thorpej Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC.
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/cpufunc.h>
#include <arm/arm32/pte.h>
#include <uvm/uvm_object.h>

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The L2 tables are mapped linearly starting at PTE_BASE.  PTE_BASE
 * is below KERNEL_BASE, which means that the current process's PTEs
 * are always available starting at PTE_BASE.  Another region of KVA
 * above KERNEL_BASE, APTE_BASE, is reserved for mapping in the PTEs
 * of another process, should we need to manipulate them.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	PTE_BASE
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The pmap structure itself.
 */
struct pmap {
	struct uvm_object	pm_obj;		/* uvm_object */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap)	pm_list;	/* list (locked by pm_list lock) */
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	struct l1pt		*pm_l1pt;	/* L1 table metadata */
	paddr_t			pm_pptpt;	/* PA of pt's page table */
	vaddr_t			pm_vptpt;	/* VA of pt's page table */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct vm_page		*pm_ptphint;	/* recently used PT */
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_NC		0x10		/* mapping is non-cacheable */
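
/*
 * Because the PVF_* flags share one namespace, an implementation can
 * test and clear several page attributes with a single mask operation.
 * The sketch below illustrates the idea only; it is not part of this
 * header.  The "pvh_attrs" field matches the one used by the
 * pmap_is_modified()/pmap_is_referenced() macros further down, but the
 * helper itself is hypothetical.
 */
#if 0	/* illustrative sketch only */
static __inline u_int
pmap_clear_attrs(struct vm_page *pg, u_int clearbits)
{
	u_int oattrs = pg->mdpage.pvh_attrs;

	/* e.g. clearbits = PVF_MOD|PVF_REF clears both in one store */
	pg->mdpage.pvh_attrs = oattrs & ~clearbits;
	return (oattrs & clearbits);
}
#endif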

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define	pmap_phys_address(ppn)	(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
vaddr_t	pmap_map(vaddr_t, vaddr_t, vaddr_t, int);
void	pmap_procwr(struct proc *, vaddr_t, int);

#define	PMAP_NEED_PROCWR
#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, pv_addr_t);
void	pmap_debug(int);
int	pmap_handled_emulation(struct pmap *, vaddr_t);
int	pmap_modified_emulation(struct pmap *, vaddr_t);
void	pmap_postinit(void);

void	vector_page_setprot(int);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
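
/*
 * A board's early startup code typically uses the bootstrapping
 * routines above to build the initial kernel L1/L2 tables before the
 * pmap proper is running.  The fragment below is a hedged sketch of
 * that pattern; kernel_l1pt, kernel_ptpt, and the device/kernel
 * addresses and sizes are placeholder variables, not part of this
 * header.
 */
#if 0	/* illustrative sketch only */
	/* Map the kernel with cacheable mappings, sections where possible. */
	pmap_map_chunk(kernel_l1pt.pv_va, KERNEL_BASE, physical_start,
	    kernel_size, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Hook an L2 table into the L1, then map one non-cached page. */
	pmap_link_l2pt(kernel_l1pt.pv_va, device_va, &kernel_ptpt);
	pmap_map_entry(kernel_l1pt.pv_va, device_va, device_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_NOCACHE);
#endif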

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
#define	vtopte(va) \
	(((pt_entry_t *)PTE_BASE) + arm_btop((vaddr_t) (va)))

/* Virtual address to physical address */
#define	vtophys(va) \
	((*vtopte(va) & L2_S_FRAME) | ((vaddr_t) (va) & L2_S_OFFSET))

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pdei(v)		(((v) & L1_S_FRAME) >> L1_S_SHIFT)
#define	pmap_pde(m, v)		(&((m)->pm_pdir[pmap_pdei((v))]))

#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define	KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))

/************************* ARM MMU configuration *****************************/

#if ARM_MMU_GENERIC == 1
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define	PMAP_CACHE_VIVT
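
/*
 * The macros above are enough to walk a mapping by hand: index the L1
 * with pmap_pde(), classify the descriptor, and only then consult the
 * linearly-mapped L2 via vtopte().  Below is a hedged, non-compiled
 * sketch of that walk (pmap_extract() in pmap.c is the authoritative
 * version; the helper name is hypothetical):
 */
#if 0	/* illustrative sketch only */
static __inline boolean_t
pmap_va_to_pa(struct pmap *pm, vaddr_t va, paddr_t *pap)
{
	pd_entry_t *pde = pmap_pde(pm, va);

	if (!pmap_pde_v(pde))
		return (FALSE);
	if (pmap_pde_section(pde)) {
		/* 1MB section mapping: PA comes straight from the L1. */
		*pap = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET);
		return (TRUE);
	}
	/* Page-table mapping: look up the small page in the L2. */
	if (!pmap_pte_v(vtopte(va)))
		return (FALSE);
	*pap = pmap_pte_pa(vtopte(va)) | (va & L2_S_OFFSET);
	return (TRUE);
}
#endif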

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif ARM_MMU_GENERIC == 1
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */
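
/*
 * The PROTO, PROT, and cache-mode pieces compose into complete PTEs.
 * A hedged sketch of building a kernel-writable, cacheable small-page
 * mapping, using the L2_S_PROT() helper defined just below
 * (pmap_enter() in pmap.c does this for real; "va" and the
 * page-aligned "pa" are placeholders):
 */
#if 0	/* illustrative sketch only */
	pt_entry_t npte;

	npte = L2_S_PROTO | pa |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE) |
	    pte_l2_s_cache_mode;
	*vtopte(va) = npte;
#endif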

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))

#endif	/* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */