/*	$OpenBSD: pmap.h,v 1.55 2023/12/11 22:12:53 kettenis Exp $	*/
/*	$NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM_PMAP_H_
#define	_ARM_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable.  Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
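
/*
 * Worked example (informative): assuming the usual 1MB section size
 * (L1_S_SHIFT == 20, from <arm/pte.h>), L2_LOG2 = (32 - 20) - 4 = 8,
 * so L2_SIZE = 256.  That is, at most 256 l2_dtable structures, each
 * spanning L2_BUCKET_SIZE * 1MB = 16MB of VA, are needed to cover the
 * entire 4GB address space; pm_l2[] below is sized accordingly.
 */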

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
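
/*
 * Illustrative sketch (not from this file): platform code typically
 * declares a table like the one below and hands it to
 * pmap_devmap_bootstrap() early in MD startup.  The addresses here are
 * hypothetical, and the zero-size terminating entry assumes the table
 * is scanned until pd_size == 0.
 *
 *	static const struct pmap_devmap example_devmap[] = {
 *		{
 *			.pd_va = 0xf0000000,	// VA picked for the UART
 *			.pd_pa = 0x44e09000,	// hypothetical UART PA
 *			.pd_size = L1_S_SIZE,	// rounded up to a section
 *			.pd_prot = PROT_READ | PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE, // device memory: uncached
 *		},
 *		{ 0, 0, 0, 0, 0 }		// terminator
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt_va, example_devmap);
 */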

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	int			pm_remove_all;
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	u_int			pm_refs;
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
};

typedef struct pmap *pmap_t;

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define	PMAP_PA_MASK	~((paddr_t)PAGE_MASK)	/* to remove the flags */
#define	PMAP_NOCACHE	0x1		/* non-cacheable memory. */
#define	PMAP_DEVICE	0x2		/* device memory. */

/*
 * Physical / virtual address structure.  In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED and PVF_WRITE are kept in individual pv_entry's
 * for each page.  They live in the same "namespace" so that we can
 * clear multiple attributes at a time.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;

/*
 * Macros that we need to export
 */
#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_deactivate(p)		do { /* nothing */ } while (0)

#define	pmap_init_percpu()		do { /* nothing */ } while (0)
#define	pmap_unuse_final(p)		do { /* nothing */ } while (0)
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

/*
 * Functions that we need to export
 */
void	pmap_remove_all(pmap_t);
void	pmap_uncache_page(paddr_t, vaddr_t);

#define	PMAP_CHECK_COPYIN	1

#define	PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
int	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
int	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_postinit(void);

void	vector_page_setprot(int);

/* XXX */
void pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
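
/*
 * Usage sketch (informative): vtopte() only walks the kernel pmap, so
 * it is handy when kernel code needs to inspect or patch an existing
 * kernel mapping in place, e.g.:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & PAGE_MASK);
 */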

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC

#define	PTE_SYNC(pte)							\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa); \
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa), \
			    sizeof(pt_entry_t));			\
		}							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(), (vaddr_t)(pte), &pa); \
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa), \
			    (cnt) << 2); /* * sizeof(pt_entry_t) */	\
		}							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)
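
/*
 * Usage sketch (informative): any store to a live page-table entry
 * should be followed by a PTE_SYNC so the table-walk hardware sees it,
 * e.g. when tearing down a kernel mapping:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	*ptep = 0;			// invalidate the entry
 *	PTE_SYNC(ptep);			// push it past the cache
 *	cpu_tlb_flushID_SE(va);		// shoot down the stale TLB entry
 */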

#define	l1pte_valid(pde)	(((pde) & L1_TYPE_MASK) != L1_TYPE_INV)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define	l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define	pmap_pde_v(pde)		l1pte_valid(*(pde))
#define	pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define	pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define	pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_V7) != 0
void	pmap_copy_page_generic(struct vm_page *, struct vm_page *);
void	pmap_zero_page_generic(struct vm_page *);

void	pmap_pte_init_generic(void);
void	pmap_pte_init_armv7(void);
#endif /* (ARM_MMU_V7) != 0 */

#if ARM_MMU_V7 == 1
void	pmap_pte_init_v7(void);
#endif /* ARM_MMU_V7 == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_coherent;
extern pt_entry_t		pte_l2_l_coherent;
extern pt_entry_t		pte_l2_s_coherent;

extern pt_entry_t		pte_l1_s_prot_ur;
extern pt_entry_t		pte_l1_s_prot_uw;
extern pt_entry_t		pte_l1_s_prot_kr;
extern pt_entry_t		pte_l1_s_prot_kw;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_ur;
extern pt_entry_t		pte_l2_l_prot_uw;
extern pt_entry_t		pte_l2_l_prot_kr;
extern pt_entry_t		pte_l2_l_prot_kw;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l2_s_prot_ur;
extern pt_entry_t		pte_l2_s_prot_uw;
extern pt_entry_t		pte_l2_s_prot_kr;
extern pt_entry_t		pte_l2_s_prot_kw;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(struct vm_page *, struct vm_page *);
extern void (*pmap_zero_page_func)(struct vm_page *);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */
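
/*
 * Informative sketch (not part of this interface): the DACR gives each
 * of the 16 domains a 2-bit field, and "client" access (0b01) means
 * accesses are checked against the PTE permission bits.  Granting
 * client access to a pmap's domain therefore looks roughly like:
 *
 *	dacr |= 0x1 << (pm->pm_domain * 2);
 */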

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_UR_v7		(L1_S_V7_AP(AP_V7_KRUR))
#define	L1_S_PROT_UW_v7		(L1_S_V7_AP(AP_KRWURW))
#define	L1_S_PROT_KR_v7		(L1_S_V7_AP(AP_V7_KR))
#define	L1_S_PROT_KW_v7		(L1_S_V7_AP(AP_KRW))
#define	L1_S_PROT_MASK_v7	(L1_S_V7_AP(0x07))

#define	L1_S_CACHE_MASK_v7	(L1_S_B|L1_S_C|L1_S_V7_TEX_MASK)

#define	L1_S_COHERENT_v7	(L1_S_C)

#define	L2_L_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_L_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_L_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_L_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_L_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_L_XN)

#define	L2_L_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_L_TEX_MASK)

#define	L2_L_COHERENT_v7	(L2_C)

#define	L2_S_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_S_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_S_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_S_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_S_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_S_XN)

#define	L2_S_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_S_TEX_MASK)

#define	L2_S_COHERENT_v7	(L2_C)

#define	L1_S_PROTO_v7		(L1_TYPE_S)

#define	L1_C_PROTO_v7		(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_v7		(L2_TYPE_S)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_UR		pte_l1_s_prot_ur
#define	L1_S_PROT_UW		pte_l1_s_prot_uw
#define	L1_S_PROT_KR		pte_l1_s_prot_kr
#define	L1_S_PROT_KW		pte_l1_s_prot_kw
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_L_PROT_UR		pte_l2_l_prot_ur
#define	L2_L_PROT_UW		pte_l2_l_prot_uw
#define	L2_L_PROT_KR		pte_l2_l_prot_kr
#define	L2_L_PROT_KW		pte_l2_l_prot_kw
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L2_S_PROT_UR		pte_l2_s_prot_ur
#define	L2_S_PROT_UW		pte_l2_s_prot_uw
#define	L2_S_PROT_KR		pte_l2_s_prot_kr
#define	L2_S_PROT_KW		pte_l2_s_prot_kw
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_COHERENT		pte_l1_s_coherent
#define	L2_L_COHERENT		pte_l2_l_coherent
#define	L2_S_COHERENT		pte_l2_s_coherent

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_UR		L1_S_PROT_UR_v7
#define	L1_S_PROT_UW		L1_S_PROT_UW_v7
#define	L1_S_PROT_KR		L1_S_PROT_KR_v7
#define	L1_S_PROT_KW		L1_S_PROT_KW_v7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_v7

#define	L2_L_PROT_UR		L2_L_PROT_UR_v7
#define	L2_L_PROT_UW		L2_L_PROT_UW_v7
#define	L2_L_PROT_KR		L2_L_PROT_KR_v7
#define	L2_L_PROT_KW		L2_L_PROT_KW_v7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_v7

#define	L2_S_PROT_UR		L2_S_PROT_UR_v7
#define	L2_S_PROT_UW		L2_S_PROT_UW_v7
#define	L2_S_PROT_KR		L2_S_PROT_KR_v7
#define	L2_S_PROT_KW		L2_S_PROT_KW_v7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_v7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_v7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_v7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_v7

#define	L1_S_COHERENT		L1_S_COHERENT_v7
#define	L2_L_COHERENT		L2_L_COHERENT_v7
#define	L2_S_COHERENT		L2_S_COHERENT_v7

#define	L1_S_PROTO		L1_S_PROTO_v7
#define	L1_C_PROTO		L1_C_PROTO_v7
#define	L2_S_PROTO		L2_S_PROTO_v7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#ifndef _LOCORE
static __inline pt_entry_t
L1_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L1_S_PROT_KW : L1_S_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L1_S_V7_XN;

	return pte;
}

static __inline pt_entry_t
L2_L_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_L_XN;

	return pte;
}

static __inline pt_entry_t
L2_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR;

	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_S_XN;

	return pte;
}

static __inline int
l2pte_is_writeable(pt_entry_t pte, struct pmap *pm)
{
	return (pte & L2_V7_AP(0x4)) == 0;
}
#endif
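
/*
 * Composition sketch (informative): a complete small-page PTE is built
 * by combining the prototype bits, the frame address, the protection
 * bits and a cache mode, e.g. for a cacheable user read/write page:
 *
 *	pt_entry_t npte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_USER, PROT_READ | PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 */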

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)

#endif /* _KERNEL */

#ifndef _LOCORE
/*
 * pmap-specific data stored in the vm_page structure.
 */
struct vm_page_md {
	struct pv_entry *pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
} while (/*CONSTCOND*/0)
#endif /* _LOCORE */

#endif	/* _ARM_PMAP_H_ */