/*	$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $	*/

/*
 * Copyright (c) 2002 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Performance improvements, UVM changes, overhauls and part-rewrites
 * were contributed by Neil A. Carson <neil@causality.com>.
 */

/*
 * The dram block info is currently referenced from the bootconfig.
 * This should be placed in a separate structure.
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG		- Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_pmap_debug.h"
#include "opt_ddb.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/pool.h>
#include <sys/cdefs.h>

#include <uvm/uvm.h>

#include <machine/bootconfig.h>
#include <machine/bus.h>
#include <machine/pmap.h>
#include <machine/pcb.h>
#include <machine/param.h>
#include <arm/arm32/katelib.h>

__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.97 2002/05/14 19:22:34 chris Exp $");
#ifdef PMAP_DEBUG
#define	PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
int pmap_debug_level = -2;
void pmap_dump_pvlist(vaddr_t phys, char *m);

/*
 * for switching to potentially finer grained debugging
 */
#define	PDB_FOLLOW	0x0001
#define	PDB_INIT	0x0002
#define	PDB_ENTER	0x0004
#define	PDB_REMOVE	0x0008
#define	PDB_CREATE	0x0010
#define	PDB_PTPAGE	0x0020
#define	PDB_GROWKERN	0x0040
#define	PDB_BITS	0x0080
#define	PDB_COLLECT	0x0100
#define	PDB_PROTECT	0x0200
#define	PDB_MAP_L1	0x0400
#define	PDB_BOOTSTRAP	0x1000
#define	PDB_PARANOIA	0x2000
#define	PDB_WIRING	0x4000
#define	PDB_PVDUMP	0x8000

int debugmap = 0;
int pmapdebug = PDB_PARANOIA | PDB_FOLLOW;
#define	NPDEBUG(_lev_,_stat_) \
	if (pmapdebug & (_lev_)) \
		((_stat_))

#else	/* PMAP_DEBUG */
#define	PDEBUG(_lev_,_stat_)	/* Nothing */
#define	NPDEBUG(_lev_,_stat_)	/* Nothing */
#endif	/* PMAP_DEBUG */

struct pmap	kernel_pmap_store;

/*
 * linked list of all non-kernel pmaps
 */

static LIST_HEAD(, pmap) pmaps;

/*
 * pool that pmap structures are allocated from
 */

struct pool pmap_pmap_pool;

static pt_entry_t *csrc_pte, *cdst_pte;
static vaddr_t csrcp, cdstp;

char *memhook;
extern caddr_t msgbufaddr;

boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
/*
 * locking data structures
 */

static struct lock pmap_main_lock;
static struct simplelock pvalloc_lock;
static struct simplelock pmaps_lock;
#ifdef LOCKDEBUG
#define	PMAP_MAP_TO_HEAD_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_SHARED, NULL)
#define	PMAP_MAP_TO_HEAD_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)

#define	PMAP_HEAD_TO_MAP_LOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_EXCLUSIVE, NULL)
#define	PMAP_HEAD_TO_MAP_UNLOCK() \
	(void) spinlockmgr(&pmap_main_lock, LK_RELEASE, NULL)
#else
#define	PMAP_MAP_TO_HEAD_LOCK()		/* nothing */
#define	PMAP_MAP_TO_HEAD_UNLOCK()	/* nothing */
#define	PMAP_HEAD_TO_MAP_LOCK()		/* nothing */
#define	PMAP_HEAD_TO_MAP_UNLOCK()	/* nothing */
#endif	/* LOCKDEBUG */

/*
 * pv_page management structures: locked by pvalloc_lock
 */

TAILQ_HEAD(pv_pagelist, pv_page);
static struct pv_pagelist pv_freepages;	/* list of pv_pages with free entrys */
static struct pv_pagelist pv_unusedpgs;	/* list of unused pv_pages */
static int pv_nfpvents;			/* # of free pv entries */
static struct pv_page *pv_initpage;	/* bootstrap page from kernel_map */
static vaddr_t pv_cachedva;		/* cached VA for later use */

#define	PVE_LOWAT (PVE_PER_PVPAGE / 2)	/* free pv_entry low water mark */
#define	PVE_HIWAT (PVE_LOWAT + (PVE_PER_PVPAGE * 2))
					/* high water mark */

/*
 * local prototypes
 */

static struct pv_entry	*pmap_add_pvpage __P((struct pv_page *, boolean_t));
static struct pv_entry	*pmap_alloc_pv __P((struct pmap *, int)); /* see codes below */
#define	ALLOCPV_NEED	0	/* need PV now */
#define	ALLOCPV_TRY	1	/* just try to allocate, don't steal */
#define	ALLOCPV_NONEED	2	/* don't need PV, just growing cache */
static struct pv_entry	*pmap_alloc_pvpage __P((struct pmap *, int));
static void		 pmap_enter_pv __P((struct vm_page *,
					    struct pv_entry *, struct pmap *,
					    vaddr_t, struct vm_page *, int));
static void		 pmap_free_pv __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pvs __P((struct pmap *, struct pv_entry *));
static void		 pmap_free_pv_doit __P((struct pv_entry *));
static void		 pmap_free_pvpage __P((void));
static boolean_t	 pmap_is_curpmap __P((struct pmap *));
static struct pv_entry	*pmap_remove_pv __P((struct vm_page *, struct pmap *,
					     vaddr_t));
#define	PMAP_REMOVE_ALL		0	/* remove all mappings */
#define	PMAP_REMOVE_SKIPWIRED	1	/* skip wired mappings */

static u_int pmap_modify_pv __P((struct pmap *, vaddr_t, struct vm_page *,
				 u_int, u_int));

/*
 * Structure that describes an L1 table.
 */
struct l1pt {
	SIMPLEQ_ENTRY(l1pt) pt_queue;	/* Queue pointers */
	struct pglist pt_plist;		/* Allocated page list */
	vaddr_t pt_va;			/* Allocated virtual address */
	int pt_flags;			/* Flags */
};
#define	PTFLAG_STATIC	0x01	/* Statically allocated */
#define	PTFLAG_KPT	0x02	/* Kernel pt's are mapped */
#define	PTFLAG_CLEAN	0x04	/* L1 is clean */

static void pmap_free_l1pt __P((struct l1pt *));
static int pmap_allocpagedir __P((struct pmap *));
static int pmap_clean_page __P((struct pv_entry *, boolean_t));
static void pmap_remove_all __P((struct vm_page *));

static int pmap_alloc_ptpt(struct pmap *);
static void pmap_free_ptpt(struct pmap *);

static struct vm_page *pmap_alloc_ptp __P((struct pmap *, vaddr_t));
static struct vm_page *pmap_get_ptp __P((struct pmap *, vaddr_t));
__inline static void pmap_clearbit __P((struct vm_page *, unsigned int));

extern paddr_t physical_start;
extern paddr_t physical_freestart;
extern paddr_t physical_end;
extern paddr_t physical_freeend;
extern unsigned int free_pages;
extern int max_processes;

vaddr_t virtual_avail;
vaddr_t virtual_end;
vaddr_t pmap_curmaxkvaddr;

vaddr_t avail_start;
vaddr_t avail_end;

extern pv_addr_t systempage;

/* Variables used by the L1 page table queue code */
SIMPLEQ_HEAD(l1pt_queue, l1pt);
static struct l1pt_queue l1pt_static_queue;	/* head of our static l1 queue */
static int l1pt_static_queue_count;		/* items in the static l1 queue */
static int l1pt_static_create_count;		/* static l1 items created */
static struct l1pt_queue l1pt_queue;		/* head of our l1 queue */
static int l1pt_queue_count;			/* items in the l1 queue */
static int l1pt_create_count;			/* stat - L1's create count */
static int l1pt_reuse_count;			/* stat - L1's reused count */

/* Local function prototypes (not used outside this file) */
void pmap_pinit __P((struct pmap *));
void pmap_freepagedir __P((struct pmap *));

/* Other function prototypes */
extern void bzero_page __P((vaddr_t));
extern void bcopy_page __P((vaddr_t, vaddr_t));

struct l1pt *pmap_alloc_l1pt __P((void));
static __inline void pmap_map_in_l1 __P((struct pmap *pmap, vaddr_t va,
     vaddr_t l2pa, boolean_t));

static pt_entry_t *pmap_map_ptes __P((struct pmap *));
static void pmap_unmap_ptes __P((struct pmap *));

__inline static void pmap_vac_me_harder __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_kpmap __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));
static void pmap_vac_me_user __P((struct pmap *, struct vm_page *,
    pt_entry_t *, boolean_t));

/*
 * real definition of pv_entry.
 */

struct pv_entry {
	struct pv_entry *pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
	struct vm_page	*pv_ptp;	/* vm_page for the ptp */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define	PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

#ifdef MYCROFT_HACK
int mycroft_hack = 0;
#endif

/* Function to set the debug level of the pmap code */

#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	printf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

__inline static boolean_t
pmap_is_curpmap(struct pmap *pmap)
{

	if ((curproc && curproc->p_vmspace->vm_map.pmap == pmap) ||
	    pmap == pmap_kernel())
		return (TRUE);

	return (FALSE);
}

#include "isadma.h"

#if NISADMA > 0
/*
 * Used to protect memory for ISA DMA bounce buffers.  If, when loading
 * pages into the system, memory intersects with any of these ranges,
 * the intersecting memory will be loaded into a lower-priority free list.
 */
bus_dma_segment_t *pmap_isa_dma_ranges;
int pmap_isa_dma_nranges;

/*
 * Check if a memory range intersects with an ISA DMA range, and
 * return the page-rounded intersection if it does.  The intersection
 * will be placed on a lower-priority free list.
 */
static boolean_t
pmap_isa_dma_range_intersect(paddr_t pa, psize_t size, paddr_t *pap,
    psize_t *sizep)
{
	bus_dma_segment_t *ds;
	int i;

	if (pmap_isa_dma_ranges == NULL)
		return (FALSE);

	for (i = 0, ds = pmap_isa_dma_ranges;
	     i < pmap_isa_dma_nranges; i++, ds++) {
		if (ds->ds_addr <= pa && pa < (ds->ds_addr + ds->ds_len)) {
			/*
			 * Beginning of region intersects with this range.
			 */
			*pap = trunc_page(pa);
			*sizep = round_page(min(pa + size,
			    ds->ds_addr + ds->ds_len) - pa);
			return (TRUE);
		}
		if (pa < ds->ds_addr && ds->ds_addr < (pa + size)) {
			/*
			 * End of region intersects with this range.
			 */
			*pap = trunc_page(ds->ds_addr);
			*sizep = round_page(min((pa + size) - ds->ds_addr,
			    ds->ds_len));
			return (TRUE);
		}
	}

	/*
	 * No intersection found.
	 */
	return (FALSE);
}
#endif /* NISADMA > 0 */

/*
 * p v _ e n t r y   f u n c t i o n s
 */

/*
 * pv_entry allocation functions:
 *   the main pv_entry allocation functions are:
 *     pmap_alloc_pv: allocate a pv_entry structure
 *     pmap_free_pv: free one pv_entry
 *     pmap_free_pvs: free a list of pv_entrys
 *
 * the rest are helper functions
 */

/*
 * pmap_alloc_pv: inline function to allocate a pv_entry structure
 * => we lock pvalloc_lock
 * => if we fail, we call out to pmap_alloc_pvpage
 * => 3 modes:
 *    ALLOCPV_NEED   = we really need a pv_entry, even if we have to steal it
 *    ALLOCPV_TRY    = we want a pv_entry, but not enough to steal
 *    ALLOCPV_NONEED = we are trying to grow our free list, don't really need
 *			one now
 *
 * "try" is for optional functions like pmap_copy().
 */

__inline static struct pv_entry *
pmap_alloc_pv(struct pmap *pmap, int mode)
{
	struct pv_page *pvpage;
	struct pv_entry *pv;

	simple_lock(&pvalloc_lock);

	pvpage = TAILQ_FIRST(&pv_freepages);

	if (pvpage != NULL) {
		pvpage->pvinfo.pvpi_nfree--;
		if (pvpage->pvinfo.pvpi_nfree == 0) {
			/* nothing left in this one? */
			TAILQ_REMOVE(&pv_freepages, pvpage, pvinfo.pvpi_list);
		}
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;
		pv_nfpvents--;	/* took one from pool */
	} else {
		pv = NULL;	/* need more of them */
	}

	/*
	 * if below low water mark or we didn't get a pv_entry we try and
	 * create more pv_entrys ...
	 */

	if (pv_nfpvents < PVE_LOWAT || pv == NULL) {
		if (pv == NULL)
			pv = pmap_alloc_pvpage(pmap, (mode == ALLOCPV_TRY) ?
			    mode : ALLOCPV_NEED);
		else
			(void) pmap_alloc_pvpage(pmap, ALLOCPV_NONEED);
	}

	simple_unlock(&pvalloc_lock);
	return(pv);
}

/*
 * pmap_alloc_pvpage: maybe allocate a new pvpage
 *
 * if need_entry is false: try and allocate a new pv_page
 * if need_entry is true: try and allocate a new pv_page and return a
 * new pv_entry from it.  if we are unable to allocate a pv_page
 * we make a last ditch effort to steal a pv_page from some other
 * mapping.  if that fails, we panic...
 *
 * => we assume that the caller holds pvalloc_lock
 */

static struct pv_entry *
pmap_alloc_pvpage(struct pmap *pmap, int mode)
{
	struct vm_page *pg;
	struct pv_page *pvpage;
	struct pv_entry *pv;
	int s;

	/*
	 * if we need_entry and we've got unused pv_pages, allocate from there
	 */

	pvpage = TAILQ_FIRST(&pv_unusedpgs);
	if (mode != ALLOCPV_NONEED && pvpage != NULL) {

		/* move it to pv_freepages list */
		TAILQ_REMOVE(&pv_unusedpgs, pvpage, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_freepages, pvpage, pvinfo.pvpi_list);

		/* allocate a pv_entry */
		pvpage->pvinfo.pvpi_nfree--;	/* can't go to zero */
		pv = pvpage->pvinfo.pvpi_pvfree;
		KASSERT(pv);
		pvpage->pvinfo.pvpi_pvfree = pv->pv_next;

		pv_nfpvents--;	/* took one from pool */
		return(pv);
	}

	/*
	 * see if we've got a cached unmapped VA that we can map a page in.
	 * if not, try to allocate one.
	 */


	if (pv_cachedva == 0) {
		s = splvm();
		pv_cachedva = uvm_km_kmemalloc(kmem_map, NULL,
		    PAGE_SIZE, UVM_KMF_TRYLOCK|UVM_KMF_VALLOC);
		splx(s);
		if (pv_cachedva == 0) {
			return (NULL);
		}
	}

	pg = uvm_pagealloc(NULL, pv_cachedva - vm_map_min(kernel_map), NULL,
	    UVM_PGA_USERESERVE);

	if (pg == NULL)
		return (NULL);
	pg->flags &= ~PG_BUSY;	/* never busy */

	/*
	 * add a mapping for our new pv_page and free its entrys (save one!)
	 *
	 * NOTE: If we are allocating a PV page for the kernel pmap, the
	 * pmap is already locked!  (...but entering the mapping is safe...)
 */

	pmap_kenter_pa(pv_cachedva, VM_PAGE_TO_PHYS(pg),
	    VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());
	pvpage = (struct pv_page *) pv_cachedva;
	pv_cachedva = 0;
	return (pmap_add_pvpage(pvpage, mode != ALLOCPV_NONEED));
}

/*
 * pmap_add_pvpage: add a pv_page's pv_entrys to the free list
 *
 * => caller must hold pvalloc_lock
 * => if need_entry is true, we allocate and return one pv_entry
 */

static struct pv_entry *
pmap_add_pvpage(struct pv_page *pvp, boolean_t need_entry)
{
	int tofree, lcv;

	/* do we need to return one? */
	tofree = (need_entry) ? PVE_PER_PVPAGE - 1 : PVE_PER_PVPAGE;

	pvp->pvinfo.pvpi_pvfree = NULL;
	pvp->pvinfo.pvpi_nfree = tofree;
	for (lcv = 0 ; lcv < tofree ; lcv++) {
		pvp->pvents[lcv].pv_next = pvp->pvinfo.pvpi_pvfree;
		pvp->pvinfo.pvpi_pvfree = &pvp->pvents[lcv];
	}
	if (need_entry)
		TAILQ_INSERT_TAIL(&pv_freepages, pvp, pvinfo.pvpi_list);
	else
		TAILQ_INSERT_TAIL(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	pv_nfpvents += tofree;
	return((need_entry) ? &pvp->pvents[lcv] : NULL);
}

/*
 * pmap_free_pv_doit: actually free a pv_entry
 *
 * => do not call this directly!  instead use either
 *    1. pmap_free_pv ==> free a single pv_entry
 *    2. pmap_free_pvs => free a list of pv_entrys
 * => we must be holding pvalloc_lock
 */

__inline static void
pmap_free_pv_doit(struct pv_entry *pv)
{
	struct pv_page *pvp;

	pvp = (struct pv_page *) arm_trunc_page((vaddr_t)pv);
	pv_nfpvents++;
	pvp->pvinfo.pvpi_nfree++;

	/* nfree == 1 => fully allocated page just became partly allocated */
	if (pvp->pvinfo.pvpi_nfree == 1) {
		TAILQ_INSERT_HEAD(&pv_freepages, pvp, pvinfo.pvpi_list);
	}

	/* free it */
	pv->pv_next = pvp->pvinfo.pvpi_pvfree;
	pvp->pvinfo.pvpi_pvfree = pv;

	/*
	 * are all pv_page's pv_entry's free?  move it to unused queue.
	 */

	if (pvp->pvinfo.pvpi_nfree == PVE_PER_PVPAGE) {
		TAILQ_REMOVE(&pv_freepages, pvp, pvinfo.pvpi_list);
		TAILQ_INSERT_HEAD(&pv_unusedpgs, pvp, pvinfo.pvpi_list);
	}
}

/*
 * pmap_free_pv: free a single pv_entry
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pv(struct pmap *pmap, struct pv_entry *pv)
{
	simple_lock(&pvalloc_lock);
	pmap_free_pv_doit(pv);

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}

/*
 * pmap_free_pvs: free a list of pv_entrys
 *
 * => we gain the pvalloc_lock
 */

__inline static void
pmap_free_pvs(struct pmap *pmap, struct pv_entry *pvs)
{
	struct pv_entry *nextpv;

	simple_lock(&pvalloc_lock);

	for ( /* null */ ; pvs != NULL ; pvs = nextpv) {
		nextpv = pvs->pv_next;
		pmap_free_pv_doit(pvs);
	}

	/*
	 * Can't free the PV page if the PV entries were associated with
	 * the kernel pmap; the pmap is already locked.
	 */
	if (pv_nfpvents > PVE_HIWAT && TAILQ_FIRST(&pv_unusedpgs) != NULL &&
	    pmap != pmap_kernel())
		pmap_free_pvpage();

	simple_unlock(&pvalloc_lock);
}


/*
 * pmap_free_pvpage: try and free an unused pv_page structure
 *
 * => assume caller is holding the pvalloc_lock and that
 *	there is a page on the pv_unusedpgs list
 * => if we can't get a lock on the kmem_map we try again later
 */

static void
pmap_free_pvpage(void)
{
	int s;
	struct vm_map *map;
	struct vm_map_entry *dead_entries;
	struct pv_page *pvp;

	s = splvm(); /* protect kmem_map */

	pvp = TAILQ_FIRST(&pv_unusedpgs);

	/*
	 * note: watch out for pv_initpage which is allocated out of
	 * kernel_map rather than kmem_map.
	 */
	if (pvp == pv_initpage)
		map = kernel_map;
	else
		map = kmem_map;
	if (vm_map_lock_try(map)) {

		/* remove pvp from pv_unusedpgs */
		TAILQ_REMOVE(&pv_unusedpgs, pvp, pvinfo.pvpi_list);

		/* unmap the page */
		dead_entries = NULL;
		uvm_unmap_remove(map, (vaddr_t)pvp, ((vaddr_t)pvp) + PAGE_SIZE,
		    &dead_entries);
		vm_map_unlock(map);

		if (dead_entries != NULL)
			uvm_unmap_detach(dead_entries, 0);

		pv_nfpvents -= PVE_PER_PVPAGE;	/* update free count */
	}
	if (pvp == pv_initpage)
		/* no more initpage, we've freed it */
		pv_initpage = NULL;

	splx(s);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */

__inline static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, struct pmap *pmap,
    vaddr_t va, struct vm_page *ptp, int flags)
{
	pve->pv_pmap = pmap;
	pve->pv_va = va;
	pve->pv_ptp = ptp;			/* NULL for kernel pmap */
	pve->pv_flags = flags;
	simple_lock(&pg->mdpage.pvh_slock);	/* lock vm_page */
	pve->pv_next = pg->mdpage.pvh_list;	/* add to ... */
	pg->mdpage.pvh_list = pve;		/* ... locked list */
	simple_unlock(&pg->mdpage.pvh_slock);	/* unlock, done! */
	if (pve->pv_flags & PVF_WIRED)
		++pmap->pm_stats.wired_count;
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

__inline static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, struct pmap *pmap, vaddr_t va)
{
	struct pv_entry *pve, **prevptr;

	prevptr = &pg->mdpage.pvh_list;		/* previous pv_entry pointer */
	pve = *prevptr;
	while (pve) {
		if (pve->pv_pmap == pmap && pve->pv_va == va) {	/* match? */
			*prevptr = pve->pv_next;		/* remove it! */
			if (pve->pv_flags & PVF_WIRED)
				--pmap->pm_stats.wired_count;
			break;
		}
		prevptr = &pve->pv_next;		/* previous pointer */
		pve = pve->pv_next;			/* advance */
	}
	return(pve);				/* return removed pve */
}

/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */

static /* __inline */ u_int
pmap_modify_pv(struct pmap *pmap, vaddr_t va, struct vm_page *pg,
    u_int bic_mask, u_int eor_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	/*
	 * There is at least one VA mapping this page.
	 */

	for (npv = pg->mdpage.pvh_list; npv; npv = npv->pv_next) {
		if (pmap == npv->pv_pmap && va == npv->pv_va) {
			oflags = npv->pv_flags;
			npv->pv_flags = flags =
			    ((oflags & ~bic_mask) ^ eor_mask);
			if ((flags ^ oflags) & PVF_WIRED) {
				if (flags & PVF_WIRED)
					++pmap->pm_stats.wired_count;
				else
					--pmap->pm_stats.wired_count;
			}
			return (oflags);
		}
	}
	return (0);
}

/*
 * Map the specified level 2 pagetable into the level 1 page table for
 * the given pmap to cover a chunk of virtual address space starting from the
 * address specified.
 */
static __inline void
pmap_map_in_l1(struct pmap *pmap, vaddr_t va, paddr_t l2pa, boolean_t selfref)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	/* Map page table into the L1. */
	pmap->pm_pdir[ptva + 0] = L1_C_PROTO | (l2pa + 0x000);
	pmap->pm_pdir[ptva + 1] = L1_C_PROTO | (l2pa + 0x400);
	pmap->pm_pdir[ptva + 2] = L1_C_PROTO | (l2pa + 0x800);
	pmap->pm_pdir[ptva + 3] = L1_C_PROTO | (l2pa + 0xc00);

	/* Map the page table into the page table area. */
	if (selfref)
		*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = L2_S_PROTO | l2pa |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);
}

#if 0
static __inline void
pmap_unmap_in_l1(struct pmap *pmap, vaddr_t va)
{
	vaddr_t ptva;

	/* Calculate the index into the L1 page table. */
	ptva = (va >> L1_S_SHIFT) & ~3;

	/* Unmap page table from the L1. */
	pmap->pm_pdir[ptva + 0] = 0;
	pmap->pm_pdir[ptva + 1] = 0;
	pmap->pm_pdir[ptva + 2] = 0;
	pmap->pm_pdir[ptva + 3] = 0;

	/* Unmap the page table from the page table area. */
	*((pt_entry_t *)(pmap->pm_vptpt + ptva)) = 0;
}
#endif

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vaddr_t
pmap_map(vaddr_t va, paddr_t spa, paddr_t epa, vm_prot_t prot)
{
	while (spa < epa) {
		pmap_kenter_pa(va, spa, prot);
		va += NBPG;
		spa += NBPG;
	}
	pmap_update(pmap_kernel());
	return(va);
}


/*
 * void pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
 *
 * bootstrap the pmap system. This is called from initarm and allows
 * the pmap system to initialise any structures it requires.
 *
 * Currently this sets up the kernel_pmap that is statically allocated
 * and also allocates virtual addresses for certain page hooks.
 * Currently only one page hook is allocated; it is used
 * to zero physical pages of memory.
 * It also initialises the start and end address of the kernel data space.
 */
extern paddr_t physical_freestart;
extern paddr_t physical_freeend;

char *boot_head;

void
pmap_bootstrap(pd_entry_t *kernel_l1pt, pv_addr_t kernel_ptpt)
{
	pt_entry_t *pte;
	int loop;
	paddr_t start, end;
#if NISADMA > 0
	paddr_t istart;
	psize_t isize;
#endif

	pmap_kernel()->pm_pdir = kernel_l1pt;
	pmap_kernel()->pm_pptpt = kernel_ptpt.pv_pa;
	pmap_kernel()->pm_vptpt = kernel_ptpt.pv_va;
	simple_lock_init(&pmap_kernel()->pm_lock);
	pmap_kernel()->pm_obj.pgops = NULL;
	TAILQ_INIT(&(pmap_kernel()->pm_obj.memq));
	pmap_kernel()->pm_obj.uo_npages = 0;
	pmap_kernel()->pm_obj.uo_refs = 1;

	/*
	 * Initialize PAGE_SIZE-dependent variables.
	 */
	uvm_setpagesize();

	loop = 0;
	while (loop < bootconfig.dramblocks) {
		start = (paddr_t)bootconfig.dram[loop].address;
		end = start + (bootconfig.dram[loop].pages * NBPG);
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (pmap_isa_dma_range_intersect(start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf("    ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);

			/*
			 * Load the pieces that come before
			 * the intersection into the default
			 * free list.
			 */
			if (start < istart) {
#if 0
				printf("    BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
			}

			/*
			 * Load the pieces that come after
			 * the intersection into the default
			 * free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf("     AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
		}
#else	/* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
#endif /* NISADMA > 0 */
		++loop;
	}

	virtual_avail = KERNEL_VM_BASE;
	virtual_end = KERNEL_VM_BASE + KERNEL_VM_SIZE;

	/*
	 * now we allocate the "special" VAs which are used for tmp mappings
	 * by the pmap (and other modules).  we allocate the VAs by advancing
	 * virtual_avail (note that there are no pages mapped at these VAs).
	 * we find the PTE that maps the allocated VA via the linear PTE
	 * mapping.
	 */

	pte = ((pt_entry_t *) PTE_BASE) + atop(virtual_avail);

	csrcp = virtual_avail; csrc_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	cdstp = virtual_avail; cdst_pte = pte;
	virtual_avail += PAGE_SIZE; pte++;

	memhook = (char *) virtual_avail;	/* don't need pte */
	virtual_avail += PAGE_SIZE; pte++;

	msgbufaddr = (caddr_t) virtual_avail;	/* don't need pte */
	virtual_avail += round_page(MSGBUFSIZE);
	pte += atop(round_page(MSGBUFSIZE));

	/*
	 * init the static-global locks and global lists.
	 */
	spinlockinit(&pmap_main_lock, "pmaplk", 0);
	simple_lock_init(&pvalloc_lock);
	simple_lock_init(&pmaps_lock);
	LIST_INIT(&pmaps);
	TAILQ_INIT(&pv_freepages);
	TAILQ_INIT(&pv_unusedpgs);

	/*
	 * initialize the pmap pool.
	 */

	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr);

	cpu_dcache_wbinv_all();
}

/*
 * void pmap_init(void)
 *
 * Initialize the pmap module.
 * Called by vm_init() in vm/vm_init.c in order to initialise
 * any structures that the pmap system needs to map virtual memory.
 */

extern int physmem;

void
pmap_init(void)
{

	/*
	 * Set the available memory vars - These do not map to real memory
	 * addresses and cannot as the physical memory is fragmented.
	 * They are used by ps for %mem calculations.
	 * One could argue whether this should be the entire memory or just
	 * the memory that is useable in a user process.
	 */
	avail_start = 0;
	avail_end = physmem * NBPG;

	/*
	 * now we need to free enough pv_entry structures to allow us to get
	 * the kmem_map/kmem_object allocated and inited (done after this
	 * function is finished).  to do this we allocate one bootstrap page out
	 * of kernel_map and use it to provide an initial pool of pv_entry
	 * structures.  we never free this page.
	 */

	pv_initpage = (struct pv_page *) uvm_km_alloc(kernel_map, PAGE_SIZE);
	if (pv_initpage == NULL)
		panic("pmap_init: pv_initpage");
	pv_cachedva = 0; /* a VA we have allocated but not used yet */
	pv_nfpvents = 0;
	(void) pmap_add_pvpage(pv_initpage, FALSE);

	pmap_initialized = TRUE;

	/* Initialise our L1 page table queues and counters */
	SIMPLEQ_INIT(&l1pt_static_queue);
	l1pt_static_queue_count = 0;
	l1pt_static_create_count = 0;
	SIMPLEQ_INIT(&l1pt_queue);
	l1pt_queue_count = 0;
	l1pt_create_count = 0;
	l1pt_reuse_count = 0;
}

/*
 * pmap_postinit()
 *
 * This routine is called after the vm and kmem subsystems have been
 * initialised. This allows the pmap code to perform any initialisation
 * that can only be done once the memory allocation is in place.
 */

void
pmap_postinit(void)
{
	int loop;
	struct l1pt *pt;

#ifdef PMAP_STATIC_L1S
	for (loop = 0; loop < PMAP_STATIC_L1S; ++loop) {
#else	/* PMAP_STATIC_L1S */
	for (loop = 0; loop < max_processes; ++loop) {
#endif	/* PMAP_STATIC_L1S */
		/* Allocate a L1 page table */
		pt = pmap_alloc_l1pt();
		if (!pt)
			panic("Cannot allocate static L1 page tables\n");

		/* Clean it */
		bzero((void *)pt->pt_va, L1_TABLE_SIZE);
		pt->pt_flags |= (PTFLAG_STATIC | PTFLAG_CLEAN);
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pt, pt_queue);
		++l1pt_static_queue_count;
		++l1pt_static_create_count;
	}
}


/*
 * Create and return a physical map.
 *
 * If the size specified for the map is zero, the map is an actual physical
 * map, and may be referenced by the hardware.
 *
 * If the size specified is non-zero, the map will be used in software only,
 * and is bounded by that size.
 */

pmap_t
pmap_create(void)
{
	struct pmap *pmap;

	/*
	 * Fetch pmap entry from the pool
	 */

	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
	/* XXX is this really needed! */
	memset(pmap, 0, sizeof(*pmap));

	simple_lock_init(&pmap->pm_obj.vmobjlock);
	pmap->pm_obj.pgops = NULL;	/* currently not a mappable object */
	TAILQ_INIT(&pmap->pm_obj.memq);
	pmap->pm_obj.uo_npages = 0;
	pmap->pm_obj.uo_refs = 1;
	pmap->pm_stats.wired_count = 0;
	pmap->pm_stats.resident_count = 1;
	pmap->pm_ptphint = NULL;

	/* Now init the machine part of the pmap */
	pmap_pinit(pmap);
	return(pmap);
}

/*
 * pmap_alloc_l1pt()
 *
 * This routine allocates physical and virtual memory for a L1 page table
 * and wires it.
 * A l1pt structure is returned to describe the allocated page table.
 *
 * This routine is allowed to fail if the required memory cannot be allocated.
 * In this case NULL is returned.
 */

struct l1pt *
pmap_alloc_l1pt(void)
{
	paddr_t pa;
	vaddr_t va;
	struct l1pt *pt;
	int error;
	struct vm_page *m;
	pt_entry_t *pte;

	/* Allocate virtual address space for the L1 page table */
	va = uvm_km_valloc(kernel_map, L1_TABLE_SIZE);
	if (va == 0) {
#ifdef DIAGNOSTIC
		PDEBUG(0,
		    printf("pmap: Cannot allocate pageable memory for L1\n"));
#endif	/* DIAGNOSTIC */
		return(NULL);
	}

	/* Allocate memory for the l1pt structure */
	pt = (struct l1pt *)malloc(sizeof(struct l1pt), M_VMPMAP, M_WAITOK);

	/*
	 * Allocate pages from the VM system.
	 */
	TAILQ_INIT(&pt->pt_plist);
	error = uvm_pglistalloc(L1_TABLE_SIZE, physical_start, physical_end,
	    L1_TABLE_SIZE, 0, &pt->pt_plist, 1, M_WAITOK);
	if (error) {
#ifdef DIAGNOSTIC
		PDEBUG(0,
		    printf("pmap: Cannot allocate physical mem for L1 (%d)\n",
		    error));
#endif	/* DIAGNOSTIC */
		/* Release the resources we already have claimed */
		free(pt, M_VMPMAP);
		uvm_km_free(kernel_map, va, L1_TABLE_SIZE);
		return(NULL);
	}

	/* Map our physical pages into our virtual space */
	pt->pt_va = va;
	m = TAILQ_FIRST(&pt->pt_plist);
	while (m && va < (pt->pt_va + L1_TABLE_SIZE)) {
		pa = VM_PAGE_TO_PHYS(m);

		pte = vtopte(va);

		/*
		 * Assert that the PTE is invalid.  If it's invalid,
		 * then we are guaranteed that there won't be an entry
		 * for this VA in the TLB.
		 */
		KDASSERT(pmap_pte_v(pte) == 0);

		*pte = L2_S_PROTO | VM_PAGE_TO_PHYS(m) |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);

		va += NBPG;
		m = m->pageq.tqe_next;
	}

#ifdef DIAGNOSTIC
	if (m)
		panic("pmap_alloc_l1pt: pglist not empty\n");
#endif	/* DIAGNOSTIC */

	pt->pt_flags = 0;
	return(pt);
}

/*
 * Free a L1 page table previously allocated with pmap_alloc_l1pt().
 */
static void
pmap_free_l1pt(struct l1pt *pt)
{
	/* Separate the physical memory for the virtual space */
	pmap_kremove(pt->pt_va, L1_TABLE_SIZE);
	pmap_update(pmap_kernel());

	/* Return the physical memory */
	uvm_pglistfree(&pt->pt_plist);

	/* Free the virtual space */
	uvm_km_free(kernel_map, pt->pt_va, L1_TABLE_SIZE);

	/* Free the l1pt structure */
	free(pt, M_VMPMAP);
}

/*
 * pmap_alloc_ptpt:
 *
 *	Allocate the page table that maps the PTE array.
 */
static int
pmap_alloc_ptpt(struct pmap *pmap)
{
	struct vm_page *pg;
	pt_entry_t *pte;

	KASSERT(pmap->pm_vptpt == 0);

	pmap->pm_vptpt = uvm_km_valloc(kernel_map, L2_TABLE_SIZE);
	if (pmap->pm_vptpt == 0) {
		PDEBUG(0,
		    printf("pmap_alloc_ptpt: no KVA for PTPT\n"));
		return (ENOMEM);
	}

	for (;;) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg != NULL)
			break;
		uvm_wait("pmap_ptpt");
	}

	pmap->pm_pptpt = VM_PAGE_TO_PHYS(pg);

	pte = vtopte(pmap->pm_vptpt);

	KDASSERT(pmap_pte_v(pte) == 0);

	*pte = L2_S_PROTO | pmap->pm_pptpt |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ|VM_PROT_WRITE);

	return (0);
}

/*
 * pmap_free_ptpt:
 *
 *	Free the page table that maps the PTE array.
 */
static void
pmap_free_ptpt(struct pmap *pmap)
{

	pmap_kremove(pmap->pm_vptpt, L2_TABLE_SIZE);
	pmap_update(pmap_kernel());

	uvm_pagefree(PHYS_TO_VM_PAGE(pmap->pm_pptpt));

	uvm_km_free(kernel_map, pmap->pm_vptpt, L2_TABLE_SIZE);
}

/*
 * Allocate a page directory.
 * This routine will either allocate a new page directory from the pool
 * of L1 page tables currently held by the kernel or it will allocate
 * a new one via pmap_alloc_l1pt().
 * It will then initialise the l1 page table for use.
 */
static int
pmap_allocpagedir(struct pmap *pmap)
{
	paddr_t pa;
	struct l1pt *pt;
	int error;

	PDEBUG(0, printf("pmap_allocpagedir(%p)\n", pmap));

	/* Do we have any spare L1's lying around ? */
	if (l1pt_static_queue_count) {
		--l1pt_static_queue_count;
		pt = l1pt_static_queue.sqh_first;
		SIMPLEQ_REMOVE_HEAD(&l1pt_static_queue, pt, pt_queue);
	} else if (l1pt_queue_count) {
		--l1pt_queue_count;
		pt = l1pt_queue.sqh_first;
		SIMPLEQ_REMOVE_HEAD(&l1pt_queue, pt, pt_queue);
		++l1pt_reuse_count;
	} else {
		pt = pmap_alloc_l1pt();
		if (!pt)
			return(ENOMEM);
		++l1pt_create_count;
	}

	/* Store the pointer to the l1 descriptor in the pmap. */
	pmap->pm_l1pt = pt;

	/* Get the physical address of the start of the l1 */
	pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pt->pt_plist));

	/* Store the virtual address of the l1 in the pmap. */
	pmap->pm_pdir = (pd_entry_t *)pt->pt_va;

	/* Clean the L1 if it is dirty */
	if (!(pt->pt_flags & PTFLAG_CLEAN))
		bzero((void *)pmap->pm_pdir, (L1_TABLE_SIZE - KERNEL_PD_SIZE));

	/* Allocate a page table to map all the page tables for this pmap */
	if ((error = pmap_alloc_ptpt(pmap)) != 0) {
		pmap_freepagedir(pmap);
		return (error);
	}

	/* need to lock this all up for growkernel */
	simple_lock(&pmaps_lock);

	/* Duplicate the kernel mappings. */
	bcopy((char *)pmap_kernel()->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
	    (char *)pmap->pm_pdir + (L1_TABLE_SIZE - KERNEL_PD_SIZE),
	    KERNEL_PD_SIZE);

	/* Wire in this page table */
	pmap_map_in_l1(pmap, PTE_BASE, pmap->pm_pptpt, TRUE);

	pt->pt_flags &= ~PTFLAG_CLEAN;	/* L1 is dirty now */

	/*
	 * Map the kernel page tables into the new PT map.
	 */
	bcopy((char *)(PTE_BASE
	    + (PTE_BASE >> (PGSHIFT - 2))
	    + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2)),
	    (char *)pmap->pm_vptpt + ((L1_TABLE_SIZE - KERNEL_PD_SIZE) >> 2),
	    (KERNEL_PD_SIZE >> 2));

	LIST_INSERT_HEAD(&pmaps, pmap, pm_list);
	simple_unlock(&pmaps_lock);

	return(0);
}


/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */

void
pmap_pinit(struct pmap *pmap)
{
	int backoff = 6;
	int retry = 10;

	PDEBUG(0, printf("pmap_pinit(%p)\n", pmap));

	/* Keep looping until we succeed in allocating a page directory */
	while (pmap_allocpagedir(pmap) != 0) {
		/*
		 * Ok we failed to allocate a suitable block of memory for an
		 * L1 page table. This means that either:
		 * 1. 16KB of virtual address space could not be allocated
		 * 2. 16KB of physically contiguous memory on a 16KB boundary
		 *    could not be allocated.
		 *
		 * Since we cannot fail we will sleep for a while and try
		 * again.
		 *
		 * Searching for a suitable L1 PT is expensive:
		 * to avoid hogging the system when memory is really
		 * scarce, use an exponential back-off so that
		 * eventually we won't retry more than once every 8
		 * seconds. This should allow other processes to run
		 * to completion and free up resources.
		 */
		(void) ltsleep(&lbolt, PVM, "l1ptwait", (hz << 3) >> backoff,
		    NULL);
		if (--retry == 0) {
			retry = 10;
			if (backoff)
				--backoff;
		}
	}

	if (vector_page < KERNEL_BASE) {
		/*
		 * Map the vector page.  This will also allocate and map
		 * an L2 table for it.
		 */
		pmap_enter(pmap, vector_page, systempage.pv_pa,
		    VM_PROT_READ, VM_PROT_READ | PMAP_WIRED);
		pmap_update(pmap);
	}
}


void
pmap_freepagedir(struct pmap *pmap)
{
	/* Free the memory used for the page table mapping */
	if (pmap->pm_vptpt != 0)
		pmap_free_ptpt(pmap);

	/* junk the L1 page table */
	if (pmap->pm_l1pt->pt_flags & PTFLAG_STATIC) {
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_static_queue, pmap->pm_l1pt, pt_queue);
		++l1pt_static_queue_count;
	} else if (l1pt_queue_count < 8) {
		/* Add the page table to the queue */
		SIMPLEQ_INSERT_TAIL(&l1pt_queue, pmap->pm_l1pt, pt_queue);
		++l1pt_queue_count;
	} else
		pmap_free_l1pt(pmap->pm_l1pt);
}


/*
 * Retire the given physical map from service.
 * Should only be called if the map contains no valid mappings.
 */

void
pmap_destroy(struct pmap *pmap)
{
	struct vm_page *page;
	int count;

	if (pmap == NULL)
		return;

	PDEBUG(0, printf("pmap_destroy(%p)\n", pmap));

	/*
	 * Drop reference count
	 */
	simple_lock(&pmap->pm_obj.vmobjlock);
	count = --pmap->pm_obj.uo_refs;
	simple_unlock(&pmap->pm_obj.vmobjlock);
	if (count > 0) {
		return;
	}

	/*
	 * reference count is zero, free pmap resources and then free pmap.
	 */

	/*
	 * remove it from global list of pmaps
	 */

	simple_lock(&pmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	simple_unlock(&pmaps_lock);

	if (vector_page < KERNEL_BASE) {
		/* Remove the vector page mapping */
		pmap_remove(pmap, vector_page, vector_page + NBPG);
		pmap_update(pmap);
	}

	/*
	 * Free any page tables still mapped
	 * This is only temporary until pmap_enter can count the number
	 * of mappings made in a page table. Then pmap_remove() can
	 * reduce the count and free the pagetable when the count
	 * reaches zero.  Note that entries in this list should match the
	 * contents of the ptpt, however this is faster than walking 1024
	 * entries looking for pt's
	 * taken from i386 pmap.c
	 */
	/*
	 * vmobjlock must be held while freeing pages
	 */
	simple_lock(&pmap->pm_obj.vmobjlock);
	while ((page = TAILQ_FIRST(&pmap->pm_obj.memq)) != NULL) {
		KASSERT((page->flags & PG_BUSY) == 0);
		page->wire_count = 0;
		uvm_pagefree(page);
	}
	simple_unlock(&pmap->pm_obj.vmobjlock);

	/* Free the page dir */
	pmap_freepagedir(pmap);

	/* return the pmap to the pool */
	pool_put(&pmap_pmap_pool, pmap);
}


/*
 * void pmap_reference(struct pmap *pmap)
 *
 * Add a reference to the specified pmap.
 */

void
pmap_reference(struct pmap *pmap)
{
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	pmap->pm_obj.uo_refs++;
	simple_unlock(&pmap->pm_lock);
}

/*
 * void pmap_virtual_space(vaddr_t *start, vaddr_t *end)
 *
 * Return the start and end addresses of the kernel's virtual space.
 * These values are setup in pmap_bootstrap and are updated as pages
 * are allocated.
 */

void
pmap_virtual_space(vaddr_t *start, vaddr_t *end)
{
	*start = virtual_avail;
	*end = virtual_end;
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	struct pcb *pcb = &p->p_addr->u_pcb;

	(void) pmap_extract(pmap_kernel(), (vaddr_t)pmap->pm_pdir,
	    (paddr_t *)&pcb->pcb_pagedir);

	PDEBUG(0, printf("pmap_activate: p=%p pmap=%p pcb=%p pdir=%p l1=%p\n",
	    p, pmap, pcb, pmap->pm_pdir, pcb->pcb_pagedir));

	if (p == curproc) {
		PDEBUG(0, printf("pmap_activate: setting TTB\n"));
		setttb((u_int)pcb->pcb_pagedir);
	}
}

/*
 * Deactivate the address space of the specified process.
 */
void
pmap_deactivate(struct proc *p)
{
}

/*
 * Perform any deferred pmap operations.
 */
void
pmap_update(struct pmap *pmap)
{

	/*
	 * We haven't deferred any pmap operations, but we do need to
	 * make sure TLB/cache operations have completed.
	 */
	cpu_cpwait();
}

/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table. It's used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 * o If there are no mappings, we don't bother doing anything with the cache.
 * o If there is one mapping, we clean just that page.
 * o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000 a whole cache clean will be performed rather than
 * just the 1 page. Since this should not occur in everyday use and if it does
 * it will just result in not the most efficient clean for the page.
 */
static int
pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
{
	struct pmap *pmap;
	struct pv_entry *npv;
	int cache_needs_cleaning = 0;
	vaddr_t page_to_clean = 0;

	if (pv == NULL)
		/* nothing mapped in so nothing to flush */
		return (0);

	/* Since we flush the cache each time we change curproc, we
	 * only need to flush the page if it is in the current pmap.
	 */
	if (curproc)
		pmap = curproc->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (npv = pv; npv; npv = npv->pv_next) {
		if (npv->pv_pmap == pmap) {
			/* The page is mapped non-cacheable in
			 * this map.  No need to flush the cache.
			 */
			if (npv->pv_flags & PVF_NC) {
#ifdef DIAGNOSTIC
				if (cache_needs_cleaning)
					panic("pmap_clean_page: "
					    "cache inconsistency");
#endif
				break;
			}
#if 0
			/*
			 * XXX Can't do this because pmap_protect doesn't
			 * XXX clean the page when it does a write-protect.
			 */
			else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
				continue;
#endif
			if (cache_needs_cleaning){
				page_to_clean = 0;
				break;
			}
			else
				page_to_clean = npv->pv_va;
			cache_needs_cleaning = 1;
		}
	}

	if (page_to_clean)
		cpu_idcache_wbinv_range(page_to_clean, NBPG);
	else if (cache_needs_cleaning) {
		cpu_idcache_wbinv_all();
		return (1);
	}
	return (0);
}

/*
 * pmap_zero_page()
 *
 * Zero a given physical page by mapping it at a page hook point.
 * In doing the zero page op, the page we zero is mapped cacheable, as with
 * StrongARM accesses to non-cached pages are non-burst making writing
 * _any_ bulk data very slow.
 */
#if ARM_MMU_GENERIC == 1
void
pmap_zero_page_generic(paddr_t phys)
{
#ifdef DEBUG
	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);

	if (pg->mdpage.pvh_list != NULL)
		panic("pmap_zero_page: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bzero_page(cdstp);
	cpu_dcache_wbinv_range(cdstp, NBPG);
}
#endif /* ARM_MMU_GENERIC == 1 */

#if ARM_MMU_XSCALE == 1
void
pmap_zero_page_xscale(paddr_t phys)
{
#ifdef DEBUG
	struct vm_page *pg = PHYS_TO_VM_PAGE(phys);

	if (pg->mdpage.pvh_list != NULL)
		panic("pmap_zero_page: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bzero_page(cdstp);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */

/* pmap_pageidlezero()
 *
 * The same as above, except that we assume that the page is not
 * mapped.  This means we never have to flush the cache first.  Called
 * from the idle loop.
 */
boolean_t
pmap_pageidlezero(paddr_t phys)
{
	int i, *ptr;
	boolean_t rv = TRUE;
#ifdef DEBUG
	struct vm_page *pg;

	pg = PHYS_TO_VM_PAGE(phys);
	if (pg->mdpage.pvh_list != NULL)
		panic("pmap_pageidlezero: page has mappings");
#endif

	KDASSERT((phys & PGOFSET) == 0);

	/*
	 * Hook in the page, zero it, and purge the cache for that
	 * zeroed page.  Invalidate the TLB as needed.
	 */
	*cdst_pte = L2_S_PROTO | phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();

	for (i = 0, ptr = (int *)cdstp;
	    i < (NBPG / sizeof(int)); i++) {
		if (sched_whichqs != 0) {
			/*
			 * A process has become ready.  Abort now,
			 * so we don't keep it waiting while we
			 * do slow memory access to finish this
			 * page.
			 */
			rv = FALSE;
			break;
		}
		*ptr++ = 0;
	}

	if (rv)
		/*
		 * if we aborted we'll rezero this page again later so don't
		 * purge it unless we finished it
		 */
		cpu_dcache_wbinv_range(cdstp, NBPG);
	return (rv);
}

/*
 * pmap_copy_page()
 *
 * Copy one physical page into another, by mapping the pages into
 * hook points. The same comment regarding cachability as in
 * pmap_zero_page also applies here.
 */
#if ARM_MMU_GENERIC == 1
void
pmap_copy_page_generic(paddr_t src, paddr_t dst)
{
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#ifdef DEBUG
	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);

	if (dst_pg->mdpage.pvh_list != NULL)
		panic("pmap_copy_page: dst page has mappings");
#endif

	KDASSERT((src & PGOFSET) == 0);
	KDASSERT((dst & PGOFSET) == 0);

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
	simple_lock(&src_pg->mdpage.pvh_slock);
	(void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE);

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page.  Invalidate the TLB
	 * as required.
1924 */ 1925 *csrc_pte = L2_S_PROTO | src | 1926 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; 1927 *cdst_pte = L2_S_PROTO | dst | 1928 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 1929 cpu_tlb_flushD_SE(csrcp); 1930 cpu_tlb_flushD_SE(cdstp); 1931 cpu_cpwait(); 1932 bcopy_page(csrcp, cdstp); 1933 cpu_dcache_inv_range(csrcp, NBPG); 1934 simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ 1935 cpu_dcache_wbinv_range(cdstp, NBPG); 1936 } 1937 #endif /* ARM_MMU_GENERIC == 1 */ 1938 1939 #if ARM_MMU_XSCALE == 1 1940 void 1941 pmap_copy_page_xscale(paddr_t src, paddr_t dst) 1942 { 1943 struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 1944 #ifdef DEBUG 1945 struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 1946 1947 if (dst_pg->mdpage.pvh_list != NULL) 1948 panic("pmap_copy_page: dst page has mappings"); 1949 #endif 1950 1951 KDASSERT((src & PGOFSET) == 0); 1952 KDASSERT((dst & PGOFSET) == 0); 1953 1954 /* 1955 * Clean the source page. Hold the source page's lock for 1956 * the duration of the copy so that no other mappings can 1957 * be created while we have a potentially aliased mapping. 1958 */ 1959 simple_lock(&src_pg->mdpage.pvh_slock); 1960 (void) pmap_clean_page(src_pg->mdpage.pvh_list, TRUE); 1961 1962 /* 1963 * Map the pages into the page hook points, copy them, and purge 1964 * the cache for the appropriate page. Invalidate the TLB 1965 * as required. 1966 */ 1967 *csrc_pte = L2_S_PROTO | src | 1968 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 1969 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 1970 *cdst_pte = L2_S_PROTO | dst | 1971 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 1972 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 1973 cpu_tlb_flushD_SE(csrcp); 1974 cpu_tlb_flushD_SE(cdstp); 1975 cpu_cpwait(); 1976 bcopy_page(csrcp, cdstp); 1977 simple_unlock(&src_pg->mdpage.pvh_slock); /* cache is safe again */ 1978 xscale_cache_clean_minidata(); 1979 } 1980 #endif /* ARM_MMU_XSCALE == 1 */ 1981 1982 #if 0 1983 void 1984 pmap_pte_addref(struct pmap *pmap, vaddr_t va) 1985 { 1986 pd_entry_t *pde; 1987 paddr_t pa; 1988 struct vm_page *m; 1989 1990 if (pmap == pmap_kernel()) 1991 return; 1992 1993 pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT)); 1994 pa = pmap_pte_pa(pde); 1995 m = PHYS_TO_VM_PAGE(pa); 1996 ++m->wire_count; 1997 #ifdef MYCROFT_HACK 1998 printf("addref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n", 1999 pmap, va, pde, pa, m, m->wire_count); 2000 #endif 2001 } 2002 2003 void 2004 pmap_pte_delref(struct pmap *pmap, vaddr_t va) 2005 { 2006 pd_entry_t *pde; 2007 paddr_t pa; 2008 struct vm_page *m; 2009 2010 if (pmap == pmap_kernel()) 2011 return; 2012 2013 pde = pmap_pde(pmap, va & ~(3 << L1_S_SHIFT)); 2014 pa = pmap_pte_pa(pde); 2015 m = PHYS_TO_VM_PAGE(pa); 2016 --m->wire_count; 2017 #ifdef MYCROFT_HACK 2018 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p wire=%d\n", 2019 pmap, va, pde, pa, m, m->wire_count); 2020 #endif 2021 if (m->wire_count == 0) { 2022 #ifdef MYCROFT_HACK 2023 printf("delref pmap=%p va=%08lx pde=%p pa=%08lx m=%p\n", 2024 pmap, va, pde, pa, m); 2025 #endif 2026 pmap_unmap_in_l1(pmap, va); 2027 uvm_pagefree(m); 2028 --pmap->pm_stats.resident_count; 2029 } 2030 } 2031 #else 2032 #define pmap_pte_addref(pmap, va) 2033 #define pmap_pte_delref(pmap, va) 2034 #endif 2035 2036 /* 2037 * Since we have a virtually indexed cache, we may need to inhibit caching if 2038 * there is more than one mapping and at least one of them is writable. 
2039 * Since we purge the cache on every context switch, we only need to check for 2040 * other mappings within the same pmap, or kernel_pmap. 2041 * This function is also called when a page is unmapped, to possibly reenable 2042 * caching on any remaining mappings. 2043 * 2044 * The code implements the following logic, where: 2045 * 2046 * KW = # of kernel read/write pages 2047 * KR = # of kernel read only pages 2048 * UW = # of user read/write pages 2049 * UR = # of user read only pages 2050 * OW = # of user read/write pages in another pmap, then 2051 * 2052 * KC = kernel mapping is cacheable 2053 * UC = user mapping is cacheable 2054 * 2055 * KW=0,KR=0 KW=0,KR>0 KW=1,KR=0 KW>1,KR>=0 2056 * +--------------------------------------------- 2057 * UW=0,UR=0,OW=0 | --- KC=1 KC=1 KC=0 2058 * UW=0,UR>0,OW=0 | UC=1 KC=1,UC=1 KC=0,UC=0 KC=0,UC=0 2059 * UW=0,UR>0,OW>0 | UC=1 KC=0,UC=1 KC=0,UC=0 KC=0,UC=0 2060 * UW=1,UR=0,OW=0 | UC=1 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 2061 * UW>1,UR>=0,OW>=0 | UC=0 KC=0,UC=0 KC=0,UC=0 KC=0,UC=0 2062 * 2063 * Note that the pmap must have it's ptes mapped in, and passed with ptes. 2064 */ 2065 __inline static void 2066 pmap_vac_me_harder(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes, 2067 boolean_t clear_cache) 2068 { 2069 if (pmap == pmap_kernel()) 2070 pmap_vac_me_kpmap(pmap, pg, ptes, clear_cache); 2071 else 2072 pmap_vac_me_user(pmap, pg, ptes, clear_cache); 2073 } 2074 2075 static void 2076 pmap_vac_me_kpmap(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes, 2077 boolean_t clear_cache) 2078 { 2079 int user_entries = 0; 2080 int user_writable = 0; 2081 int user_cacheable = 0; 2082 int kernel_entries = 0; 2083 int kernel_writable = 0; 2084 int kernel_cacheable = 0; 2085 struct pv_entry *pv; 2086 struct pmap *last_pmap = pmap; 2087 2088 #ifdef DIAGNOSTIC 2089 if (pmap != pmap_kernel()) 2090 panic("pmap_vac_me_kpmap: pmap != pmap_kernel()"); 2091 #endif 2092 2093 /* 2094 * Pass one, see if there are both kernel and user pmaps for 2095 * this page. Calculate whether there are user-writable or 2096 * kernel-writable pages. 2097 */ 2098 for (pv = pg->mdpage.pvh_list; pv != NULL; pv = pv->pv_next) { 2099 if (pv->pv_pmap != pmap) { 2100 user_entries++; 2101 if (pv->pv_flags & PVF_WRITE) 2102 user_writable++; 2103 if ((pv->pv_flags & PVF_NC) == 0) 2104 user_cacheable++; 2105 } else { 2106 kernel_entries++; 2107 if (pv->pv_flags & PVF_WRITE) 2108 kernel_writable++; 2109 if ((pv->pv_flags & PVF_NC) == 0) 2110 kernel_cacheable++; 2111 } 2112 } 2113 2114 /* 2115 * We know we have just been updating a kernel entry, so if 2116 * all user pages are already cacheable, then there is nothing 2117 * further to do. 2118 */ 2119 if (kernel_entries == 0 && 2120 user_cacheable == user_entries) 2121 return; 2122 2123 if (user_entries) { 2124 /* 2125 * Scan over the list again, for each entry, if it 2126 * might not be set correctly, call pmap_vac_me_user 2127 * to recalculate the settings. 2128 */ 2129 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) { 2130 /* 2131 * We know kernel mappings will get set 2132 * correctly in other calls. We also know 2133 * that if the pmap is the same as last_pmap 2134 * then we've just handled this entry. 2135 */ 2136 if (pv->pv_pmap == pmap || pv->pv_pmap == last_pmap) 2137 continue; 2138 /* 2139 * If there are kernel entries and this page 2140 * is writable but non-cacheable, then we can 2141 * skip this entry also. 
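 *
 * (With kernel mappings present, a writable user mapping must stay
 * non-cacheable, and this one already is, so no recalculation is
 * needed for it.)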
2142 */ 2143 if (kernel_entries > 0 && 2144 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 2145 (PVF_NC | PVF_WRITE)) 2146 continue; 2147 /* 2148 * Similarly if there are no kernel-writable 2149 * entries and the page is already 2150 * read-only/cacheable. 2151 */ 2152 if (kernel_writable == 0 && 2153 (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 2154 continue; 2155 /* 2156 * For some of the remaining cases, we know 2157 * that we must recalculate, but for others we 2158 * can't tell if they are correct or not, so 2159 * we recalculate anyway. 2160 */ 2161 pmap_unmap_ptes(last_pmap); 2162 last_pmap = pv->pv_pmap; 2163 ptes = pmap_map_ptes(last_pmap); 2164 pmap_vac_me_user(last_pmap, pg, ptes, 2165 pmap_is_curpmap(last_pmap)); 2166 } 2167 /* Restore the pte mapping that was passed to us. */ 2168 if (last_pmap != pmap) { 2169 pmap_unmap_ptes(last_pmap); 2170 ptes = pmap_map_ptes(pmap); 2171 } 2172 if (kernel_entries == 0) 2173 return; 2174 } 2175 2176 pmap_vac_me_user(pmap, pg, ptes, clear_cache); 2177 return; 2178 } 2179 2180 static void 2181 pmap_vac_me_user(struct pmap *pmap, struct vm_page *pg, pt_entry_t *ptes, 2182 boolean_t clear_cache) 2183 { 2184 struct pmap *kpmap = pmap_kernel(); 2185 struct pv_entry *pv, *npv; 2186 int entries = 0; 2187 int writable = 0; 2188 int cacheable_entries = 0; 2189 int kern_cacheable = 0; 2190 int other_writable = 0; 2191 2192 pv = pg->mdpage.pvh_list; 2193 KASSERT(ptes != NULL); 2194 2195 /* 2196 * Count mappings and writable mappings in this pmap. 2197 * Include kernel mappings as part of our own. 2198 * Keep a pointer to the first one. 2199 */ 2200 for (npv = pv; npv; npv = npv->pv_next) { 2201 /* Count mappings in the same pmap */ 2202 if (pmap == npv->pv_pmap || 2203 kpmap == npv->pv_pmap) { 2204 if (entries++ == 0) 2205 pv = npv; 2206 /* Cacheable mappings */ 2207 if ((npv->pv_flags & PVF_NC) == 0) { 2208 cacheable_entries++; 2209 if (kpmap == npv->pv_pmap) 2210 kern_cacheable++; 2211 } 2212 /* Writable mappings */ 2213 if (npv->pv_flags & PVF_WRITE) 2214 ++writable; 2215 } else if (npv->pv_flags & PVF_WRITE) 2216 other_writable = 1; 2217 } 2218 2219 PDEBUG(3,printf("pmap_vac_me_harder: pmap %p Entries %d, " 2220 "writable %d cacheable %d %s\n", pmap, entries, writable, 2221 cacheable_entries, clear_cache ? "clean" : "no clean")); 2222 2223 /* 2224 * Enable or disable caching as necessary. 2225 * Note: the first entry might be part of the kernel pmap, 2226 * so we can't assume this is indicative of the state of the 2227 * other (maybe non-kpmap) entries. 2228 */ 2229 if ((entries > 1 && writable) || 2230 (entries > 0 && pmap == kpmap && other_writable)) { 2231 if (cacheable_entries == 0) 2232 return; 2233 for (npv = pv; npv; npv = npv->pv_next) { 2234 if ((pmap == npv->pv_pmap 2235 || kpmap == npv->pv_pmap) && 2236 (npv->pv_flags & PVF_NC) == 0) { 2237 ptes[arm_btop(npv->pv_va)] &= ~L2_S_CACHE_MASK; 2238 npv->pv_flags |= PVF_NC; 2239 /* 2240 * If this page needs flushing from the 2241 * cache, and we aren't going to do it 2242 * below, do it now. 
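 *
 * ("Below" refers to the wholesale cpu_idcache_wbinv_all() after
 * this loop, taken roughly when four or more cacheable entries
 * are being disabled; for smaller counts each page is flushed
 * individually here.)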
2243 */ 2244 if ((cacheable_entries < 4 && 2245 (clear_cache || npv->pv_pmap == kpmap)) || 2246 (npv->pv_pmap == kpmap && 2247 !clear_cache && kern_cacheable < 4)) { 2248 cpu_idcache_wbinv_range(npv->pv_va, 2249 NBPG); 2250 cpu_tlb_flushID_SE(npv->pv_va); 2251 } 2252 } 2253 } 2254 if ((clear_cache && cacheable_entries >= 4) || 2255 kern_cacheable >= 4) { 2256 cpu_idcache_wbinv_all(); 2257 cpu_tlb_flushID(); 2258 } 2259 cpu_cpwait(); 2260 } else if (entries > 0) { 2261 /* 2262 * Turn cacheing back on for some pages. If it is a kernel 2263 * page, only do so if there are no other writable pages. 2264 */ 2265 for (npv = pv; npv; npv = npv->pv_next) { 2266 if ((pmap == npv->pv_pmap || 2267 (kpmap == npv->pv_pmap && other_writable == 0)) && 2268 (npv->pv_flags & PVF_NC)) { 2269 ptes[arm_btop(npv->pv_va)] |= 2270 pte_l2_s_cache_mode; 2271 npv->pv_flags &= ~PVF_NC; 2272 } 2273 } 2274 } 2275 } 2276 2277 /* 2278 * pmap_remove() 2279 * 2280 * pmap_remove is responsible for nuking a number of mappings for a range 2281 * of virtual address space in the current pmap. To do this efficiently 2282 * is interesting, because in a number of cases a wide virtual address 2283 * range may be supplied that contains few actual mappings. So, the 2284 * optimisations are: 2285 * 1. Try and skip over hunks of address space for which an L1 entry 2286 * does not exist. 2287 * 2. Build up a list of pages we've hit, up to a maximum, so we can 2288 * maybe do just a partial cache clean. This path of execution is 2289 * complicated by the fact that the cache must be flushed _before_ 2290 * the PTE is nuked, being a VAC :-) 2291 * 3. Maybe later fast-case a single page, but I don't think this is 2292 * going to make _that_ much difference overall. 2293 */ 2294 2295 #define PMAP_REMOVE_CLEAN_LIST_SIZE 3 2296 2297 void 2298 pmap_remove(struct pmap *pmap, vaddr_t sva, vaddr_t eva) 2299 { 2300 int cleanlist_idx = 0; 2301 struct pagelist { 2302 vaddr_t va; 2303 pt_entry_t *pte; 2304 } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 2305 pt_entry_t *pte = 0, *ptes; 2306 paddr_t pa; 2307 int pmap_active; 2308 struct vm_page *pg; 2309 2310 /* Exit quick if there is no pmap */ 2311 if (!pmap) 2312 return; 2313 2314 PDEBUG(0, printf("pmap_remove: pmap=%p sva=%08lx eva=%08lx\n", 2315 pmap, sva, eva)); 2316 2317 /* 2318 * we lock in the pmap => vm_page direction 2319 */ 2320 PMAP_MAP_TO_HEAD_LOCK(); 2321 2322 ptes = pmap_map_ptes(pmap); 2323 /* Get a page table pointer */ 2324 while (sva < eva) { 2325 if (pmap_pde_page(pmap_pde(pmap, sva))) 2326 break; 2327 sva = (sva & L1_S_FRAME) + L1_S_SIZE; 2328 } 2329 2330 pte = &ptes[arm_btop(sva)]; 2331 /* Note if the pmap is active thus require cache and tlb cleans */ 2332 pmap_active = pmap_is_curpmap(pmap); 2333 2334 /* Now loop along */ 2335 while (sva < eva) { 2336 /* Check if we can move to the next PDE (l1 chunk) */ 2337 if (!(sva & L2_ADDR_BITS)) 2338 if (!pmap_pde_page(pmap_pde(pmap, sva))) { 2339 sva += L1_S_SIZE; 2340 pte += arm_btop(L1_S_SIZE); 2341 continue; 2342 } 2343 2344 /* We've found a valid PTE, so this page of PTEs has to go. */ 2345 if (pmap_pte_v(pte)) { 2346 /* Update statistics */ 2347 --pmap->pm_stats.resident_count; 2348 2349 /* 2350 * Add this page to our cache remove list, if we can. 
2351 * If, however the cache remove list is totally full, 2352 * then do a complete cache invalidation taking note 2353 * to backtrack the PTE table beforehand, and ignore 2354 * the lists in future because there's no longer any 2355 * point in bothering with them (we've paid the 2356 * penalty, so will carry on unhindered). Otherwise, 2357 * when we fall out, we just clean the list. 2358 */ 2359 PDEBUG(10, printf("remove: inv pte at %p(%x) ", pte, *pte)); 2360 pa = pmap_pte_pa(pte); 2361 2362 if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { 2363 /* Add to the clean list. */ 2364 cleanlist[cleanlist_idx].pte = pte; 2365 cleanlist[cleanlist_idx].va = sva; 2366 cleanlist_idx++; 2367 } else if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { 2368 int cnt; 2369 2370 /* Nuke everything if needed. */ 2371 if (pmap_active) { 2372 cpu_idcache_wbinv_all(); 2373 cpu_tlb_flushID(); 2374 } 2375 2376 /* 2377 * Roll back the previous PTE list, 2378 * and zero out the current PTE. 2379 */ 2380 for (cnt = 0; cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { 2381 *cleanlist[cnt].pte = 0; 2382 pmap_pte_delref(pmap, cleanlist[cnt].va); 2383 } 2384 *pte = 0; 2385 pmap_pte_delref(pmap, sva); 2386 cleanlist_idx++; 2387 } else { 2388 /* 2389 * We've already nuked the cache and 2390 * TLB, so just carry on regardless, 2391 * and we won't need to do it again 2392 */ 2393 *pte = 0; 2394 pmap_pte_delref(pmap, sva); 2395 } 2396 2397 /* 2398 * Update flags. In a number of circumstances, 2399 * we could cluster a lot of these and do a 2400 * number of sequential pages in one go. 2401 */ 2402 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 2403 struct pv_entry *pve; 2404 simple_lock(&pg->mdpage.pvh_slock); 2405 pve = pmap_remove_pv(pg, pmap, sva); 2406 pmap_free_pv(pmap, pve); 2407 pmap_vac_me_harder(pmap, pg, ptes, FALSE); 2408 simple_unlock(&pg->mdpage.pvh_slock); 2409 } 2410 } 2411 sva += NBPG; 2412 pte++; 2413 } 2414 2415 pmap_unmap_ptes(pmap); 2416 /* 2417 * Now, if we've fallen through down to here, chances are that there 2418 * are less than PMAP_REMOVE_CLEAN_LIST_SIZE mappings left. 2419 */ 2420 if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { 2421 u_int cnt; 2422 2423 for (cnt = 0; cnt < cleanlist_idx; cnt++) { 2424 if (pmap_active) { 2425 cpu_idcache_wbinv_range(cleanlist[cnt].va, 2426 NBPG); 2427 *cleanlist[cnt].pte = 0; 2428 cpu_tlb_flushID_SE(cleanlist[cnt].va); 2429 } else 2430 *cleanlist[cnt].pte = 0; 2431 pmap_pte_delref(pmap, cleanlist[cnt].va); 2432 } 2433 } 2434 PMAP_MAP_TO_HEAD_UNLOCK(); 2435 } 2436 2437 /* 2438 * Routine: pmap_remove_all 2439 * Function: 2440 * Removes this physical page from 2441 * all physical maps in which it resides. 2442 * Reflects back modify bits to the pager. 
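 *
 * In this pmap it is reached via pmap_page_protect() when the new
 * protection is VM_PROT_NONE.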
2443 */ 2444 2445 static void 2446 pmap_remove_all(struct vm_page *pg) 2447 { 2448 struct pv_entry *pv, *npv; 2449 struct pmap *pmap; 2450 pt_entry_t *pte, *ptes; 2451 2452 PDEBUG(0, printf("pmap_remove_all: pa=%lx ", VM_PAGE_TO_PHYS(pg))); 2453 2454 /* set vm_page => pmap locking */ 2455 PMAP_HEAD_TO_MAP_LOCK(); 2456 2457 simple_lock(&pg->mdpage.pvh_slock); 2458 2459 pv = pg->mdpage.pvh_list; 2460 if (pv == NULL) { 2461 PDEBUG(0, printf("free page\n")); 2462 simple_unlock(&pg->mdpage.pvh_slock); 2463 PMAP_HEAD_TO_MAP_UNLOCK(); 2464 return; 2465 } 2466 pmap_clean_page(pv, FALSE); 2467 2468 while (pv) { 2469 pmap = pv->pv_pmap; 2470 ptes = pmap_map_ptes(pmap); 2471 pte = &ptes[arm_btop(pv->pv_va)]; 2472 2473 PDEBUG(0, printf("[%p,%08x,%08lx,%08x] ", pmap, *pte, 2474 pv->pv_va, pv->pv_flags)); 2475 #ifdef DEBUG 2476 if (pmap_pde_page(pmap_pde(pmap, pv->pv_va)) == 0 || 2477 pmap_pte_v(pte) == 0 || 2478 pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(pg)) 2479 panic("pmap_remove_all: bad mapping"); 2480 #endif /* DEBUG */ 2481 2482 /* 2483 * Update statistics 2484 */ 2485 --pmap->pm_stats.resident_count; 2486 2487 /* Wired bit */ 2488 if (pv->pv_flags & PVF_WIRED) 2489 --pmap->pm_stats.wired_count; 2490 2491 /* 2492 * Invalidate the PTEs. 2493 * XXX: should cluster them up and invalidate as many 2494 * as possible at once. 2495 */ 2496 2497 #ifdef needednotdone 2498 reduce wiring count on page table pages as references drop 2499 #endif 2500 2501 *pte = 0; 2502 pmap_pte_delref(pmap, pv->pv_va); 2503 2504 npv = pv->pv_next; 2505 pmap_free_pv(pmap, pv); 2506 pv = npv; 2507 pmap_unmap_ptes(pmap); 2508 } 2509 pg->mdpage.pvh_list = NULL; 2510 simple_unlock(&pg->mdpage.pvh_slock); 2511 PMAP_HEAD_TO_MAP_UNLOCK(); 2512 2513 PDEBUG(0, printf("done\n")); 2514 cpu_tlb_flushID(); 2515 cpu_cpwait(); 2516 } 2517 2518 2519 /* 2520 * Set the physical protection on the specified range of this map as requested. 2521 */ 2522 2523 void 2524 pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot) 2525 { 2526 pt_entry_t *pte = NULL, *ptes; 2527 struct vm_page *pg; 2528 int armprot; 2529 int flush = 0; 2530 paddr_t pa; 2531 2532 PDEBUG(0, printf("pmap_protect: pmap=%p %08lx->%08lx %x\n", 2533 pmap, sva, eva, prot)); 2534 2535 if (~prot & VM_PROT_READ) { 2536 /* Just remove the mappings. */ 2537 pmap_remove(pmap, sva, eva); 2538 /* pmap_update not needed as it should be called by the caller 2539 * of pmap_protect */ 2540 return; 2541 } 2542 if (prot & VM_PROT_WRITE) { 2543 /* 2544 * If this is a read->write transition, just ignore it and let 2545 * uvm_fault() take care of it later. 2546 */ 2547 return; 2548 } 2549 2550 /* Need to lock map->head */ 2551 PMAP_MAP_TO_HEAD_LOCK(); 2552 2553 ptes = pmap_map_ptes(pmap); 2554 2555 /* 2556 * OK, at this point, we know we're doing write-protect operation. 2557 * If the pmap is active, write-back the range. 2558 */ 2559 if (pmap_is_curpmap(pmap)) 2560 cpu_dcache_wb_range(sva, eva - sva); 2561 2562 /* 2563 * We need to acquire a pointer to a page table page before entering 2564 * the following loop. 2565 */ 2566 while (sva < eva) { 2567 if (pmap_pde_page(pmap_pde(pmap, sva))) 2568 break; 2569 sva = (sva & L1_S_FRAME) + L1_S_SIZE; 2570 } 2571 2572 pte = &ptes[arm_btop(sva)]; 2573 2574 while (sva < eva) { 2575 /* only check once in a while */ 2576 if ((sva & L2_ADDR_BITS) == 0) { 2577 if (!pmap_pde_page(pmap_pde(pmap, sva))) { 2578 /* We can race ahead here, to the next pde. 
*/ 2579 sva += L1_S_SIZE; 2580 pte += arm_btop(L1_S_SIZE); 2581 continue; 2582 } 2583 } 2584 2585 if (!pmap_pte_v(pte)) 2586 goto next; 2587 2588 flush = 1; 2589 2590 armprot = 0; 2591 if (sva < VM_MAXUSER_ADDRESS) 2592 armprot |= L2_S_PROT_U; 2593 else if (sva < VM_MAX_ADDRESS) 2594 armprot |= L2_S_PROT_W; /* XXX Ekk what is this ? */ 2595 *pte = (*pte & 0xfffff00f) | armprot; 2596 2597 pa = pmap_pte_pa(pte); 2598 2599 /* Get the physical page index */ 2600 2601 /* Clear write flag */ 2602 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 2603 simple_lock(&pg->mdpage.pvh_slock); 2604 (void) pmap_modify_pv(pmap, sva, pg, PVF_WRITE, 0); 2605 pmap_vac_me_harder(pmap, pg, ptes, FALSE); 2606 simple_unlock(&pg->mdpage.pvh_slock); 2607 } 2608 2609 next: 2610 sva += NBPG; 2611 pte++; 2612 } 2613 pmap_unmap_ptes(pmap); 2614 PMAP_MAP_TO_HEAD_UNLOCK(); 2615 if (flush) 2616 cpu_tlb_flushID(); 2617 } 2618 2619 /* 2620 * void pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, 2621 * int flags) 2622 * 2623 * Insert the given physical page (p) at 2624 * the specified virtual address (v) in the 2625 * target physical map with the protection requested. 2626 * 2627 * If specified, the page will be wired down, meaning 2628 * that the related pte can not be reclaimed. 2629 * 2630 * NB: This is the only routine which MAY NOT lazy-evaluate 2631 * or lose information. That is, this routine must actually 2632 * insert this page into the given map NOW. 2633 */ 2634 2635 int 2636 pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, 2637 int flags) 2638 { 2639 pt_entry_t *ptes, opte, npte; 2640 paddr_t opa; 2641 boolean_t wired = (flags & PMAP_WIRED) != 0; 2642 struct vm_page *pg; 2643 struct pv_entry *pve; 2644 int error, nflags; 2645 2646 PDEBUG(5, printf("pmap_enter: V%08lx P%08lx in pmap %p prot=%08x, wired = %d\n", 2647 va, pa, pmap, prot, wired)); 2648 2649 #ifdef DIAGNOSTIC 2650 /* Valid address ? */ 2651 if (va >= (pmap_curmaxkvaddr)) 2652 panic("pmap_enter: too big"); 2653 if (pmap != pmap_kernel() && va != 0) { 2654 if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS) 2655 panic("pmap_enter: kernel page in user map"); 2656 } else { 2657 if (va >= VM_MIN_ADDRESS && va < VM_MAXUSER_ADDRESS) 2658 panic("pmap_enter: user page in kernel map"); 2659 if (va >= VM_MAXUSER_ADDRESS && va < VM_MAX_ADDRESS) 2660 panic("pmap_enter: entering PT page"); 2661 } 2662 #endif 2663 2664 KDASSERT(((va | pa) & PGOFSET) == 0); 2665 2666 /* 2667 * Get a pointer to the page. Later on in this function, we 2668 * test for a managed page by checking pg != NULL. 2669 */ 2670 pg = pmap_initialized ? PHYS_TO_VM_PAGE(pa) : NULL; 2671 2672 /* get lock */ 2673 PMAP_MAP_TO_HEAD_LOCK(); 2674 2675 /* 2676 * map the ptes. If there's not already an L2 table for this 2677 * address, allocate one. 2678 */ 2679 ptes = pmap_map_ptes(pmap); /* locks pmap */ 2680 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) { 2681 struct vm_page *ptp; 2682 2683 /* kernel should be pre-grown */ 2684 KASSERT(pmap != pmap_kernel()); 2685 2686 /* if failure is allowed then don't try too hard */ 2687 ptp = pmap_get_ptp(pmap, va & L1_S_FRAME); 2688 if (ptp == NULL) { 2689 if (flags & PMAP_CANFAIL) { 2690 error = ENOMEM; 2691 goto out; 2692 } 2693 panic("pmap_enter: get ptp failed"); 2694 } 2695 } 2696 opte = ptes[arm_btop(va)]; 2697 2698 nflags = 0; 2699 if (prot & VM_PROT_WRITE) 2700 nflags |= PVF_WRITE; 2701 if (wired) 2702 nflags |= PVF_WIRED; 2703 2704 /* Is the pte valid ? 
If so then this page is already mapped */ 2705 if (l2pte_valid(opte)) { 2706 /* Get the physical address of the current page mapped */ 2707 opa = l2pte_pa(opte); 2708 2709 /* Are we mapping the same page ? */ 2710 if (opa == pa) { 2711 /* Has the wiring changed ? */ 2712 if (pg != NULL) { 2713 simple_lock(&pg->mdpage.pvh_slock); 2714 (void) pmap_modify_pv(pmap, va, pg, 2715 PVF_WRITE | PVF_WIRED, nflags); 2716 simple_unlock(&pg->mdpage.pvh_slock); 2717 } 2718 } else { 2719 struct vm_page *opg; 2720 2721 /* We are replacing the page with a new one. */ 2722 cpu_idcache_wbinv_range(va, NBPG); 2723 2724 /* 2725 * If it is part of our managed memory then we 2726 * must remove it from the PV list 2727 */ 2728 if ((opg = PHYS_TO_VM_PAGE(opa)) != NULL) { 2729 simple_lock(&opg->mdpage.pvh_slock); 2730 pve = pmap_remove_pv(opg, pmap, va); 2731 simple_unlock(&opg->mdpage.pvh_slock); 2732 } else { 2733 pve = NULL; 2734 } 2735 2736 goto enter; 2737 } 2738 } else { 2739 opa = 0; 2740 pve = NULL; 2741 pmap_pte_addref(pmap, va); 2742 2743 /* pte is not valid so we must be hooking in a new page */ 2744 ++pmap->pm_stats.resident_count; 2745 2746 enter: 2747 /* 2748 * Enter on the PV list if part of our managed memory 2749 */ 2750 if (pg != NULL) { 2751 if (pve == NULL) { 2752 pve = pmap_alloc_pv(pmap, ALLOCPV_NEED); 2753 if (pve == NULL) { 2754 if (flags & PMAP_CANFAIL) { 2755 error = ENOMEM; 2756 goto out; 2757 } 2758 panic("pmap_enter: no pv entries " 2759 "available"); 2760 } 2761 } 2762 /* enter_pv locks pvh when adding */ 2763 pmap_enter_pv(pg, pve, pmap, va, NULL, nflags); 2764 } else { 2765 if (pve != NULL) 2766 pmap_free_pv(pmap, pve); 2767 } 2768 } 2769 2770 /* Construct the pte, giving the correct access. */ 2771 npte = pa; 2772 2773 /* VA 0 is magic. */ 2774 if (pmap != pmap_kernel() && va != vector_page) 2775 npte |= L2_S_PROT_U; 2776 2777 if (pg != NULL) { 2778 #ifdef DIAGNOSTIC 2779 if ((flags & VM_PROT_ALL) & ~prot) 2780 panic("pmap_enter: access_type exceeds prot"); 2781 #endif 2782 npte |= pte_l2_s_cache_mode; 2783 if (flags & VM_PROT_WRITE) { 2784 npte |= L2_S_PROTO | L2_S_PROT_W; 2785 pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD; 2786 } else if (flags & VM_PROT_ALL) { 2787 npte |= L2_S_PROTO; 2788 pg->mdpage.pvh_attrs |= PVF_REF; 2789 } else 2790 npte |= L2_TYPE_INV; 2791 } else { 2792 if (prot & VM_PROT_WRITE) 2793 npte |= L2_S_PROTO | L2_S_PROT_W; 2794 else if (prot & VM_PROT_ALL) 2795 npte |= L2_S_PROTO; 2796 else 2797 npte |= L2_TYPE_INV; 2798 } 2799 2800 ptes[arm_btop(va)] = npte; 2801 2802 if (pg != NULL) { 2803 simple_lock(&pg->mdpage.pvh_slock); 2804 pmap_vac_me_harder(pmap, pg, ptes, pmap_is_curpmap(pmap)); 2805 simple_unlock(&pg->mdpage.pvh_slock); 2806 } 2807 2808 /* Better flush the TLB ... 
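 (the PTE for this VA was just rewritten above, so any cached
 translation for va is stale)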
*/ 2809 cpu_tlb_flushID_SE(va); 2810 error = 0; 2811 out: 2812 pmap_unmap_ptes(pmap); /* unlocks pmap */ 2813 PMAP_MAP_TO_HEAD_UNLOCK(); 2814 2815 return error; 2816 } 2817 2818 /* 2819 * pmap_kenter_pa: enter a kernel mapping 2820 * 2821 * => no need to lock anything assume va is already allocated 2822 * => should be faster than normal pmap enter function 2823 */ 2824 void 2825 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot) 2826 { 2827 pt_entry_t *pte; 2828 2829 pte = vtopte(va); 2830 KASSERT(!pmap_pte_v(pte)); 2831 2832 *pte = L2_S_PROTO | pa | 2833 L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode; 2834 } 2835 2836 void 2837 pmap_kremove(vaddr_t va, vsize_t len) 2838 { 2839 pt_entry_t *pte; 2840 2841 for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) { 2842 2843 /* 2844 * We assume that we will only be called with small 2845 * regions of memory. 2846 */ 2847 2848 KASSERT(pmap_pde_page(pmap_pde(pmap_kernel(), va))); 2849 pte = vtopte(va); 2850 cpu_idcache_wbinv_range(va, PAGE_SIZE); 2851 *pte = 0; 2852 cpu_tlb_flushID_SE(va); 2853 } 2854 } 2855 2856 /* 2857 * pmap_page_protect: 2858 * 2859 * Lower the permission for all mappings to a given page. 2860 */ 2861 2862 void 2863 pmap_page_protect(struct vm_page *pg, vm_prot_t prot) 2864 { 2865 2866 PDEBUG(0, printf("pmap_page_protect(pa=%lx, prot=%d)\n", 2867 VM_PAGE_TO_PHYS(pg), prot)); 2868 2869 switch(prot) { 2870 case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 2871 case VM_PROT_READ|VM_PROT_WRITE: 2872 return; 2873 2874 case VM_PROT_READ: 2875 case VM_PROT_READ|VM_PROT_EXECUTE: 2876 pmap_clearbit(pg, PVF_WRITE); 2877 break; 2878 2879 default: 2880 pmap_remove_all(pg); 2881 break; 2882 } 2883 } 2884 2885 2886 /* 2887 * Routine: pmap_unwire 2888 * Function: Clear the wired attribute for a map/virtual-address 2889 * pair. 2890 * In/out conditions: 2891 * The mapping must already exist in the pmap. 2892 */ 2893 2894 void 2895 pmap_unwire(struct pmap *pmap, vaddr_t va) 2896 { 2897 pt_entry_t *ptes; 2898 struct vm_page *pg; 2899 paddr_t pa; 2900 2901 PMAP_MAP_TO_HEAD_LOCK(); 2902 ptes = pmap_map_ptes(pmap); /* locks pmap */ 2903 2904 if (pmap_pde_v(pmap_pde(pmap, va))) { 2905 #ifdef DIAGNOSTIC 2906 if (l2pte_valid(ptes[arm_btop(va)]) == 0) 2907 panic("pmap_unwire: invalid L2 PTE"); 2908 #endif 2909 /* Extract the physical address of the page */ 2910 pa = l2pte_pa(ptes[arm_btop(va)]); 2911 2912 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 2913 goto out; 2914 2915 /* Update the wired bit in the pv entry for this page. */ 2916 simple_lock(&pg->mdpage.pvh_slock); 2917 (void) pmap_modify_pv(pmap, va, pg, PVF_WIRED, 0); 2918 simple_unlock(&pg->mdpage.pvh_slock); 2919 } 2920 #ifdef DIAGNOSTIC 2921 else { 2922 panic("pmap_unwire: invalid L1 PTE"); 2923 } 2924 #endif 2925 out: 2926 pmap_unmap_ptes(pmap); /* unlocks pmap */ 2927 PMAP_MAP_TO_HEAD_UNLOCK(); 2928 } 2929 2930 /* 2931 * Routine: pmap_extract 2932 * Function: 2933 * Extract the physical page address associated 2934 * with the given map/virtual_address pair. 
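 *
 * Returns TRUE and stores the address through "pap" (if non-NULL)
 * when a mapping exists; returns FALSE otherwise. A typical
 * (illustrative) use is:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		panic("no mapping for va");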
2935 */ 2936 boolean_t 2937 pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pap) 2938 { 2939 pd_entry_t *pde; 2940 pt_entry_t *pte, *ptes; 2941 paddr_t pa; 2942 2943 PDEBUG(5, printf("pmap_extract: pmap=%p, va=0x%08lx -> ", pmap, va)); 2944 2945 ptes = pmap_map_ptes(pmap); /* locks pmap */ 2946 2947 pde = pmap_pde(pmap, va); 2948 pte = &ptes[arm_btop(va)]; 2949 2950 if (pmap_pde_section(pde)) { 2951 pa = (*pde & L1_S_FRAME) | (va & L1_S_OFFSET); 2952 PDEBUG(5, printf("section pa=0x%08lx\n", pa)); 2953 goto out; 2954 } else if (pmap_pde_page(pde) == 0 || pmap_pte_v(pte) == 0) { 2955 PDEBUG(5, printf("no mapping\n")); 2956 goto failed; 2957 } 2958 2959 if ((*pte & L2_TYPE_MASK) == L2_TYPE_L) { 2960 pa = (*pte & L2_L_FRAME) | (va & L2_L_OFFSET); 2961 PDEBUG(5, printf("large page pa=0x%08lx\n", pa)); 2962 goto out; 2963 } 2964 2965 pa = (*pte & L2_S_FRAME) | (va & L2_S_OFFSET); 2966 PDEBUG(5, printf("small page pa=0x%08lx\n", pa)); 2967 2968 out: 2969 if (pap != NULL) 2970 *pap = pa; 2971 2972 pmap_unmap_ptes(pmap); /* unlocks pmap */ 2973 return (TRUE); 2974 2975 failed: 2976 pmap_unmap_ptes(pmap); /* unlocks pmap */ 2977 return (FALSE); 2978 } 2979 2980 2981 /* 2982 * pmap_copy: 2983 * 2984 * Copy the range specified by src_addr/len from the source map to the 2985 * range dst_addr/len in the destination map. 2986 * 2987 * This routine is only advisory and need not do anything. 2988 */ 2989 /* Call deleted in <arm/arm32/pmap.h> */ 2990 2991 #if defined(PMAP_DEBUG) 2992 void 2993 pmap_dump_pvlist(phys, m) 2994 vaddr_t phys; 2995 char *m; 2996 { 2997 struct vm_page *pg; 2998 struct pv_entry *pv; 2999 3000 if ((pg = PHYS_TO_VM_PAGE(phys)) == NULL) { 3001 printf("INVALID PA\n"); 3002 return; 3003 } 3004 simple_lock(&pg->mdpage.pvh_slock); 3005 printf("%s %08lx:", m, phys); 3006 if (pg->mdpage.pvh_list == NULL) { 3007 simple_unlock(&pg->mdpage.pvh_slock); 3008 printf(" no mappings\n"); 3009 return; 3010 } 3011 3012 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) 3013 printf(" pmap %p va %08lx flags %08x", pv->pv_pmap, 3014 pv->pv_va, pv->pv_flags); 3015 3016 printf("\n"); 3017 simple_unlock(&pg->mdpage.pvh_slock); 3018 } 3019 3020 #endif /* PMAP_DEBUG */ 3021 3022 static pt_entry_t * 3023 pmap_map_ptes(struct pmap *pmap) 3024 { 3025 struct proc *p; 3026 3027 /* the kernel's pmap is always accessible */ 3028 if (pmap == pmap_kernel()) { 3029 return (pt_entry_t *)PTE_BASE; 3030 } 3031 3032 if (pmap_is_curpmap(pmap)) { 3033 simple_lock(&pmap->pm_obj.vmobjlock); 3034 return (pt_entry_t *)PTE_BASE; 3035 } 3036 3037 p = curproc; 3038 KDASSERT(p != NULL); 3039 3040 /* need to lock both curpmap and pmap: use ordered locking */ 3041 if ((vaddr_t) pmap < (vaddr_t) p->p_vmspace->vm_map.pmap) { 3042 simple_lock(&pmap->pm_obj.vmobjlock); 3043 simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock); 3044 } else { 3045 simple_lock(&p->p_vmspace->vm_map.pmap->pm_obj.vmobjlock); 3046 simple_lock(&pmap->pm_obj.vmobjlock); 3047 } 3048 3049 pmap_map_in_l1(p->p_vmspace->vm_map.pmap, APTE_BASE, pmap->pm_pptpt, 3050 FALSE); 3051 cpu_tlb_flushD(); 3052 cpu_cpwait(); 3053 return (pt_entry_t *)APTE_BASE; 3054 } 3055 3056 /* 3057 * pmap_unmap_ptes: unlock the PTE mapping of "pmap" 3058 */ 3059 3060 static void 3061 pmap_unmap_ptes(struct pmap *pmap) 3062 { 3063 3064 if (pmap == pmap_kernel()) { 3065 return; 3066 } 3067 if (pmap_is_curpmap(pmap)) { 3068 simple_unlock(&pmap->pm_obj.vmobjlock); 3069 } else { 3070 KDASSERT(curproc != NULL); 3071 simple_unlock(&pmap->pm_obj.vmobjlock); 3072 simple_unlock( 3073 
&curproc->p_vmspace->vm_map.pmap->pm_obj.vmobjlock);
3074 }
3075 }
3076
3077 /*
3078 * Modify pte bits for all ptes corresponding to the given physical address.
3079 * We use `maskbits' rather than `clearbits' because we're always passing
3080 * constants and the latter would require an extra inversion at run-time.
3081 */
3082
3083 static void
3084 pmap_clearbit(struct vm_page *pg, u_int maskbits)
3085 {
3086 struct pv_entry *pv;
3087 pt_entry_t *ptes;
3088 vaddr_t va;
3089 int tlbentry;
3090
3091 PDEBUG(1, printf("pmap_clearbit: pa=%08lx mask=%08x\n",
3092 VM_PAGE_TO_PHYS(pg), maskbits));
3093
3094 tlbentry = 0;
3095
3096 PMAP_HEAD_TO_MAP_LOCK();
3097 simple_lock(&pg->mdpage.pvh_slock);
3098
3099 /*
3100 * Clear saved attributes (modify, reference)
3101 */
3102 pg->mdpage.pvh_attrs &= ~maskbits;
3103
3104 if (pg->mdpage.pvh_list == NULL) {
3105 simple_unlock(&pg->mdpage.pvh_slock);
3106 PMAP_HEAD_TO_MAP_UNLOCK();
3107 return;
3108 }
3109
3110 /*
3111 * Loop over all current mappings, setting/clearing as appropriate.
3112 */
3113 for (pv = pg->mdpage.pvh_list; pv; pv = pv->pv_next) {
3114 va = pv->pv_va;
3115 pv->pv_flags &= ~maskbits;
3116 ptes = pmap_map_ptes(pv->pv_pmap); /* locks pmap */
3117 KASSERT(pmap_pde_v(pmap_pde(pv->pv_pmap, va)));
3118 if (maskbits & (PVF_WRITE|PVF_MOD)) {
3119 if ((pv->pv_flags & PVF_NC)) {
3120 /*
3121 * Entry is not cacheable: re-enable
3122 * the cache; nothing to flush.
3123 *
3124 * Don't turn caching on again if this
3125 * is a modified emulation. This
3126 * would be inconsistent with the
3127 * settings created by
3128 * pmap_vac_me_harder().
3129 *
3130 * There's no need to call
3131 * pmap_vac_me_harder() here: all
3132 * pages are losing their write
3133 * permission.
3134 *
3135 */
3136 if (maskbits & PVF_WRITE) {
3137 ptes[arm_btop(va)] |=
3138 pte_l2_s_cache_mode;
3139 pv->pv_flags &= ~PVF_NC;
3140 }
3141 } else if (pmap_is_curpmap(pv->pv_pmap)) {
3142 /*
3143 * Entry is cacheable: check if pmap is
3144 * current; if it is, flush it,
3145 * otherwise it won't be in the cache.
3146 */
3147 cpu_idcache_wbinv_range(pv->pv_va, NBPG);
3148 }
3149
3150 /* make the pte read only */
3151 ptes[arm_btop(va)] &= ~L2_S_PROT_W;
3152 }
3153
3154 if (maskbits & PVF_REF)
3155 ptes[arm_btop(va)] =
3156 (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_TYPE_INV;
3157
3158 if (pmap_is_curpmap(pv->pv_pmap)) {
3159 /*
3160 * if we had cacheable pte's we'd clean the
3161 * pte out to memory here
3162 *
3163 * flush tlb entry as it's in the current pmap
3164 */
3165 cpu_tlb_flushID_SE(pv->pv_va);
3166 }
3167 pmap_unmap_ptes(pv->pv_pmap); /* unlocks pmap */
3168 }
3169 cpu_cpwait();
3170
3171 simple_unlock(&pg->mdpage.pvh_slock);
3172 PMAP_HEAD_TO_MAP_UNLOCK();
3173 }
3174
3175 /*
3176 * pmap_clear_modify:
3177 *
3178 * Clear the "modified" attribute for a page.
3179 */
3180 boolean_t
3181 pmap_clear_modify(struct vm_page *pg)
3182 {
3183 boolean_t rv;
3184
3185 if (pg->mdpage.pvh_attrs & PVF_MOD) {
3186 rv = TRUE;
3187 pmap_clearbit(pg, PVF_MOD);
3188 } else
3189 rv = FALSE;
3190
3191 PDEBUG(0, printf("pmap_clear_modify pa=%08lx -> %d\n",
3192 VM_PAGE_TO_PHYS(pg), rv));
3193
3194 return (rv);
3195 }
3196
3197 /*
3198 * pmap_clear_reference:
3199 *
3200 * Clear the "referenced" attribute for a page.
3201 */ 3202 boolean_t 3203 pmap_clear_reference(struct vm_page *pg) 3204 { 3205 boolean_t rv; 3206 3207 if (pg->mdpage.pvh_attrs & PVF_REF) { 3208 rv = TRUE; 3209 pmap_clearbit(pg, PVF_REF); 3210 } else 3211 rv = FALSE; 3212 3213 PDEBUG(0, printf("pmap_clear_reference pa=%08lx -> %d\n", 3214 VM_PAGE_TO_PHYS(pg), rv)); 3215 3216 return (rv); 3217 } 3218 3219 /* 3220 * pmap_is_modified: 3221 * 3222 * Test if a page has the "modified" attribute. 3223 */ 3224 /* See <arm/arm32/pmap.h> */ 3225 3226 /* 3227 * pmap_is_referenced: 3228 * 3229 * Test if a page has the "referenced" attribute. 3230 */ 3231 /* See <arm/arm32/pmap.h> */ 3232 3233 int 3234 pmap_modified_emulation(struct pmap *pmap, vaddr_t va) 3235 { 3236 pt_entry_t *ptes; 3237 struct vm_page *pg; 3238 paddr_t pa; 3239 u_int flags; 3240 int rv = 0; 3241 3242 PDEBUG(2, printf("pmap_modified_emulation\n")); 3243 3244 PMAP_MAP_TO_HEAD_LOCK(); 3245 ptes = pmap_map_ptes(pmap); /* locks pmap */ 3246 3247 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) { 3248 PDEBUG(2, printf("L1 PTE invalid\n")); 3249 goto out; 3250 } 3251 3252 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)])); 3253 3254 /* Check for a invalid pte */ 3255 if (l2pte_valid(ptes[arm_btop(va)]) == 0) 3256 goto out; 3257 3258 /* This can happen if user code tries to access kernel memory. */ 3259 if ((ptes[arm_btop(va)] & L2_S_PROT_W) != 0) 3260 goto out; 3261 3262 /* Extract the physical address of the page */ 3263 pa = l2pte_pa(ptes[arm_btop(va)]); 3264 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 3265 goto out; 3266 3267 /* Get the current flags for this page. */ 3268 simple_lock(&pg->mdpage.pvh_slock); 3269 3270 flags = pmap_modify_pv(pmap, va, pg, 0, 0); 3271 PDEBUG(2, printf("pmap_modified_emulation: flags = %08x\n", flags)); 3272 3273 /* 3274 * Do the flags say this page is writable ? If not then it is a 3275 * genuine write fault. If yes then the write fault is our fault 3276 * as we did not reflect the write access in the PTE. Now we know 3277 * a write has occurred we can correct this and also set the 3278 * modified bit 3279 */ 3280 if (~flags & PVF_WRITE) { 3281 simple_unlock(&pg->mdpage.pvh_slock); 3282 goto out; 3283 } 3284 3285 PDEBUG(0, 3286 printf("pmap_modified_emulation: Got a hit va=%08lx, pte = %08x\n", 3287 va, ptes[arm_btop(va)])); 3288 pg->mdpage.pvh_attrs |= PVF_REF | PVF_MOD; 3289 3290 /* 3291 * Re-enable write permissions for the page. No need to call 3292 * pmap_vac_me_harder(), since this is just a 3293 * modified-emulation fault, and the PVF_WRITE bit isn't changing. 3294 * We've already set the cacheable bits based on the assumption 3295 * that we can write to this page. 
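 *
 * The store below upgrades the mapping to a valid, writable small
 * page (L2_S_PROTO | L2_S_PROT_W); once the stale TLB entry for va
 * is flushed, the faulting write can be restarted and will succeed.
 * A later pmap_clearbit(pg, PVF_MOD) makes the page read-only again
 * so that the next write re-faults and is noticed.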
3296 */ 3297 ptes[arm_btop(va)] = 3298 (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; 3299 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)])); 3300 3301 simple_unlock(&pg->mdpage.pvh_slock); 3302 3303 cpu_tlb_flushID_SE(va); 3304 cpu_cpwait(); 3305 rv = 1; 3306 out: 3307 pmap_unmap_ptes(pmap); /* unlocks pmap */ 3308 PMAP_MAP_TO_HEAD_UNLOCK(); 3309 return (rv); 3310 } 3311 3312 int 3313 pmap_handled_emulation(struct pmap *pmap, vaddr_t va) 3314 { 3315 pt_entry_t *ptes; 3316 struct vm_page *pg; 3317 paddr_t pa; 3318 int rv = 0; 3319 3320 PDEBUG(2, printf("pmap_handled_emulation\n")); 3321 3322 PMAP_MAP_TO_HEAD_LOCK(); 3323 ptes = pmap_map_ptes(pmap); /* locks pmap */ 3324 3325 if (pmap_pde_v(pmap_pde(pmap, va)) == 0) { 3326 PDEBUG(2, printf("L1 PTE invalid\n")); 3327 goto out; 3328 } 3329 3330 PDEBUG(1, printf("pte=%08x\n", ptes[arm_btop(va)])); 3331 3332 /* Check for invalid pte */ 3333 if (l2pte_valid(ptes[arm_btop(va)]) == 0) 3334 goto out; 3335 3336 /* This can happen if user code tries to access kernel memory. */ 3337 if ((ptes[arm_btop(va)] & L2_TYPE_MASK) != L2_TYPE_INV) 3338 goto out; 3339 3340 /* Extract the physical address of the page */ 3341 pa = l2pte_pa(ptes[arm_btop(va)]); 3342 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 3343 goto out; 3344 3345 simple_lock(&pg->mdpage.pvh_slock); 3346 3347 /* 3348 * Ok we just enable the pte and mark the attibs as handled 3349 * XXX Should we traverse the PV list and enable all PTEs? 3350 */ 3351 PDEBUG(0, 3352 printf("pmap_handled_emulation: Got a hit va=%08lx pte = %08x\n", 3353 va, ptes[arm_btop(va)])); 3354 pg->mdpage.pvh_attrs |= PVF_REF; 3355 3356 ptes[arm_btop(va)] = (ptes[arm_btop(va)] & ~L2_TYPE_MASK) | L2_S_PROTO; 3357 PDEBUG(0, printf("->(%08x)\n", ptes[arm_btop(va)])); 3358 3359 simple_unlock(&pg->mdpage.pvh_slock); 3360 3361 cpu_tlb_flushID_SE(va); 3362 cpu_cpwait(); 3363 rv = 1; 3364 out: 3365 pmap_unmap_ptes(pmap); /* unlocks pmap */ 3366 PMAP_MAP_TO_HEAD_UNLOCK(); 3367 return (rv); 3368 } 3369 3370 /* 3371 * pmap_collect: free resources held by a pmap 3372 * 3373 * => optional function. 3374 * => called when a process is swapped out to free memory. 3375 */ 3376 3377 void 3378 pmap_collect(struct pmap *pmap) 3379 { 3380 } 3381 3382 /* 3383 * Routine: pmap_procwr 3384 * 3385 * Function: 3386 * Synchronize caches corresponding to [addr, addr+len) in p. 3387 * 3388 */ 3389 void 3390 pmap_procwr(struct proc *p, vaddr_t va, int len) 3391 { 3392 /* We only need to do anything if it is the current process. */ 3393 if (p == curproc) 3394 cpu_icache_sync_range(va, len); 3395 } 3396 /* 3397 * PTP functions 3398 */ 3399 3400 /* 3401 * pmap_get_ptp: get a PTP (if there isn't one, allocate a new one) 3402 * 3403 * => pmap should NOT be pmap_kernel() 3404 * => pmap should be locked 3405 */ 3406 3407 static struct vm_page * 3408 pmap_get_ptp(struct pmap *pmap, vaddr_t va) 3409 { 3410 struct vm_page *ptp; 3411 3412 if (pmap_pde_page(pmap_pde(pmap, va))) { 3413 3414 /* valid... 
check hint (saves us a PA->PG lookup) */ 3415 if (pmap->pm_ptphint && 3416 (pmap->pm_pdir[pmap_pdei(va)] & L2_S_FRAME) == 3417 VM_PAGE_TO_PHYS(pmap->pm_ptphint)) 3418 return (pmap->pm_ptphint); 3419 ptp = uvm_pagelookup(&pmap->pm_obj, va); 3420 #ifdef DIAGNOSTIC 3421 if (ptp == NULL) 3422 panic("pmap_get_ptp: unmanaged user PTP"); 3423 #endif 3424 pmap->pm_ptphint = ptp; 3425 return(ptp); 3426 } 3427 3428 /* allocate a new PTP (updates ptphint) */ 3429 return(pmap_alloc_ptp(pmap, va)); 3430 } 3431 3432 /* 3433 * pmap_alloc_ptp: allocate a PTP for a PMAP 3434 * 3435 * => pmap should already be locked by caller 3436 * => we use the ptp's wire_count to count the number of active mappings 3437 * in the PTP (we start it at one to prevent any chance this PTP 3438 * will ever leak onto the active/inactive queues) 3439 */ 3440 3441 /*__inline */ static struct vm_page * 3442 pmap_alloc_ptp(struct pmap *pmap, vaddr_t va) 3443 { 3444 struct vm_page *ptp; 3445 3446 ptp = uvm_pagealloc(&pmap->pm_obj, va, NULL, 3447 UVM_PGA_USERESERVE|UVM_PGA_ZERO); 3448 if (ptp == NULL) 3449 return (NULL); 3450 3451 /* got one! */ 3452 ptp->flags &= ~PG_BUSY; /* never busy */ 3453 ptp->wire_count = 1; /* no mappings yet */ 3454 pmap_map_in_l1(pmap, va, VM_PAGE_TO_PHYS(ptp), TRUE); 3455 pmap->pm_stats.resident_count++; /* count PTP as resident */ 3456 pmap->pm_ptphint = ptp; 3457 return (ptp); 3458 } 3459 3460 vaddr_t 3461 pmap_growkernel(vaddr_t maxkvaddr) 3462 { 3463 struct pmap *kpm = pmap_kernel(), *pm; 3464 int s; 3465 paddr_t ptaddr; 3466 struct vm_page *ptp; 3467 3468 if (maxkvaddr <= pmap_curmaxkvaddr) 3469 goto out; /* we are OK */ 3470 NPDEBUG(PDB_GROWKERN, printf("pmap_growkernel: growing kernel from %lx to %lx\n", 3471 pmap_curmaxkvaddr, maxkvaddr)); 3472 3473 /* 3474 * whoops! we need to add kernel PTPs 3475 */ 3476 3477 s = splhigh(); /* to be safe */ 3478 simple_lock(&kpm->pm_obj.vmobjlock); 3479 /* due to the way the arm pmap works we map 4MB at a time */ 3480 for (/*null*/ ; pmap_curmaxkvaddr < maxkvaddr; 3481 pmap_curmaxkvaddr += 4 * L1_S_SIZE) { 3482 3483 if (uvm.page_init_done == FALSE) { 3484 3485 /* 3486 * we're growing the kernel pmap early (from 3487 * uvm_pageboot_alloc()). this case must be 3488 * handled a little differently. 3489 */ 3490 3491 if (uvm_page_physget(&ptaddr) == FALSE) 3492 panic("pmap_growkernel: out of memory"); 3493 pmap_zero_page(ptaddr); 3494 3495 /* map this page in */ 3496 pmap_map_in_l1(kpm, pmap_curmaxkvaddr, ptaddr, TRUE); 3497 3498 /* count PTP as resident */ 3499 kpm->pm_stats.resident_count++; 3500 continue; 3501 } 3502 3503 /* 3504 * THIS *MUST* BE CODED SO AS TO WORK IN THE 3505 * pmap_initialized == FALSE CASE! WE MAY BE 3506 * INVOKED WHILE pmap_init() IS RUNNING! 
3507 */ 3508 3509 if ((ptp = pmap_alloc_ptp(kpm, pmap_curmaxkvaddr)) == NULL) 3510 panic("pmap_growkernel: alloc ptp failed"); 3511 3512 /* distribute new kernel PTP to all active pmaps */ 3513 simple_lock(&pmaps_lock); 3514 LIST_FOREACH(pm, &pmaps, pm_list) { 3515 pmap_map_in_l1(pm, pmap_curmaxkvaddr, 3516 VM_PAGE_TO_PHYS(ptp), TRUE); 3517 } 3518 3519 simple_unlock(&pmaps_lock); 3520 } 3521 3522 /* 3523 * flush out the cache, expensive but growkernel will happen so 3524 * rarely 3525 */ 3526 cpu_tlb_flushD(); 3527 cpu_cpwait(); 3528 3529 simple_unlock(&kpm->pm_obj.vmobjlock); 3530 splx(s); 3531 3532 out: 3533 return (pmap_curmaxkvaddr); 3534 } 3535 3536 /************************ Utility routines ****************************/ 3537 3538 /* 3539 * vector_page_setprot: 3540 * 3541 * Manipulate the protection of the vector page. 3542 */ 3543 void 3544 vector_page_setprot(int prot) 3545 { 3546 pt_entry_t *pte; 3547 3548 pte = vtopte(vector_page); 3549 3550 *pte = (*pte & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 3551 cpu_tlb_flushD_SE(vector_page); 3552 cpu_cpwait(); 3553 } 3554 3555 /************************ Bootstrapping routines ****************************/ 3556 3557 /* 3558 * This list exists for the benefit of pmap_map_chunk(). It keeps track 3559 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can 3560 * find them as necessary. 3561 * 3562 * Note that the data on this list is not valid after initarm() returns. 3563 */ 3564 SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); 3565 3566 static vaddr_t 3567 kernel_pt_lookup(paddr_t pa) 3568 { 3569 pv_addr_t *pv; 3570 3571 SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { 3572 if (pv->pv_pa == pa) 3573 return (pv->pv_va); 3574 } 3575 return (0); 3576 } 3577 3578 /* 3579 * pmap_map_section: 3580 * 3581 * Create a single section mapping. 3582 */ 3583 void 3584 pmap_map_section(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 3585 { 3586 pd_entry_t *pde = (pd_entry_t *) l1pt; 3587 pd_entry_t fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0; 3588 3589 KASSERT(((va | pa) & L1_S_OFFSET) == 0); 3590 3591 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 3592 L1_S_PROT(PTE_KERNEL, prot) | fl; 3593 } 3594 3595 /* 3596 * pmap_map_entry: 3597 * 3598 * Create a single page mapping. 3599 */ 3600 void 3601 pmap_map_entry(vaddr_t l1pt, vaddr_t va, paddr_t pa, int prot, int cache) 3602 { 3603 pd_entry_t *pde = (pd_entry_t *) l1pt; 3604 pt_entry_t fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0; 3605 pt_entry_t *pte; 3606 3607 KASSERT(((va | pa) & PGOFSET) == 0); 3608 3609 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 3610 panic("pmap_map_entry: no L2 table for VA 0x%08lx", va); 3611 3612 pte = (pt_entry_t *) 3613 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 3614 if (pte == NULL) 3615 panic("pmap_map_entry: can't find L2 table for VA 0x%08lx", va); 3616 3617 pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa | 3618 L2_S_PROT(PTE_KERNEL, prot) | fl; 3619 } 3620 3621 /* 3622 * pmap_link_l2pt: 3623 * 3624 * Link the L2 page table specified by "pa" into the L1 3625 * page table at the slot for "va". 
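 *
 * Each coarse L1 descriptor maps 1MB through a 1KB L2 table, so the
 * page-sized table at l2pv->pv_pa is linked in as four 1KB quarters
 * (offsets 0x000, 0x400, 0x800 and 0xc00) filling four consecutive
 * L1 slots, i.e. 4MB of address space per linked page.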
3626 */ 3627 void 3628 pmap_link_l2pt(vaddr_t l1pt, vaddr_t va, pv_addr_t *l2pv) 3629 { 3630 pd_entry_t *pde = (pd_entry_t *) l1pt; 3631 u_int slot = va >> L1_S_SHIFT; 3632 3633 KASSERT((l2pv->pv_pa & PGOFSET) == 0); 3634 3635 pde[slot + 0] = L1_C_PROTO | (l2pv->pv_pa + 0x000); 3636 pde[slot + 1] = L1_C_PROTO | (l2pv->pv_pa + 0x400); 3637 pde[slot + 2] = L1_C_PROTO | (l2pv->pv_pa + 0x800); 3638 pde[slot + 3] = L1_C_PROTO | (l2pv->pv_pa + 0xc00); 3639 3640 SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); 3641 } 3642 3643 /* 3644 * pmap_map_chunk: 3645 * 3646 * Map a chunk of memory using the most efficient mappings 3647 * possible (section, large page, small page) into the 3648 * provided L1 and L2 tables at the specified virtual address. 3649 */ 3650 vsize_t 3651 pmap_map_chunk(vaddr_t l1pt, vaddr_t va, paddr_t pa, vsize_t size, 3652 int prot, int cache) 3653 { 3654 pd_entry_t *pde = (pd_entry_t *) l1pt; 3655 pt_entry_t *pte, fl; 3656 vsize_t resid; 3657 int i; 3658 3659 resid = (size + (NBPG - 1)) & ~(NBPG - 1); 3660 3661 if (l1pt == 0) 3662 panic("pmap_map_chunk: no L1 table provided"); 3663 3664 #ifdef VERBOSE_INIT_ARM 3665 printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx " 3666 "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); 3667 #endif 3668 3669 size = resid; 3670 3671 while (resid > 0) { 3672 /* See if we can use a section mapping. */ 3673 if (((pa | va) & L1_S_OFFSET) == 0 && 3674 resid >= L1_S_SIZE) { 3675 fl = (cache == PTE_CACHE) ? pte_l1_s_cache_mode : 0; 3676 #ifdef VERBOSE_INIT_ARM 3677 printf("S"); 3678 #endif 3679 pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 3680 L1_S_PROT(PTE_KERNEL, prot) | fl; 3681 va += L1_S_SIZE; 3682 pa += L1_S_SIZE; 3683 resid -= L1_S_SIZE; 3684 continue; 3685 } 3686 3687 /* 3688 * Ok, we're going to use an L2 table. Make sure 3689 * one is actually in the corresponding L1 slot 3690 * for the current VA. 3691 */ 3692 if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 3693 panic("pmap_map_chunk: no L2 table for VA 0x%08lx", va); 3694 3695 pte = (pt_entry_t *) 3696 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 3697 if (pte == NULL) 3698 panic("pmap_map_chunk: can't find L2 table for VA" 3699 "0x%08lx", va); 3700 3701 /* See if we can use a L2 large page mapping. */ 3702 if (((pa | va) & L2_L_OFFSET) == 0 && 3703 resid >= L2_L_SIZE) { 3704 fl = (cache == PTE_CACHE) ? pte_l2_l_cache_mode : 0; 3705 #ifdef VERBOSE_INIT_ARM 3706 printf("L"); 3707 #endif 3708 for (i = 0; i < 16; i++) { 3709 pte[((va >> PGSHIFT) & 0x3f0) + i] = 3710 L2_L_PROTO | pa | 3711 L2_L_PROT(PTE_KERNEL, prot) | fl; 3712 } 3713 va += L2_L_SIZE; 3714 pa += L2_L_SIZE; 3715 resid -= L2_L_SIZE; 3716 continue; 3717 } 3718 3719 /* Use a small page mapping. */ 3720 fl = (cache == PTE_CACHE) ? pte_l2_s_cache_mode : 0; 3721 #ifdef VERBOSE_INIT_ARM 3722 printf("P"); 3723 #endif 3724 pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa | 3725 L2_S_PROT(PTE_KERNEL, prot) | fl; 3726 va += NBPG; 3727 pa += NBPG; 3728 resid -= NBPG; 3729 } 3730 #ifdef VERBOSE_INIT_ARM 3731 printf("\n"); 3732 #endif 3733 return (size); 3734 } 3735 3736 /********************** PTE initialization routines **************************/ 3737 3738 /* 3739 * These routines are called when the CPU type is identified to set up 3740 * the PTE prototypes, cache modes, etc. 3741 * 3742 * The variables are always here, just in case LKMs need to reference 3743 * them (though, they shouldn't). 
3744 */ 3745 3746 pt_entry_t pte_l1_s_cache_mode; 3747 pt_entry_t pte_l1_s_cache_mask; 3748 3749 pt_entry_t pte_l2_l_cache_mode; 3750 pt_entry_t pte_l2_l_cache_mask; 3751 3752 pt_entry_t pte_l2_s_cache_mode; 3753 pt_entry_t pte_l2_s_cache_mask; 3754 3755 pt_entry_t pte_l2_s_prot_u; 3756 pt_entry_t pte_l2_s_prot_w; 3757 pt_entry_t pte_l2_s_prot_mask; 3758 3759 pt_entry_t pte_l1_s_proto; 3760 pt_entry_t pte_l1_c_proto; 3761 pt_entry_t pte_l2_s_proto; 3762 3763 void (*pmap_copy_page_func)(paddr_t, paddr_t); 3764 void (*pmap_zero_page_func)(paddr_t); 3765 3766 #if ARM_MMU_GENERIC == 1 3767 void 3768 pmap_pte_init_generic(void) 3769 { 3770 3771 pte_l1_s_cache_mode = L1_S_B|L1_S_C; 3772 pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; 3773 3774 pte_l2_l_cache_mode = L2_B|L2_C; 3775 pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; 3776 3777 pte_l2_s_cache_mode = L2_B|L2_C; 3778 pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; 3779 3780 pte_l2_s_prot_u = L2_S_PROT_U_generic; 3781 pte_l2_s_prot_w = L2_S_PROT_W_generic; 3782 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 3783 3784 pte_l1_s_proto = L1_S_PROTO_generic; 3785 pte_l1_c_proto = L1_C_PROTO_generic; 3786 pte_l2_s_proto = L2_S_PROTO_generic; 3787 3788 pmap_copy_page_func = pmap_copy_page_generic; 3789 pmap_zero_page_func = pmap_zero_page_generic; 3790 } 3791 3792 #if defined(CPU_ARM9) 3793 void 3794 pmap_pte_init_arm9(void) 3795 { 3796 3797 /* 3798 * ARM9 is compatible with generic, but we want to use 3799 * write-through caching for now. 3800 */ 3801 pmap_pte_init_generic(); 3802 3803 pte_l1_s_cache_mode = L1_S_C; 3804 pte_l2_l_cache_mode = L2_C; 3805 pte_l2_s_cache_mode = L2_C; 3806 } 3807 #endif /* CPU_ARM9 */ 3808 #endif /* ARM_MMU_GENERIC == 1 */ 3809 3810 #if ARM_MMU_XSCALE == 1 3811 void 3812 pmap_pte_init_xscale(void) 3813 { 3814 uint32_t auxctl; 3815 3816 pte_l1_s_cache_mode = L1_S_B|L1_S_C; 3817 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; 3818 3819 pte_l2_l_cache_mode = L2_B|L2_C; 3820 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; 3821 3822 pte_l2_s_cache_mode = L2_B|L2_C; 3823 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; 3824 3825 #ifdef XSCALE_CACHE_WRITE_THROUGH 3826 /* 3827 * Some versions of the XScale core have various bugs in 3828 * their cache units, the work-around for which is to run 3829 * the cache in write-through mode. Unfortunately, this 3830 * has a major (negative) impact on performance. So, we 3831 * go ahead and run fast-and-loose, in the hopes that we 3832 * don't line up the planets in a way that will trip the 3833 * bugs. 3834 * 3835 * However, we give you the option to be slow-but-correct. 3836 */ 3837 pte_l1_s_cache_mode = L1_S_C; 3838 pte_l2_l_cache_mode = L2_C; 3839 pte_l2_s_cache_mode = L2_C; 3840 #endif /* XSCALE_CACHE_WRITE_THROUGH */ 3841 3842 pte_l2_s_prot_u = L2_S_PROT_U_xscale; 3843 pte_l2_s_prot_w = L2_S_PROT_W_xscale; 3844 pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; 3845 3846 pte_l1_s_proto = L1_S_PROTO_xscale; 3847 pte_l1_c_proto = L1_C_PROTO_xscale; 3848 pte_l2_s_proto = L2_S_PROTO_xscale; 3849 3850 pmap_copy_page_func = pmap_copy_page_xscale; 3851 pmap_zero_page_func = pmap_zero_page_xscale; 3852 3853 /* 3854 * Disable ECC protection of page table access, for now. 3855 */ 3856 __asm __volatile("mrc p15, 0, %0, c1, c0, 1" 3857 : "=r" (auxctl)); 3858 auxctl &= ~XSCALE_AUXCTL_P; 3859 __asm __volatile("mcr p15, 0, %0, c1, c0, 1" 3860 : 3861 : "r" (auxctl)); 3862 } 3863 3864 /* 3865 * xscale_setup_minidata: 3866 * 3867 * Set up the mini-data cache clean area. 
We require the 3868 * caller to allocate the right amount of physically and 3869 * virtually contiguous space. 3870 */ 3871 void 3872 xscale_setup_minidata(vaddr_t l1pt, vaddr_t va, paddr_t pa) 3873 { 3874 extern vaddr_t xscale_minidata_clean_addr; 3875 extern vsize_t xscale_minidata_clean_size; /* already initialized */ 3876 pd_entry_t *pde = (pd_entry_t *) l1pt; 3877 pt_entry_t *pte; 3878 vsize_t size; 3879 uint32_t auxctl; 3880 3881 xscale_minidata_clean_addr = va; 3882 3883 /* Round it to page size. */ 3884 size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; 3885 3886 for (; size != 0; 3887 va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { 3888 pte = (pt_entry_t *) 3889 kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME); 3890 if (pte == NULL) 3891 panic("xscale_setup_minidata: can't find L2 table for " 3892 "VA 0x%08lx", va); 3893 pte[(va >> PGSHIFT) & 0x3ff] = L2_S_PROTO | pa | 3894 L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 3895 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); 3896 } 3897 3898 /* 3899 * Configure the mini-data cache for write-back with 3900 * read/write-allocate. 3901 * 3902 * NOTE: In order to reconfigure the mini-data cache, we must 3903 * make sure it contains no valid data! In order to do that, 3904 * we must issue a global data cache invalidate command! 3905 * 3906 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! 3907 * THIS IS VERY IMPORTANT! 3908 */ 3909 3910 /* Invalidate data and mini-data. */ 3911 __asm __volatile("mcr p15, 0, %0, c7, c6, 0" 3912 : 3913 : "r" (auxctl)); 3914 3915 3916 __asm __volatile("mrc p15, 0, %0, c1, c0, 1" 3917 : "=r" (auxctl)); 3918 auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; 3919 __asm __volatile("mcr p15, 0, %0, c1, c0, 1" 3920 : 3921 : "r" (auxctl)); 3922 } 3923 #endif /* ARM_MMU_XSCALE == 1 */ 3924