/*	$OpenBSD: uvm_km.c,v 1.111 2013/05/30 18:02:04 tedu Exp $	*/
/*	$NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * The VM system has several standard kernel submaps:
 *  kmem_map: Contains only wired kernel memory for malloc(9).
 *	      Note: All access to this map must be protected by splvm as
 *	      calls to malloc(9) are allowed in interrupt handlers.
 *  exec_map: Memory to hold arguments to system calls is allocated from
 *	      this map.
 *	      XXX: This is primarily used to artificially limit the number
 *	      of concurrent processes doing an exec.
 *  phys_map: Buffers for vmapbuf (physio) are allocated from this map.
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  each of these submaps manages its own pages.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
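 *
 * to make the offset rule concrete, a short sketch (illustrative only,
 * using functions defined later in this file):
 *
 *	vaddr_t kva = uvm_km_alloc1(kernel_map, PAGE_SIZE, 0, FALSE);
 *	voff_t off = kva - vm_map_min(kernel_map);
 *
 * here "off" is the offset of the backing page in uvm.kernel_object,
 * and a later uvm_km_free(kernel_map, kva, PAGE_SIZE) both unmaps the
 * VA and (via uvm_km_pgremove()) frees that backing page immediately.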
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/* Unconstrained range. */
struct uvm_constraint_range	no_constraint = { 0x0, (paddr_t)-1 };

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end,
#ifdef KVA_GUARDPAGES
	    VM_MAP_PAGEABLE | VM_MAP_GUARDPAGES
#else
	    VM_MAP_PAGEABLE
#endif
	    );
	kernel_map_store.pmap = pmap_kernel();
	if (base != start && uvm_map(&kernel_map_store, &base, start - base,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size,
    int flags, boolean_t fixed, struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
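	 * (this converts the blank entry we reserved above into a
	 * submap entry, so later lookups in [*min, *max) are redirected
	 * to `submap'.)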
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */
void
uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end)
{
	struct vm_page *pp;
	voff_t curoff;
	int slot;

	KASSERT(uobj->pgops == &aobj_pager);

	for (curoff = start; curoff < end; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp && pp->pg_flags & PG_BUSY) {
			atomic_setbits_int(&pp->pg_flags, PG_WANTED);
			UVM_WAIT(pp, 0, "km_pgrm", 0);
			curoff -= PAGE_SIZE; /* loop back to us */
			continue;
		}

		/* free the swap slot, then the page */
		slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT);

		if (pp != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		} else if (slot != 0) {
			uvmexp.swpgonly--;
		}
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;

	for (va = start; va < end; va += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), va, &pa))
			continue;
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("uvm_km_pgremove_intrsafe: no page");
		uvm_pagefree(pg);
	}
}

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 * => low, high, alignment, boundary, nsegs are the corresponding parameters
 *	to uvm_pglistalloc
 * => flags: ZERO - correspond to uvm_pglistalloc flags
 */

vaddr_t
uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size,
    vsize_t valign, int flags, paddr_t low, paddr_t high, paddr_t alignment,
    paddr_t boundary, int nsegs)
{
	vaddr_t kva, loopva;
	voff_t offset;
	struct vm_page *pg;
	struct pglist pgl;
	int pla_flags;

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	/* UVM_KMF_VALLOC => !UVM_KMF_ZERO */
	KASSERT(!(flags & UVM_KMF_VALLOC) ||
	    !(flags & UVM_KMF_ZERO));

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */
	if (nsegs == 0)
		nsegs = atop(size);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    valign, UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	if (obj != NULL)
		offset = kva - vm_map_min(kernel_map);
	else
		offset = 0;

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */
	TAILQ_INIT(&pgl);
	pla_flags = 0;
	KASSERT(uvmexp.swpgonly <= uvmexp.swpages);
	if ((flags & UVM_KMF_NOWAIT) ||
	    ((flags & UVM_KMF_CANFAIL) &&
	    uvmexp.swpages - uvmexp.swpgonly <= atop(size)))
		pla_flags |= UVM_PLA_NOWAIT;
	else
		pla_flags |= UVM_PLA_WAITOK;
	if (flags & UVM_KMF_ZERO)
		pla_flags |= UVM_PLA_ZERO;
	if (uvm_pglistalloc(size, low, high, alignment, boundary, &pgl, nsegs,
	    pla_flags) != 0) {
		/* Failed. */
		uvm_unmap(map, kva, kva + size);
		return (0);
	}

	loopva = kva;
	while (loopva != kva + size) {
		pg = TAILQ_FIRST(&pgl);
		TAILQ_REMOVE(&pgl, pg, pageq);
		uvm_pagealloc_pg(pg, obj, offset, NULL);
		atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map.
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_RW,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	KASSERT(TAILQ_EMPTY(&pgl));
	pmap_update(pmap_kernel());

	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
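 * => the wakeup(map) below pairs with the tsleep(map, ...) calls in
 *    uvm_km_valloc_prefer_wait() and km_alloc(); a blocked allocator
 *    retries its uvm_map() once space has been freed.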
 */

void
uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size)
{
	struct uvm_map_deadq dead_entries;

	vm_map_lock(map);
	TAILQ_INIT(&dead_entries);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries, FALSE, TRUE);
	wakeup(map);
	vm_map_unlock(map);

	uvm_unmap_detach(&dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit)
{
	vaddr_t kva, loopva;
	voff_t offset;
	struct vm_page *pg;

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
		if (__predict_false(pg == NULL)) {
			if (curproc == uvm.pagedaemon_proc) {
				/*
				 * It is unfeasible for the page daemon to
				 * sleep for memory, so free what we have
				 * allocated and fail.
				 */
				uvm_unmap(map, kva, loopva - kva);
				return (0);
			} else {
				uvm_wait("km_alloc1w");	/* wait for memory */
				continue;
			}
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(struct vm_map *map, vsize_t size)
{
	return(uvm_km_valloc_align(map, size, 0, 0));
}

vaddr_t
uvm_km_valloc_try(struct vm_map *map, vsize_t size)
{
	return(uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK));
}

vaddr_t
uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags)
{
	vaddr_t kva;

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
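	 * (no physical pages are allocated here; on first access,
	 * uvm_fault() fills the range with zero-fill pages backed by
	 * uvm.kernel_object.)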
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, flags)) != 0)) {
		return(0);
	}

	return(kva);
}

/*
 * uvm_km_valloc_prefer_wait: allocate zero-fill memory in the kernel's
 * address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 * => uvm_km_valloc_wait() below is the same without an offset preference
 */

vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
{
	vaddr_t kva;

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		tsleep(map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(struct vm_map *map, vsize_t size)
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

#if defined(__HAVE_PMAP_DIRECT)
/*
 * uvm_km_page allocator, __HAVE_PMAP_DIRECT arch
 * On architectures with machine memory direct mapped into a portion
 * of KVM, we have very little work to do.  Just get a physical page,
 * and find and return its VA.
 */
void
uvm_km_page_init(void)
{
	/* nothing */
}

#else
/*
 * uvm_km_page allocator, non __HAVE_PMAP_DIRECT archs
 * This is a special allocator that uses a reserve of free pages
 * to fulfill requests.  It is fast and interrupt safe, but can only
 * return page sized regions.  Its primary use is as a backend for pool.
 *
 * The memory returned is allocated from the larger kernel_map, sparing
 * pressure on the small interrupt-safe kmem_map.  It is wired, but
 * not zero filled.
 */

struct uvm_km_pages uvm_km_pages;

void uvm_km_createthread(void *);
void uvm_km_thread(void *);
struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *);

/*
 * Allocate the initial reserve, and create the thread which will
 * keep the reserve full.  For bootstrapping, we allocate more than
 * the lowat amount, because it may be a while before the thread is
 * running.
 */
void
uvm_km_page_init(void)
{
	int	lowat_min;
	int	i;
	int	len, bulk;
	vaddr_t	addr;

	mtx_init(&uvm_km_pages.mtx, IPL_VM);
	if (!uvm_km_pages.lowat) {
		/* based on physmem, calculate a good value here */
		uvm_km_pages.lowat = physmem / 256;
		lowat_min = physmem < atop(16 * 1024 * 1024) ? 32 : 128;
		if (uvm_km_pages.lowat < lowat_min)
			uvm_km_pages.lowat = lowat_min;
	}
	if (uvm_km_pages.lowat > UVM_KM_PAGES_LOWAT_MAX)
		uvm_km_pages.lowat = UVM_KM_PAGES_LOWAT_MAX;
	uvm_km_pages.hiwat = 4 * uvm_km_pages.lowat;
	if (uvm_km_pages.hiwat > UVM_KM_PAGES_HIWAT_MAX)
		uvm_km_pages.hiwat = UVM_KM_PAGES_HIWAT_MAX;

	/*
	 * Allocate all pages in as few allocations as possible.
	 */
	len = 0;
	bulk = uvm_km_pages.hiwat;
	while (len < uvm_km_pages.hiwat && bulk > 0) {
		bulk = MIN(bulk, uvm_km_pages.hiwat - len);
		addr = vm_map_min(kernel_map);
		if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) {
			bulk /= 2;
			continue;
		}

		for (i = len; i < len + bulk; i++, addr += PAGE_SIZE)
			uvm_km_pages.page[i] = addr;
		len += bulk;
	}

	uvm_km_pages.free = len;
	for (i = len; i < UVM_KM_PAGES_HIWAT_MAX; i++)
		uvm_km_pages.page[i] = 0;

	/* tone down if really high */
	if (uvm_km_pages.lowat > 512)
		uvm_km_pages.lowat = 512;

	kthread_create_deferred(uvm_km_createthread, NULL);
}

void
uvm_km_createthread(void *arg)
{
	kthread_create(uvm_km_thread, NULL, &uvm_km_pages.km_proc, "kmthread");
}

/*
 * Endless loop.  We grab pages in increments of 16 pages, then
 * quickly swap them into the list.  At some point we can consider
 * returning memory to the system if we have too many free pages,
 * but that's not implemented yet.
 */
void
uvm_km_thread(void *arg)
{
	vaddr_t pg[16];
	int i;
	int allocmore = 0;
	struct uvm_km_free_page *fp = NULL;

	for (;;) {
		mtx_enter(&uvm_km_pages.mtx);
		if (uvm_km_pages.free >= uvm_km_pages.lowat &&
		    uvm_km_pages.freelist == NULL) {
			msleep(&uvm_km_pages.km_proc, &uvm_km_pages.mtx,
			    PVM, "kmalloc", 0);
		}
		allocmore = uvm_km_pages.free < uvm_km_pages.lowat;
		fp = uvm_km_pages.freelist;
		uvm_km_pages.freelist = NULL;
		uvm_km_pages.freelistlen = 0;
		mtx_leave(&uvm_km_pages.mtx);

		if (allocmore) {
			bzero(pg, sizeof(pg));
			for (i = 0; i < nitems(pg); i++) {
				pg[i] = vm_map_min(kernel_map);
				if (uvm_map(kernel_map, &pg[i], PAGE_SIZE,
				    NULL, UVM_UNKNOWN_OFFSET, 0,
				    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW,
				    UVM_INH_NONE, UVM_ADV_RANDOM,
				    UVM_KMF_TRYLOCK)) != 0) {
					pg[i] = 0;
					break;
				}
			}

			mtx_enter(&uvm_km_pages.mtx);
			for (i = 0; i < nitems(pg); i++) {
				if (uvm_km_pages.free ==
				    nitems(uvm_km_pages.page))
					break;
				else if (pg[i] != 0)
					uvm_km_pages.page[uvm_km_pages.free++]
					    = pg[i];
			}
			wakeup(&uvm_km_pages.free);
			mtx_leave(&uvm_km_pages.mtx);

			/*
			 * Cleanup left-over pages (if any).
			 */
			for (; i < nitems(pg); i++) {
				if (pg[i] != 0) {
					uvm_unmap(kernel_map,
					    pg[i], pg[i] + PAGE_SIZE);
				}
			}
		}
		while (fp) {
			fp = uvm_km_doputpage(fp);
		}
	}
}

struct uvm_km_free_page *
uvm_km_doputpage(struct uvm_km_free_page *fp)
{
	vaddr_t va = (vaddr_t)fp;
	struct vm_page *pg;
	int	freeva = 1;
	struct uvm_km_free_page *nextfp = fp->next;

	pg = uvm_atopg(va);

	pmap_kremove(va, PAGE_SIZE);
	pmap_update(kernel_map->pmap);

	mtx_enter(&uvm_km_pages.mtx);
	if (uvm_km_pages.free < uvm_km_pages.hiwat) {
		uvm_km_pages.page[uvm_km_pages.free++] = va;
		freeva = 0;
	}
	mtx_leave(&uvm_km_pages.mtx);

	if (freeva)
		uvm_unmap(kernel_map, va, va + PAGE_SIZE);

	uvm_pagefree(pg);
	return (nextfp);
}
#endif	/* !__HAVE_PMAP_DIRECT */

void *
km_alloc(size_t sz, const struct kmem_va_mode *kv,
    const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd)
{
	struct vm_map *map;
	struct vm_page *pg;
	struct pglist pgl;
	int mapflags = 0;
	vm_prot_t prot;
	int pla_flags;
	int pla_maxseg;
#ifdef __HAVE_PMAP_DIRECT
	paddr_t pa;
#endif
	vaddr_t va, sva;

	KASSERT(sz == round_page(sz));

	TAILQ_INIT(&pgl);

	if (kp->kp_nomem || kp->kp_pageable)
		goto alloc_va;

	pla_flags = kd->kd_waitok ? UVM_PLA_WAITOK : UVM_PLA_NOWAIT;
	pla_flags |= UVM_PLA_TRYCONTIG;
	if (kp->kp_zero)
		pla_flags |= UVM_PLA_ZERO;

	pla_maxseg = kp->kp_maxseg;
	if (pla_maxseg == 0)
		pla_maxseg = sz / PAGE_SIZE;

	if (uvm_pglistalloc(sz, kp->kp_constraint->ucr_low,
	    kp->kp_constraint->ucr_high, kp->kp_align, kp->kp_boundary,
	    &pgl, pla_maxseg, pla_flags)) {
		return (NULL);
	}

#ifdef __HAVE_PMAP_DIRECT
	if (kv->kv_align || kv->kv_executable)
		goto alloc_va;
#if 1
	/*
	 * For now, only do DIRECT mappings for single page
	 * allocations, until we figure out a good way to deal
	 * with contig allocations in km_free.
	 */
	if (!kv->kv_singlepage)
		goto alloc_va;
#endif
	/*
	 * Dubious optimization.  If we got a contig segment, just map it
	 * through the direct map.
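	 * (the loop below stops at the first page that is not
	 * physically contiguous with its predecessor; `pa' trails one
	 * entry behind, and the first iteration skips the comparison,
	 * so pa is never read before it is set.  pg == NULL afterwards
	 * means the entire list was one contiguous segment.)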
	 */
	TAILQ_FOREACH(pg, &pgl, pageq) {
		if (pg != TAILQ_FIRST(&pgl) &&
		    VM_PAGE_TO_PHYS(pg) != pa + PAGE_SIZE)
			break;
		pa = VM_PAGE_TO_PHYS(pg);
	}
	if (pg == NULL) {
		TAILQ_FOREACH(pg, &pgl, pageq) {
			vaddr_t v;
			v = pmap_map_direct(pg);
			if (pg == TAILQ_FIRST(&pgl))
				va = v;
		}
		return ((void *)va);
	}
#endif
alloc_va:
	if (kv->kv_executable) {
		prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
	} else {
		prot = VM_PROT_READ | VM_PROT_WRITE;
	}

	if (kp->kp_pageable) {
		KASSERT(kp->kp_object);
		KASSERT(!kv->kv_singlepage);
	} else {
		KASSERT(kp->kp_object == NULL);
	}

	if (kv->kv_singlepage) {
		KASSERT(sz == PAGE_SIZE);
#ifdef __HAVE_PMAP_DIRECT
		panic("km_alloc: DIRECT single page");
#else
		mtx_enter(&uvm_km_pages.mtx);
		while (uvm_km_pages.free == 0) {
			if (kd->kd_waitok == 0) {
				mtx_leave(&uvm_km_pages.mtx);
				uvm_pglistfree(&pgl);
				return NULL;
			}
			msleep(&uvm_km_pages.free, &uvm_km_pages.mtx, PVM,
			    "getpage", 0);
		}
		va = uvm_km_pages.page[--uvm_km_pages.free];
		if (uvm_km_pages.free < uvm_km_pages.lowat &&
		    curproc != uvm_km_pages.km_proc) {
			if (kd->kd_slowdown)
				*kd->kd_slowdown = 1;
			wakeup(&uvm_km_pages.km_proc);
		}
		mtx_leave(&uvm_km_pages.mtx);
#endif
	} else {
		struct uvm_object *uobj = NULL;

		if (kd->kd_trylock)
			mapflags |= UVM_KMF_TRYLOCK;

		if (kp->kp_object)
			uobj = *kp->kp_object;
try_map:
		map = *kv->kv_map;
		va = vm_map_min(map);
		if (uvm_map(map, &va, sz, uobj, kd->kd_prefer,
		    kv->kv_align, UVM_MAPFLAG(prot, prot, UVM_INH_NONE,
		    UVM_ADV_RANDOM, mapflags))) {
			if (kv->kv_wait && kd->kd_waitok) {
				tsleep(map, PVM, "km_allocva", 0);
				goto try_map;
			}
			uvm_pglistfree(&pgl);
			return (NULL);
		}
	}
	sva = va;
	TAILQ_FOREACH(pg, &pgl, pageq) {
		if (kp->kp_pageable)
			pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg),
			    prot, prot | PMAP_WIRED);
		else
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot);
		va += PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return ((void *)sva);
}

void
km_free(void *v, size_t sz, const struct kmem_va_mode *kv,
    const struct kmem_pa_mode *kp)
{
	vaddr_t sva, eva, va;
	struct vm_page *pg;
	struct pglist pgl;

	sva = va = (vaddr_t)v;
	eva = va + sz;

	if (kp->kp_nomem) {
		goto free_va;
	}

	if (kv->kv_singlepage) {
#ifdef __HAVE_PMAP_DIRECT
		pg = pmap_unmap_direct(va);
		uvm_pagefree(pg);
#else
		struct uvm_km_free_page *fp = v;
		mtx_enter(&uvm_km_pages.mtx);
		fp->next = uvm_km_pages.freelist;
		uvm_km_pages.freelist = fp;
		if (uvm_km_pages.freelistlen++ > 16)
			wakeup(&uvm_km_pages.km_proc);
		mtx_leave(&uvm_km_pages.mtx);
#endif
		return;
	}

	if (kp->kp_pageable) {
		pmap_remove(pmap_kernel(), sva, eva);
		pmap_update(pmap_kernel());
	} else {
		TAILQ_INIT(&pgl);
		for (va = sva; va < eva; va += PAGE_SIZE) {
			paddr_t pa;

			if (!pmap_extract(pmap_kernel(), va, &pa))
				continue;

			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL) {
				panic("km_free: unmanaged page 0x%lx\n", pa);
			}
			TAILQ_INSERT_TAIL(&pgl, pg, pageq);
		}
		pmap_kremove(sva, sz);
		pmap_update(pmap_kernel());
		uvm_pglistfree(&pgl);
	}
free_va:
	uvm_unmap(*kv->kv_map, sva, eva);
	if (kv->kv_wait)
		wakeup(*kv->kv_map);
}

const struct kmem_va_mode kv_any = {
	.kv_map = &kernel_map,
};

const struct kmem_va_mode kv_intrsafe = {
	.kv_map = &kmem_map,
};

const struct kmem_va_mode kv_page = {
	.kv_singlepage = 1
};

const struct kmem_pa_mode kp_dirty = {
	.kp_constraint = &no_constraint
};

const struct kmem_pa_mode kp_dma = {
	.kp_constraint = &dma_constraint
};

const struct kmem_pa_mode kp_dma_contig = {
	.kp_constraint = &dma_constraint,
	.kp_maxseg = 1
};

const struct kmem_pa_mode kp_dma_zero = {
	.kp_constraint = &dma_constraint,
	.kp_zero = 1
};

const struct kmem_pa_mode kp_zero = {
	.kp_constraint = &no_constraint,
	.kp_zero = 1
};

const struct kmem_pa_mode kp_pageable = {
	.kp_object = &uvm.kernel_object,
	.kp_pageable = 1
	/* XXX - kp_nomem, maybe, but we'll need to fix km_free. */
};

const struct kmem_pa_mode kp_none = {
	.kp_nomem = 1
};

const struct kmem_dyn_mode kd_waitok = {
	.kd_waitok = 1,
	.kd_prefer = UVM_UNKNOWN_OFFSET
};

const struct kmem_dyn_mode kd_nowait = {
	.kd_prefer = UVM_UNKNOWN_OFFSET
};

const struct kmem_dyn_mode kd_trylock = {
	.kd_trylock = 1,
	.kd_prefer = UVM_UNKNOWN_OFFSET
};
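
/*
 * Example (an illustrative sketch, not an interface defined in this
 * file): the mode structures above are combined at km_alloc() call
 * sites.  To get one zeroed, DMA-reachable, wired page and release it
 * again:
 *
 *	void *buf;
 *
 *	buf = km_alloc(PAGE_SIZE, &kv_any, &kp_dma_zero, &kd_waitok);
 *	if (buf == NULL)
 *		... handle failure; even with kd_waitok, km_alloc can
 *		... still fail when no KVA can be found in kernel_map
 *	...
 *	km_free(buf, PAGE_SIZE, &kv_any, &kp_dma_zero);
 */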