/*	$NetBSD: uvm_km.c,v 1.56 2002/03/07 20:15:32 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *               malloc.  *** access to kmem_map must be protected
 *               by splvm() because we are allowed to call malloc()
 *               at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
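
/*
 * a minimal, compiled-out sketch of the offset convention described in the
 * overview comment above.  "demo_va" and "demo_offset" are hypothetical
 * names introduced only for illustration; uvm_km_alloc() and
 * vm_map_min(kernel_map) are the interfaces the comment itself refers to.
 */
#if 0
static void
uvm_km_offset_demo(void)
{
        vaddr_t demo_va;
        voff_t demo_offset;

        /* allocate one wired page of KVA in the kernel map */
        demo_va = uvm_km_alloc(kernel_map, PAGE_SIZE);

        /*
         * the page backing demo_va lives in kernel_object at this offset.
         * e.g. with VM_MIN_KERNEL_ADDRESS == 0xf8000000 and a returned
         * demo_va of 0xf8235000, demo_offset is 0x235000.
         */
        demo_offset = demo_va - vm_map_min(kernel_map);
}
#endif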

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.56 2002/03/07 20:15:32 thorpej Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map            kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
        vaddr_t start, end;
{
        vaddr_t base = VM_MIN_KERNEL_ADDRESS;

        /*
         * next, init kernel memory objects.
         */

        /* kernel_object: for pageable anonymous kernel memory */
        uao_init();
        uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
            VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

        /*
         * init the map and reserve any space that might already
         * have been allocated kernel space before installing.
         */

        uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
        kernel_map_store.pmap = pmap_kernel();
        if (start != base &&
            uvm_map(&kernel_map_store, &base, start - base, NULL,
                UVM_UNKNOWN_OFFSET, 0,
                UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
                UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
                panic("uvm_km_init: could not reserve space for kernel");

        /*
         * install!
         */

        kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
        struct vm_map *map;
        vaddr_t *min, *max;             /* IN/OUT, OUT */
        vsize_t size;
        int flags;
        boolean_t fixed;
        struct vm_map *submap;
{
        int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

        size = round_page(size);        /* round up to pagesize */

        /*
         * first allocate a blank spot in the parent map
         */

        if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, mapflags)) != 0) {
                panic("uvm_km_suballoc: unable to allocate space in parent map");
        }

        /*
         * set VM bounds (min is filled in by uvm_map)
         */

        *max = *min + size;

        /*
         * add references to pmap and create or init the submap
         */

        pmap_reference(vm_map_pmap(map));
        if (submap == NULL) {
                submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
                if (submap == NULL)
                        panic("uvm_km_suballoc: unable to create submap");
        } else {
                uvm_map_setup(submap, *min, *max, flags);
                submap->pmap = vm_map_pmap(map);
        }

        /*
         * now let uvm_map_submap plug it in...
         */

        if (uvm_map_submap(map, *min, *max, submap) != 0)
                panic("uvm_km_suballoc: submap allocation failed");

        return(submap);
}
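
/*
 * a hedged, compiled-out sketch of how a boot-time submap is typically
 * carved out of kernel_map with uvm_km_suballoc(), roughly along the lines
 * of the kmem_map setup in the kernel malloc code.  "demo_map",
 * "demo_map_store" and "DEMO_MAP_SIZE" are hypothetical names, not part
 * of this file.
 */
#if 0
static struct vm_map demo_map_store;
struct vm_map *demo_map;

static void
demo_map_init(void)
{
        vaddr_t base, limit;

        demo_map = uvm_km_suballoc(kernel_map, &base, &limit,
            DEMO_MAP_SIZE,              /* hypothetical size, page-rounded */
            VM_MAP_INTRSAFE,            /* submap is used at interrupt time */
            FALSE,                      /* let uvm_map pick the address */
            &demo_map_store);           /* static storage; no malloc yet */
}
#endif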

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
        struct uvm_object *uobj;
        vaddr_t start, end;
{
        struct vm_page *pg;
        voff_t curoff, nextoff;
        int swpgonlydelta = 0;
        UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

        KASSERT(uobj->pgops == &aobj_pager);
        simple_lock(&uobj->vmobjlock);

        for (curoff = start; curoff < end; curoff = nextoff) {
                nextoff = curoff + PAGE_SIZE;
                pg = uvm_pagelookup(uobj, curoff);
                if (pg != NULL && pg->flags & PG_BUSY) {
                        pg->flags |= PG_WANTED;
                        UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
                            "km_pgrm", 0);
                        simple_lock(&uobj->vmobjlock);
                        nextoff = curoff;
                        continue;
                }

                /*
                 * free the swap slot, then the page.
                 */

                if (pg == NULL &&
                    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) != 0) {
                        swpgonlydelta++;
                }
                uao_dropswap(uobj, curoff >> PAGE_SHIFT);
                if (pg != NULL) {
                        uvm_lock_pageq();
                        uvm_pagefree(pg);
                        uvm_unlock_pageq();
                }
        }
        simple_unlock(&uobj->vmobjlock);

        if (swpgonlydelta > 0) {
                simple_lock(&uvm.swap_data_lock);
                KASSERT(uvmexp.swpgonly >= swpgonlydelta);
                uvmexp.swpgonly -= swpgonlydelta;
                simple_unlock(&uvm.swap_data_lock);
        }
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
        vaddr_t start, end;
{
        struct vm_page *pg;
        paddr_t pa;
        UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

        for (; start < end; start += PAGE_SIZE) {
                if (!pmap_extract(pmap_kernel(), start, &pa)) {
                        continue;
                }
                pg = PHYS_TO_VM_PAGE(pa);
                KASSERT(pg);
                KASSERT(pg->uobject == NULL && pg->uanon == NULL);
                uvm_pagefree(pg);
        }
}
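
/*
 * a hedged, compiled-out sketch of the caller contract for the two
 * pgremove variants above, mirroring what the uvm_unmap_remove() path
 * does when it tears down a kernel mapping.  "entry" stands in for a
 * hypothetical kernel map entry; note how kernel_object offsets are
 * derived from the VAs per the overview comment.
 */
#if 0
static void
demo_toss_pages(struct vm_map_entry *entry)
{
        if (UVM_ET_ISOBJ(entry)) {
                /* kernel_object pages: offset = VA minus map minimum */
                uvm_km_pgremove(entry->object.uvm_obj,
                    entry->start - vm_map_min(kernel_map),
                    entry->end - vm_map_min(kernel_map));
        } else {
                /* intrsafe maps have no object; work from the VAs */
                uvm_km_pgremove_intrsafe(entry->start, entry->end);
                pmap_kremove(entry->start, entry->end - entry->start);
        }
}
#endif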

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *    lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
        struct vm_map *map;
        struct uvm_object *obj;
        vsize_t size;
        int flags;
{
        vaddr_t kva, loopva;
        vaddr_t offset;
        vsize_t loopsize;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
            map, obj, size, flags);
        KASSERT(vm_map_pmap(map) == pmap_kernel());

        /*
         * setup for call
         */

        size = round_page(size);
        kva = vm_map_min(map);  /* hint */

        /*
         * allocate some virtual space
         */

        if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
            0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
                UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * if all we wanted was VA, return now
         */

        if (flags & UVM_KMF_VALLOC) {
                UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
                return(kva);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate and map in the memory... note that we are the only
         * ones who should ever get a handle on this area of VM.
         */

        loopva = kva;
        loopsize = size;
        while (loopsize) {
                if (obj) {
                        simple_lock(&obj->vmobjlock);
                }
                pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
                if (__predict_true(pg != NULL)) {
                        pg->flags &= ~PG_BUSY;  /* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                if (obj) {
                        simple_unlock(&obj->vmobjlock);
                }

                /*
                 * out of memory?
                 */

                if (__predict_false(pg == NULL)) {
                        if (flags & UVM_KMF_NOWAIT) {
                                /* free everything! */
                                uvm_unmap(map, kva, kva + size);
                                return(0);
                        } else {
                                uvm_wait("km_getwait2");        /* sleep here */
                                continue;
                        }
                }

                /*
                 * map it in
                 */

                if (obj == NULL) {
                        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
                            VM_PROT_ALL);
                } else {
                        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                            UVM_PROT_ALL,
                            PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
                }
                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                loopsize -= PAGE_SIZE;
        }

        pmap_update(pmap_kernel());

        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
        struct vm_map *map;
        vaddr_t addr;
        vsize_t size;
{
        uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
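
/*
 * a hedged, compiled-out usage sketch for the wired-memory path above,
 * along the lines of what the kernel malloc does internally.  "demo_*"
 * names are hypothetical; the use of kmem_map at splvm() follows the
 * locking rule stated in the overview comment.  treat this as an
 * illustration of the interface, not real kernel code.
 */
#if 0
static void *
demo_wired_alloc(vsize_t len)
{
        vaddr_t va;
        int s;

        s = splvm();            /* kmem_map may be touched from interrupts */
        va = uvm_km_kmemalloc(kmem_map, NULL, round_page(len),
            UVM_KMF_NOWAIT);
        splx(s);
        return ((void *)va);    /* 0 if no VA or no physical pages */
}

static void
demo_wired_free(void *p, vsize_t len)
{
        int s;

        s = splvm();
        uvm_km_free(kmem_map, (vaddr_t)p, round_page(len));
        splx(s);
}
#endif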

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
        struct vm_map *map;
        vaddr_t addr;
        vsize_t size;
{
        struct vm_map_entry *dead_entries;

        vm_map_lock(map);
        uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
            &dead_entries);
        wakeup(map);
        vm_map_unlock(map);
        if (dead_entries != NULL)
                uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
        struct vm_map *map;
        vsize_t size;
        boolean_t zeroit;
{
        vaddr_t kva, loopva, offset;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
        KASSERT(vm_map_pmap(map) == pmap_kernel());

        size = round_page(size);
        kva = vm_map_min(map);          /* hint */

        /*
         * allocate some virtual space
         */

        if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
            UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
            UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
                UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate the memory.
         */

        loopva = kva;
        while (size) {
                simple_lock(&uvm.kernel_object->vmobjlock);
                KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
                pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;  /* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                simple_unlock(&uvm.kernel_object->vmobjlock);
                if (pg == NULL) {
                        uvm_wait("km_alloc1w"); /* wait for memory */
                        continue;
                }
                pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        pmap_update(map->pmap);

        /*
         * zero on request (note that "size" is now zero due to the above loop
         * so we need to subtract kva from loopva to reconstruct the size).
         */

        if (zeroit)
                memset((caddr_t)kva, 0, loopva - kva);
        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
        struct vm_map *map;
        vsize_t size;
{
        return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(map, size, align)
        struct vm_map *map;
        vsize_t size;
        vsize_t align;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc_align"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
        KASSERT(vm_map_pmap(map) == pmap_kernel());

        size = round_page(size);
        kva = vm_map_min(map);          /* hint */

        /*
         * allocate some virtual space.  will be demand filled by
         * kernel_object.
         */

        if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
            UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
            UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
                UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
                return(0);
        }

        UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}
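
/*
 * a hedged, compiled-out sketch contrasting the two allocators above:
 * uvm_km_alloc1() wires pages immediately, while uvm_km_valloc() only
 * reserves VA that kernel_object fills with zeroed pages at first touch.
 * "demo_p" is a hypothetical name used only for illustration.
 */
#if 0
static void
demo_pageable_alloc(void)
{
        char *demo_p;

        demo_p = (char *)uvm_km_valloc(kernel_map, PAGE_SIZE);
        if (demo_p == NULL)
                return;                 /* no VA left in kernel_map */

        /* first store faults in a zero-filled page from kernel_object */
        demo_p[0] = 1;

        uvm_km_free(kernel_map, (vaddr_t)demo_p, PAGE_SIZE);
}
#endif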

/*
 * uvm_km_valloc_prefer_wait: allocate zero-fill memory in the kernel's
 * address space, preferring the given kernel_object offset.
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
        struct vm_map *map;
        vsize_t size;
        voff_t prefer;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
        KASSERT(vm_map_pmap(map) == pmap_kernel());

        size = round_page(size);
        if (size > vm_map_max(map) - vm_map_min(map))
                return(0);

        for (;;) {
                kva = vm_map_min(map);          /* hint */

                /*
                 * allocate some virtual space.  will be demand filled
                 * by kernel_object.
                 */

                if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
                    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
                    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
                        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
                        return(kva);
                }

                /*
                 * failed.  sleep for a while (on map)
                 */

                UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
                tsleep((caddr_t)map, PVM, "vallocwait", 0);
        }
        /*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
        struct vm_map *map;
        vsize_t size;
{
        return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
        struct vm_map *map;
        struct uvm_object *obj;
        boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
        struct vm_page *pg;
        vaddr_t va;

 again:
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
        if (__predict_false(pg == NULL)) {
                if (waitok) {
                        uvm_wait("plpg");
                        goto again;
                } else
                        return (0);
        }
        va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
        if (__predict_false(va == 0))
                uvm_pagefree(pg);
        return (va);
#else
        vaddr_t va;
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splvm
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splvm in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splvm();
        va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
            waitok ? 0 : UVM_KMF_NOWAIT);
        splx(s);
        return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
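
/*
 * a hedged, compiled-out sketch of the PMAP_MAP_POOLPAGE()/
 * PMAP_UNMAP_POOLPAGE() hooks used above.  on ports with a direct-mapped
 * segment these can translate between a physical address and a KVA
 * without touching kernel_map at all.  DEMO_PHYS_TO_DIRECT and
 * DEMO_DIRECT_TO_PHYS are hypothetical stand-ins for a port's
 * direct-map conversion macros.
 */
#if 0
#define PMAP_MAP_POOLPAGE(pa)   DEMO_PHYS_TO_DIRECT(pa)
#define PMAP_UNMAP_POOLPAGE(va) DEMO_DIRECT_TO_PHYS(va)
#endif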

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
        struct vm_map *map;
        vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
        paddr_t pa;

        pa = PMAP_UNMAP_POOLPAGE(addr);
        uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splvm
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splvm in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splvm();
        uvm_km_free(map, addr, PAGE_SIZE);
        splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}