/*	$OpenBSD: uvm_km.c,v 1.22 2001/11/11 01:16:56 art Exp $	*/
/*	$NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.   submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   the size of each
 * kernel object is equal to the size of kernel virtual address space (i.e.
 * the value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().   each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).   for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].   if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.   this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
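
/*
 * to restate the offset rule above in code form (an illustrative sketch,
 * not a function defined in this file): translating between a kernel
 * virtual address and its kernel_object offset is simply
 *
 *	vaddr_t offset = kva - vm_map_min(kernel_map);
 *	vaddr_t kva    = vm_map_min(kernel_map) + offset;
 *
 * the same computation appears in uvm_km_kmemalloc() and uvm_km_alloc1()
 * below.
 */
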
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

/*
 * All pager operations here are NULL, but the object must have
 * a pager ops vector associated with it; various places assume
 * it to be so.
 */
static struct uvm_pagerops	km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the interrupt-safe map list.
	 */
	LIST_INIT(&vmi_list);
	simple_lock_init(&vmi_list_slock);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}
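
/*
 * illustrative sketch (not code from this file; the caller shown and the
 * "kvm_start"/"kvm_end" names are assumptions): machine-independent startup
 * code hands uvm_km_init() the boundary between KVA that is already in use
 * (kernel text, data, bss, bootstrap structures) and the end of KVA:
 *
 *	vaddr_t kvm_start = round_page(first_free_kernel_va);
 *	vaddr_t kvm_end   = VM_MAX_KERNEL_ADDRESS;
 *
 *	uvm_km_init(kvm_start, kvm_end);
 *
 * after this returns, kernel_map is installed and submaps can be carved
 * out of it with uvm_km_suballoc() below.
 */
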
/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
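
/*
 * illustrative sketch of a typical boot-time caller (the size expression
 * and the "kmem_map_store" name are assumptions; the real callers live in
 * uvm_init() and machine-dependent code): carving the interrupt-safe
 * kmem_map out of kernel_map looks roughly like
 *
 *	vaddr_t kmem_min, kmem_max;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmem_min, &kmem_max,
 *	    nkmempages << PAGE_SHIFT, VM_MAP_INTRSAFE, FALSE,
 *	    &kmem_map_store);
 *
 * afterwards all mapping activity in [kmem_min, kmem_max) goes through
 * kmem_map (and is serialized by kmem_map's lock), not kernel_map.
 */
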
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4	/* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
		ppnext = TAILQ_NEXT(pp, listq);
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);
}
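
/*
 * illustrative sketch of the caller's side (the call actually lives in the
 * unmap path in uvm_map.c; the entry-handling shown here is an assumption,
 * not code from this file): when a map entry backed by a kernel object is
 * torn down, the object pages behind the dying range are tossed right away,
 * roughly
 *
 *	if (UVM_ET_ISOBJ(entry) &&
 *	    UVM_OBJ_IS_KERN_OBJECT(entry->object.uvm_obj))
 *		uvm_km_pgremove(entry->object.uvm_obj,
 *		    entry->start - vm_map_min(kernel_map),
 *		    entry->end - vm_map_min(kernel_map));
 *
 * note that the start/end passed in are object offsets, not virtual
 * addresses (see the offset rule in the overview comment above).
 */
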
/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(UVM_OBJ_IS_INTRSAFE_OBJECT(uobj));
	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		KASSERT((pp->flags & PG_BUSY) == 0);
		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = TAILQ_FIRST(&uobj->memq); pp != NULL; pp = ppnext) {
		ppnext = TAILQ_NEXT(pp, listq);
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		KASSERT((pp->flags & PG_BUSY) == 0);
		KASSERT((pp->pqflags & PQ_ACTIVE) == 0);
		KASSERT((pp->pqflags & PQ_INACTIVE) == 0);
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */

		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
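
/*
 * illustrative sketch of the intended caller (the real one is the kernel
 * malloc(); the local names "npg" and "canwait" here are assumptions):
 * wired pages for malloc come from kmem_map/kmem_object, at splvm, roughly
 *
 *	int s = splvm();
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    npg << PAGE_SHIFT, canwait ? 0 : UVM_KMF_NOWAIT);
 *	splx(s);
 *	if (va == 0)
 *		... caller must cope with the failure ...
 */
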
/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    0)) != KERN_SUCCESS)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    FALSE, "km_alloc", 0);
			continue;   /* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (__predict_false(pg == NULL)) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
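
/*
 * illustrative sketch (the caller and the "BUFSZ" constant are assumptions,
 * not code from this file): a subsystem that needs a zeroed, wired scratch
 * buffer in kernel_map might do
 *
 *	vaddr_t buf = uvm_km_alloc1(kernel_map, BUFSZ, TRUE);
 *	... use the buffer ...
 *	uvm_km_free(kernel_map, buf, BUFSZ);
 *
 * uvm_km_free_wakeup() is used instead of uvm_km_free() when the map may
 * have sleepers from uvm_km_valloc_wait() that should be woken once
 * address space becomes free again.
 */
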
/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}

vaddr_t
uvm_km_valloc_align(map, size, align)
	vm_map_t map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    0)) != KERN_SUCCESS)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	vm_map_t map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif
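
/*
 * illustrative sketch (the exact caller is an assumption): pageable,
 * demand-filled KVA is what callers like the exec argument handling use,
 * e.g.
 *
 *	vaddr_t argva = uvm_km_valloc_wait(exec_map, NCARGS);
 *	... copy arguments in, pages fault in from kernel_object on demand ...
 *	uvm_km_free_wakeup(exec_map, argva, NCARGS);
 *
 * the _wait variant sleeps on the map until address space becomes free,
 * which pairs with the wakeup() done in uvm_km_free_wakeup() above.
 */
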
/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
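
/*
 * illustrative sketch (how the pool allocator's page hooks use these is an
 * assumption, not code from this file): a pool backed by kmem_map would
 * obtain and release its pages roughly like
 *
 *	vaddr_t va = uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object,
 *	    FALSE);
 *	... carve the page into pool items ...
 *	uvm_km_free_poolpage1(kmem_map, va);
 *
 * when PMAP_MAP_POOLPAGE is defined, no KVA from the map is consumed at
 * all; the page is reached through the pmap's direct-mapped segment
 * instead.
 */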