1 /* $OpenBSD: uvm_km.c,v 1.143 2021/03/26 13:40:05 mpi Exp $ */ 2 /* $NetBSD: uvm_km.c,v 1.42 2001/01/14 02:10:01 thorpej Exp $ */ 3 4 /* 5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 6 * Copyright (c) 1991, 1993, The Regents of the University of California. 7 * 8 * All rights reserved. 9 * 10 * This code is derived from software contributed to Berkeley by 11 * The Mach Operating System project at Carnegie-Mellon University. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 38 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 39 * 40 * 41 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 42 * All rights reserved. 43 * 44 * Permission to use, copy, modify and distribute this software and 45 * its documentation is hereby granted, provided that both the copyright 46 * notice and this permission notice appear in all copies of the 47 * software, derivative works or modified versions, and any portions 48 * thereof, and that both notices appear in supporting documentation. 49 * 50 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 51 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 52 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 53 * 54 * Carnegie Mellon requests users of this software to return to 55 * 56 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 57 * School of Computer Science 58 * Carnegie Mellon University 59 * Pittsburgh PA 15213-3890 60 * 61 * any improvements or extensions that they make and grant Carnegie the 62 * rights to redistribute these changes. 63 */ 64 65 /* 66 * uvm_km.c: handle kernel memory allocation and management 67 */ 68 69 /* 70 * overview of kernel memory management: 71 * 72 * the kernel virtual address space is mapped by "kernel_map." kernel_map 73 * starts at a machine-dependent address and is VM_KERNEL_SPACE_SIZE bytes 74 * large. 75 * 76 * the kernel_map has several "submaps." submaps can only appear in 77 * the kernel_map (user processes can't use them). submaps "take over" 78 * the management of a sub-range of the kernel's address space. submaps 79 * are typically allocated at boot time and are never released. kernel 80 * virtual address space that is mapped by a submap is locked by the 81 * submap's lock -- not the kernel_map's lock. 82 * 83 * thus, the useful feature of submaps is that they allow us to break 84 * up the locking and protection of the kernel address space into smaller 85 * chunks. 86 * 87 * The VM system has several standard kernel submaps: 88 * kmem_map: Contains only wired kernel memory for malloc(9). 89 * Note: All access to this map must be protected by splvm as 90 * calls to malloc(9) are allowed in interrupt handlers. 91 * exec_map: Memory to hold arguments to system calls are allocated from 92 * this map. 93 * XXX: This is primeraly used to artificially limit the number 94 * of concurrent processes doing an exec. 95 * phys_map: Buffers for vmapbuf (physio) are allocated from this map. 96 * 97 * the kernel allocates its private memory out of special uvm_objects whose 98 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 99 * are "special" and never die). all kernel objects should be thought of 100 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 101 * object is equal to the size of kernel virtual address space (i.e. 102 * VM_KERNEL_SPACE_SIZE). 103 * 104 * most kernel private memory lives in kernel_object. the only exception 105 * to this is for memory that belongs to submaps that must be protected 106 * by splvm(). each of these submaps manages their own pages. 107 * 108 * note that just because a kernel object spans the entire kernel virtual 109 * address space doesn't mean that it has to be mapped into the entire space. 110 * large chunks of a kernel object's space go unused either because 111 * that area of kernel VM is unmapped, or there is some other type of 112 * object mapped into that range (e.g. a vnode). for submap's kernel 113 * objects, the only part of the object that can ever be populated is the 114 * offsets that are managed by the submap. 115 * 116 * note that the "offset" in a kernel object is always the kernel virtual 117 * address minus the vm_map_min(kernel_map). 118 * example: 119 * suppose kernel_map starts at 0xf8000000 and the kernel does a 120 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 121 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 122 * then that means that the page at offset 0x235000 in kernel_object is 123 * mapped at 0xf8235000. 124 * 125 * kernel objects have one other special property: when the kernel virtual 126 * memory mapping them is unmapped, the backing memory in the object is 127 * freed right away. this is done with the uvm_km_pgremove() function. 128 * this has to be done because there is no backing store for kernel pages 129 * and no need to save them after they are no longer referenced. 130 */ 131 132 #include <sys/param.h> 133 #include <sys/systm.h> 134 #include <sys/proc.h> 135 #include <sys/kthread.h> 136 #include <uvm/uvm.h> 137 138 /* 139 * global data structures 140 */ 141 142 struct vm_map *kernel_map = NULL; 143 144 /* Unconstraint range. */ 145 struct uvm_constraint_range no_constraint = { 0x0, (paddr_t)-1 }; 146 147 /* 148 * local data structures 149 */ 150 static struct vm_map kernel_map_store; 151 152 /* 153 * uvm_km_init: init kernel maps and objects to reflect reality (i.e. 154 * KVM already allocated for text, data, bss, and static data structures). 155 * 156 * => KVM is defined by [base.. base + VM_KERNEL_SPACE_SIZE]. 157 * we assume that [base -> start] has already been allocated and that 158 * "end" is the end of the kernel image span. 159 */ 160 void 161 uvm_km_init(vaddr_t base, vaddr_t start, vaddr_t end) 162 { 163 /* kernel_object: for pageable anonymous kernel memory */ 164 uao_init(); 165 uvm.kernel_object = uao_create(VM_KERNEL_SPACE_SIZE, UAO_FLAG_KERNOBJ); 166 167 /* 168 * init the map and reserve already allocated kernel space 169 * before installing. 170 */ 171 172 uvm_map_setup(&kernel_map_store, pmap_kernel(), base, end, 173 #ifdef KVA_GUARDPAGES 174 VM_MAP_PAGEABLE | VM_MAP_GUARDPAGES 175 #else 176 VM_MAP_PAGEABLE 177 #endif 178 ); 179 if (base != start && uvm_map(&kernel_map_store, &base, start - base, 180 NULL, UVM_UNKNOWN_OFFSET, 0, 181 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 182 MAP_INHERIT_NONE, MADV_RANDOM, UVM_FLAG_FIXED)) != 0) 183 panic("uvm_km_init: could not reserve space for kernel"); 184 185 kernel_map = &kernel_map_store; 186 } 187 188 /* 189 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap 190 * is allocated all references to that area of VM must go through it. this 191 * allows the locking of VAs in kernel_map to be broken up into regions. 192 * 193 * => if `fixed' is true, *min specifies where the region described 194 * by the submap must start 195 * => if submap is non NULL we use that as the submap, otherwise we 196 * alloc a new map 197 */ 198 struct vm_map * 199 uvm_km_suballoc(struct vm_map *map, vaddr_t *min, vaddr_t *max, vsize_t size, 200 int flags, boolean_t fixed, struct vm_map *submap) 201 { 202 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0); 203 204 size = round_page(size); /* round up to pagesize */ 205 206 /* first allocate a blank spot in the parent map */ 207 if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0, 208 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 209 MAP_INHERIT_NONE, MADV_RANDOM, mapflags)) != 0) { 210 panic("uvm_km_suballoc: unable to allocate space in parent map"); 211 } 212 213 /* set VM bounds (min is filled in by uvm_map) */ 214 *max = *min + size; 215 216 /* add references to pmap and create or init the submap */ 217 pmap_reference(vm_map_pmap(map)); 218 if (submap == NULL) { 219 submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags); 220 if (submap == NULL) 221 panic("uvm_km_suballoc: unable to create submap"); 222 } else { 223 uvm_map_setup(submap, vm_map_pmap(map), *min, *max, flags); 224 } 225 226 /* 227 * now let uvm_map_submap plug in it... 228 */ 229 if (uvm_map_submap(map, *min, *max, submap) != 0) 230 panic("uvm_km_suballoc: submap allocation failed"); 231 232 return(submap); 233 } 234 235 /* 236 * uvm_km_pgremove: remove pages from a kernel uvm_object. 237 * 238 * => when you unmap a part of anonymous kernel memory you want to toss 239 * the pages right away. (this gets called from uvm_unmap_...). 240 */ 241 void 242 uvm_km_pgremove(struct uvm_object *uobj, vaddr_t start, vaddr_t end) 243 { 244 struct vm_page *pp; 245 voff_t curoff; 246 int slot; 247 int swpgonlydelta = 0; 248 249 KASSERT(uobj->pgops == &aobj_pager); 250 251 for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) { 252 pp = uvm_pagelookup(uobj, curoff); 253 if (pp && pp->pg_flags & PG_BUSY) { 254 atomic_setbits_int(&pp->pg_flags, PG_WANTED); 255 tsleep_nsec(pp, PVM, "km_pgrm", INFSLP); 256 curoff -= PAGE_SIZE; /* loop back to us */ 257 continue; 258 } 259 260 /* free the swap slot, then the page */ 261 slot = uao_dropswap(uobj, curoff >> PAGE_SHIFT); 262 263 if (pp != NULL) { 264 uvm_lock_pageq(); 265 uvm_pagefree(pp); 266 uvm_unlock_pageq(); 267 } else if (slot != 0) { 268 swpgonlydelta++; 269 } 270 } 271 272 if (swpgonlydelta > 0) { 273 KASSERT(uvmexp.swpgonly >= swpgonlydelta); 274 atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); 275 } 276 } 277 278 279 /* 280 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe" 281 * objects 282 * 283 * => when you unmap a part of anonymous kernel memory you want to toss 284 * the pages right away. (this gets called from uvm_unmap_...). 285 * => none of the pages will ever be busy, and none of them will ever 286 * be on the active or inactive queues (because these objects are 287 * never allowed to "page"). 288 */ 289 void 290 uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end) 291 { 292 struct vm_page *pg; 293 vaddr_t va; 294 paddr_t pa; 295 296 for (va = start; va < end; va += PAGE_SIZE) { 297 if (!pmap_extract(pmap_kernel(), va, &pa)) 298 continue; 299 pg = PHYS_TO_VM_PAGE(pa); 300 if (pg == NULL) 301 panic("uvm_km_pgremove_intrsafe: no page"); 302 uvm_pagefree(pg); 303 } 304 } 305 306 /* 307 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc() 308 * 309 * => we map wired memory into the specified map using the obj passed in 310 * => NOTE: we can return NULL even if we can wait if there is not enough 311 * free VM space in the map... caller should be prepared to handle 312 * this case. 313 * => we return KVA of memory allocated 314 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't 315 * lock the map 316 * => low, high, alignment, boundary, nsegs are the corresponding parameters 317 * to uvm_pglistalloc 318 * => flags: ZERO - correspond to uvm_pglistalloc flags 319 */ 320 vaddr_t 321 uvm_km_kmemalloc_pla(struct vm_map *map, struct uvm_object *obj, vsize_t size, 322 vsize_t valign, int flags, paddr_t low, paddr_t high, paddr_t alignment, 323 paddr_t boundary, int nsegs) 324 { 325 vaddr_t kva, loopva; 326 voff_t offset; 327 struct vm_page *pg; 328 struct pglist pgl; 329 int pla_flags; 330 331 KASSERT(vm_map_pmap(map) == pmap_kernel()); 332 /* UVM_KMF_VALLOC => !UVM_KMF_ZERO */ 333 KASSERT(!(flags & UVM_KMF_VALLOC) || 334 !(flags & UVM_KMF_ZERO)); 335 336 /* setup for call */ 337 size = round_page(size); 338 kva = vm_map_min(map); /* hint */ 339 if (nsegs == 0) 340 nsegs = atop(size); 341 342 /* allocate some virtual space */ 343 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 344 valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 345 MAP_INHERIT_NONE, MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) { 346 return 0; 347 } 348 349 /* if all we wanted was VA, return now */ 350 if (flags & UVM_KMF_VALLOC) { 351 return kva; 352 } 353 354 /* recover object offset from virtual address */ 355 if (obj != NULL) 356 offset = kva - vm_map_min(kernel_map); 357 else 358 offset = 0; 359 360 /* 361 * now allocate and map in the memory... note that we are the only ones 362 * whom should ever get a handle on this area of VM. 363 */ 364 TAILQ_INIT(&pgl); 365 pla_flags = 0; 366 KASSERT(uvmexp.swpgonly <= uvmexp.swpages); 367 if ((flags & UVM_KMF_NOWAIT) || 368 ((flags & UVM_KMF_CANFAIL) && 369 uvmexp.swpages - uvmexp.swpgonly <= atop(size))) 370 pla_flags |= UVM_PLA_NOWAIT; 371 else 372 pla_flags |= UVM_PLA_WAITOK; 373 if (flags & UVM_KMF_ZERO) 374 pla_flags |= UVM_PLA_ZERO; 375 if (uvm_pglistalloc(size, low, high, alignment, boundary, &pgl, nsegs, 376 pla_flags) != 0) { 377 /* Failed. */ 378 uvm_unmap(map, kva, kva + size); 379 return (0); 380 } 381 382 loopva = kva; 383 while (loopva != kva + size) { 384 pg = TAILQ_FIRST(&pgl); 385 TAILQ_REMOVE(&pgl, pg, pageq); 386 uvm_pagealloc_pg(pg, obj, offset, NULL); 387 atomic_clearbits_int(&pg->pg_flags, PG_BUSY); 388 UVM_PAGE_OWN(pg, NULL); 389 390 /* 391 * map it in: note that we call pmap_enter with the map and 392 * object unlocked in case we are kmem_map. 393 */ 394 if (obj == NULL) { 395 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 396 PROT_READ | PROT_WRITE); 397 } else { 398 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), 399 PROT_READ | PROT_WRITE, 400 PROT_READ | PROT_WRITE | PMAP_WIRED); 401 } 402 loopva += PAGE_SIZE; 403 offset += PAGE_SIZE; 404 } 405 KASSERT(TAILQ_EMPTY(&pgl)); 406 pmap_update(pmap_kernel()); 407 408 return kva; 409 } 410 411 /* 412 * uvm_km_free: free an area of kernel memory 413 */ 414 void 415 uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size) 416 { 417 uvm_unmap(map, trunc_page(addr), round_page(addr+size)); 418 } 419 420 /* 421 * uvm_km_free_wakeup: free an area of kernel memory and wake up 422 * anyone waiting for vm space. 423 * 424 * => XXX: "wanted" bit + unlock&wait on other end? 425 */ 426 void 427 uvm_km_free_wakeup(struct vm_map *map, vaddr_t addr, vsize_t size) 428 { 429 struct uvm_map_deadq dead_entries; 430 431 vm_map_lock(map); 432 TAILQ_INIT(&dead_entries); 433 uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 434 &dead_entries, FALSE, TRUE); 435 wakeup(map); 436 vm_map_unlock(map); 437 438 uvm_unmap_detach(&dead_entries, 0); 439 } 440 441 /* 442 * uvm_km_alloc1: allocate wired down memory in the kernel map. 443 * 444 * => we can sleep if needed 445 */ 446 vaddr_t 447 uvm_km_alloc1(struct vm_map *map, vsize_t size, vsize_t align, boolean_t zeroit) 448 { 449 vaddr_t kva, loopva; 450 voff_t offset; 451 struct vm_page *pg; 452 453 KASSERT(vm_map_pmap(map) == pmap_kernel()); 454 455 size = round_page(size); 456 kva = vm_map_min(map); /* hint */ 457 458 /* allocate some virtual space */ 459 if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, 460 UVM_UNKNOWN_OFFSET, align, 461 UVM_MAPFLAG(PROT_READ | PROT_WRITE, 462 PROT_READ | PROT_WRITE | PROT_EXEC, 463 MAP_INHERIT_NONE, MADV_RANDOM, 0)) != 0)) { 464 return 0; 465 } 466 467 /* recover object offset from virtual address */ 468 offset = kva - vm_map_min(kernel_map); 469 470 /* now allocate the memory. we must be careful about released pages. */ 471 loopva = kva; 472 while (size) { 473 /* allocate ram */ 474 pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0); 475 if (pg) { 476 atomic_clearbits_int(&pg->pg_flags, PG_BUSY); 477 UVM_PAGE_OWN(pg, NULL); 478 } 479 if (__predict_false(pg == NULL)) { 480 if (curproc == uvm.pagedaemon_proc) { 481 /* 482 * It is unfeasible for the page daemon to 483 * sleep for memory, so free what we have 484 * allocated and fail. 485 */ 486 uvm_unmap(map, kva, loopva - kva); 487 return (0); 488 } else { 489 uvm_wait("km_alloc1w"); /* wait for memory */ 490 continue; 491 } 492 } 493 494 /* 495 * map it in; note we're never called with an intrsafe 496 * object, so we always use regular old pmap_enter(). 497 */ 498 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), 499 PROT_READ | PROT_WRITE, 500 PROT_READ | PROT_WRITE | PMAP_WIRED); 501 502 loopva += PAGE_SIZE; 503 offset += PAGE_SIZE; 504 size -= PAGE_SIZE; 505 } 506 pmap_update(map->pmap); 507 508 /* 509 * zero on request (note that "size" is now zero due to the above loop 510 * so we need to subtract kva from loopva to reconstruct the size). 511 */ 512 if (zeroit) 513 memset((caddr_t)kva, 0, loopva - kva); 514 515 return kva; 516 } 517 518 /* 519 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space 520 * 521 * => memory is not allocated until fault time 522 */ 523 524 vaddr_t 525 uvm_km_valloc(struct vm_map *map, vsize_t size) 526 { 527 return uvm_km_valloc_align(map, size, 0, 0); 528 } 529 530 vaddr_t 531 uvm_km_valloc_try(struct vm_map *map, vsize_t size) 532 { 533 return uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK); 534 } 535 536 vaddr_t 537 uvm_km_valloc_align(struct vm_map *map, vsize_t size, vsize_t align, int flags) 538 { 539 vaddr_t kva; 540 541 KASSERT(vm_map_pmap(map) == pmap_kernel()); 542 543 size = round_page(size); 544 kva = vm_map_min(map); /* hint */ 545 546 /* 547 * allocate some virtual space. will be demand filled by kernel_object. 548 */ 549 if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object, 550 UVM_UNKNOWN_OFFSET, align, 551 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 552 MAP_INHERIT_NONE, MADV_RANDOM, flags)) != 0)) { 553 return 0; 554 } 555 556 return kva; 557 } 558 559 /* 560 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space 561 * 562 * => memory is not allocated until fault time 563 * => if no room in map, wait for space to free, unless requested size 564 * is larger than map (in which case we return 0) 565 */ 566 vaddr_t 567 uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer) 568 { 569 vaddr_t kva; 570 571 KASSERT(vm_map_pmap(map) == pmap_kernel()); 572 573 size = round_page(size); 574 if (size > vm_map_max(map) - vm_map_min(map)) 575 return 0; 576 577 while (1) { 578 kva = vm_map_min(map); /* hint */ 579 580 /* 581 * allocate some virtual space. will be demand filled 582 * by kernel_object. 583 */ 584 if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object, 585 prefer, 0, 586 UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE, 587 MAP_INHERIT_NONE, MADV_RANDOM, 0)) == 0)) { 588 return kva; 589 } 590 591 /* failed. sleep for a while (on map) */ 592 tsleep_nsec(map, PVM, "vallocwait", INFSLP); 593 } 594 /*NOTREACHED*/ 595 } 596 597 vaddr_t 598 uvm_km_valloc_wait(struct vm_map *map, vsize_t size) 599 { 600 return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET); 601 } 602 603 #if defined(__HAVE_PMAP_DIRECT) 604 /* 605 * uvm_km_page allocator, __HAVE_PMAP_DIRECT arch 606 * On architectures with machine memory direct mapped into a portion 607 * of KVM, we have very little work to do. Just get a physical page, 608 * and find and return its VA. 609 */ 610 void 611 uvm_km_page_init(void) 612 { 613 /* nothing */ 614 } 615 616 void 617 uvm_km_page_lateinit(void) 618 { 619 /* nothing */ 620 } 621 622 #else 623 /* 624 * uvm_km_page allocator, non __HAVE_PMAP_DIRECT archs 625 * This is a special allocator that uses a reserve of free pages 626 * to fulfill requests. It is fast and interrupt safe, but can only 627 * return page sized regions. Its primary use is as a backend for pool. 628 * 629 * The memory returned is allocated from the larger kernel_map, sparing 630 * pressure on the small interrupt-safe kmem_map. It is wired, but 631 * not zero filled. 632 */ 633 634 struct uvm_km_pages uvm_km_pages; 635 636 void uvm_km_createthread(void *); 637 void uvm_km_thread(void *); 638 struct uvm_km_free_page *uvm_km_doputpage(struct uvm_km_free_page *); 639 640 /* 641 * Allocate the initial reserve, and create the thread which will 642 * keep the reserve full. For bootstrapping, we allocate more than 643 * the lowat amount, because it may be a while before the thread is 644 * running. 645 */ 646 void 647 uvm_km_page_init(void) 648 { 649 int lowat_min; 650 int i; 651 int len, bulk; 652 vaddr_t addr; 653 654 mtx_init(&uvm_km_pages.mtx, IPL_VM); 655 if (!uvm_km_pages.lowat) { 656 /* based on physmem, calculate a good value here */ 657 uvm_km_pages.lowat = physmem / 256; 658 lowat_min = physmem < atop(16 * 1024 * 1024) ? 32 : 128; 659 if (uvm_km_pages.lowat < lowat_min) 660 uvm_km_pages.lowat = lowat_min; 661 } 662 if (uvm_km_pages.lowat > UVM_KM_PAGES_LOWAT_MAX) 663 uvm_km_pages.lowat = UVM_KM_PAGES_LOWAT_MAX; 664 uvm_km_pages.hiwat = 4 * uvm_km_pages.lowat; 665 if (uvm_km_pages.hiwat > UVM_KM_PAGES_HIWAT_MAX) 666 uvm_km_pages.hiwat = UVM_KM_PAGES_HIWAT_MAX; 667 668 /* Allocate all pages in as few allocations as possible. */ 669 len = 0; 670 bulk = uvm_km_pages.hiwat; 671 while (len < uvm_km_pages.hiwat && bulk > 0) { 672 bulk = MIN(bulk, uvm_km_pages.hiwat - len); 673 addr = vm_map_min(kernel_map); 674 if (uvm_map(kernel_map, &addr, (vsize_t)bulk << PAGE_SHIFT, 675 NULL, UVM_UNKNOWN_OFFSET, 0, 676 UVM_MAPFLAG(PROT_READ | PROT_WRITE, 677 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 678 MADV_RANDOM, UVM_KMF_TRYLOCK)) != 0) { 679 bulk /= 2; 680 continue; 681 } 682 683 for (i = len; i < len + bulk; i++, addr += PAGE_SIZE) 684 uvm_km_pages.page[i] = addr; 685 len += bulk; 686 } 687 688 uvm_km_pages.free = len; 689 for (i = len; i < UVM_KM_PAGES_HIWAT_MAX; i++) 690 uvm_km_pages.page[i] = 0; 691 692 /* tone down if really high */ 693 if (uvm_km_pages.lowat > 512) 694 uvm_km_pages.lowat = 512; 695 } 696 697 void 698 uvm_km_page_lateinit(void) 699 { 700 kthread_create_deferred(uvm_km_createthread, NULL); 701 } 702 703 void 704 uvm_km_createthread(void *arg) 705 { 706 kthread_create(uvm_km_thread, NULL, &uvm_km_pages.km_proc, "kmthread"); 707 } 708 709 /* 710 * Endless loop. We grab pages in increments of 16 pages, then 711 * quickly swap them into the list. 712 */ 713 void 714 uvm_km_thread(void *arg) 715 { 716 vaddr_t pg[16]; 717 int i; 718 int allocmore = 0; 719 int flags; 720 struct uvm_km_free_page *fp = NULL; 721 722 KERNEL_UNLOCK(); 723 724 for (;;) { 725 mtx_enter(&uvm_km_pages.mtx); 726 if (uvm_km_pages.free >= uvm_km_pages.lowat && 727 uvm_km_pages.freelist == NULL) { 728 msleep_nsec(&uvm_km_pages.km_proc, &uvm_km_pages.mtx, 729 PVM, "kmalloc", INFSLP); 730 } 731 allocmore = uvm_km_pages.free < uvm_km_pages.lowat; 732 fp = uvm_km_pages.freelist; 733 uvm_km_pages.freelist = NULL; 734 uvm_km_pages.freelistlen = 0; 735 mtx_leave(&uvm_km_pages.mtx); 736 737 if (allocmore) { 738 /* 739 * If there was nothing on the freelist, then we 740 * must obtain at least one page to make progress. 741 * So, only use UVM_KMF_TRYLOCK for the first page 742 * if fp != NULL 743 */ 744 flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, 745 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 746 MADV_RANDOM, fp != NULL ? UVM_KMF_TRYLOCK : 0); 747 memset(pg, 0, sizeof(pg)); 748 for (i = 0; i < nitems(pg); i++) { 749 pg[i] = vm_map_min(kernel_map); 750 if (uvm_map(kernel_map, &pg[i], PAGE_SIZE, 751 NULL, UVM_UNKNOWN_OFFSET, 0, flags) != 0) { 752 pg[i] = 0; 753 break; 754 } 755 756 /* made progress, so don't sleep for more */ 757 flags = UVM_MAPFLAG(PROT_READ | PROT_WRITE, 758 PROT_READ | PROT_WRITE, MAP_INHERIT_NONE, 759 MADV_RANDOM, UVM_KMF_TRYLOCK); 760 } 761 762 mtx_enter(&uvm_km_pages.mtx); 763 for (i = 0; i < nitems(pg); i++) { 764 if (uvm_km_pages.free == 765 nitems(uvm_km_pages.page)) 766 break; 767 else if (pg[i] != 0) 768 uvm_km_pages.page[uvm_km_pages.free++] 769 = pg[i]; 770 } 771 wakeup(&uvm_km_pages.free); 772 mtx_leave(&uvm_km_pages.mtx); 773 774 /* Cleanup left-over pages (if any). */ 775 for (; i < nitems(pg); i++) { 776 if (pg[i] != 0) { 777 uvm_unmap(kernel_map, 778 pg[i], pg[i] + PAGE_SIZE); 779 } 780 } 781 } 782 while (fp) { 783 fp = uvm_km_doputpage(fp); 784 } 785 } 786 } 787 788 struct uvm_km_free_page * 789 uvm_km_doputpage(struct uvm_km_free_page *fp) 790 { 791 vaddr_t va = (vaddr_t)fp; 792 struct vm_page *pg; 793 int freeva = 1; 794 struct uvm_km_free_page *nextfp = fp->next; 795 796 pg = uvm_atopg(va); 797 798 pmap_kremove(va, PAGE_SIZE); 799 pmap_update(kernel_map->pmap); 800 801 mtx_enter(&uvm_km_pages.mtx); 802 if (uvm_km_pages.free < uvm_km_pages.hiwat) { 803 uvm_km_pages.page[uvm_km_pages.free++] = va; 804 freeva = 0; 805 } 806 mtx_leave(&uvm_km_pages.mtx); 807 808 if (freeva) 809 uvm_unmap(kernel_map, va, va + PAGE_SIZE); 810 811 uvm_pagefree(pg); 812 return (nextfp); 813 } 814 #endif /* !__HAVE_PMAP_DIRECT */ 815 816 void * 817 km_alloc(size_t sz, const struct kmem_va_mode *kv, 818 const struct kmem_pa_mode *kp, const struct kmem_dyn_mode *kd) 819 { 820 struct vm_map *map; 821 struct vm_page *pg; 822 struct pglist pgl; 823 int mapflags = 0; 824 vm_prot_t prot; 825 paddr_t pla_align; 826 int pla_flags; 827 int pla_maxseg; 828 vaddr_t va, sva = 0; 829 830 KASSERT(sz == round_page(sz)); 831 832 TAILQ_INIT(&pgl); 833 834 if (kp->kp_nomem || kp->kp_pageable) 835 goto alloc_va; 836 837 pla_flags = kd->kd_waitok ? UVM_PLA_WAITOK : UVM_PLA_NOWAIT; 838 pla_flags |= UVM_PLA_TRYCONTIG; 839 if (kp->kp_zero) 840 pla_flags |= UVM_PLA_ZERO; 841 842 pla_align = kp->kp_align; 843 #ifdef __HAVE_PMAP_DIRECT 844 if (pla_align < kv->kv_align) 845 pla_align = kv->kv_align; 846 #endif 847 pla_maxseg = kp->kp_maxseg; 848 if (pla_maxseg == 0) 849 pla_maxseg = sz / PAGE_SIZE; 850 851 if (uvm_pglistalloc(sz, kp->kp_constraint->ucr_low, 852 kp->kp_constraint->ucr_high, pla_align, kp->kp_boundary, 853 &pgl, pla_maxseg, pla_flags)) { 854 return (NULL); 855 } 856 857 #ifdef __HAVE_PMAP_DIRECT 858 /* 859 * Only use direct mappings for single page or single segment 860 * allocations. 861 */ 862 if (kv->kv_singlepage || kp->kp_maxseg == 1) { 863 TAILQ_FOREACH(pg, &pgl, pageq) { 864 va = pmap_map_direct(pg); 865 if (pg == TAILQ_FIRST(&pgl)) 866 sva = va; 867 } 868 return ((void *)sva); 869 } 870 #endif 871 alloc_va: 872 prot = PROT_READ | PROT_WRITE; 873 874 if (kp->kp_pageable) { 875 KASSERT(kp->kp_object); 876 KASSERT(!kv->kv_singlepage); 877 } else { 878 KASSERT(kp->kp_object == NULL); 879 } 880 881 if (kv->kv_singlepage) { 882 KASSERT(sz == PAGE_SIZE); 883 #ifdef __HAVE_PMAP_DIRECT 884 panic("km_alloc: DIRECT single page"); 885 #else 886 mtx_enter(&uvm_km_pages.mtx); 887 while (uvm_km_pages.free == 0) { 888 if (kd->kd_waitok == 0) { 889 mtx_leave(&uvm_km_pages.mtx); 890 uvm_pglistfree(&pgl); 891 return NULL; 892 } 893 msleep_nsec(&uvm_km_pages.free, &uvm_km_pages.mtx, 894 PVM, "getpage", INFSLP); 895 } 896 va = uvm_km_pages.page[--uvm_km_pages.free]; 897 if (uvm_km_pages.free < uvm_km_pages.lowat && 898 curproc != uvm_km_pages.km_proc) { 899 if (kd->kd_slowdown) 900 *kd->kd_slowdown = 1; 901 wakeup(&uvm_km_pages.km_proc); 902 } 903 mtx_leave(&uvm_km_pages.mtx); 904 #endif 905 } else { 906 struct uvm_object *uobj = NULL; 907 908 if (kd->kd_trylock) 909 mapflags |= UVM_KMF_TRYLOCK; 910 911 if (kp->kp_object) 912 uobj = *kp->kp_object; 913 try_map: 914 map = *kv->kv_map; 915 va = vm_map_min(map); 916 if (uvm_map(map, &va, sz, uobj, kd->kd_prefer, 917 kv->kv_align, UVM_MAPFLAG(prot, prot, MAP_INHERIT_NONE, 918 MADV_RANDOM, mapflags))) { 919 if (kv->kv_wait && kd->kd_waitok) { 920 tsleep_nsec(map, PVM, "km_allocva", INFSLP); 921 goto try_map; 922 } 923 uvm_pglistfree(&pgl); 924 return (NULL); 925 } 926 } 927 sva = va; 928 TAILQ_FOREACH(pg, &pgl, pageq) { 929 if (kp->kp_pageable) 930 pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pg), 931 prot, prot | PMAP_WIRED); 932 else 933 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), prot); 934 va += PAGE_SIZE; 935 } 936 pmap_update(pmap_kernel()); 937 return ((void *)sva); 938 } 939 940 void 941 km_free(void *v, size_t sz, const struct kmem_va_mode *kv, 942 const struct kmem_pa_mode *kp) 943 { 944 vaddr_t sva, eva, va; 945 struct vm_page *pg; 946 struct pglist pgl; 947 948 sva = (vaddr_t)v; 949 eva = sva + sz; 950 951 if (kp->kp_nomem) 952 goto free_va; 953 954 #ifdef __HAVE_PMAP_DIRECT 955 if (kv->kv_singlepage || kp->kp_maxseg == 1) { 956 TAILQ_INIT(&pgl); 957 for (va = sva; va < eva; va += PAGE_SIZE) { 958 pg = pmap_unmap_direct(va); 959 TAILQ_INSERT_TAIL(&pgl, pg, pageq); 960 } 961 uvm_pglistfree(&pgl); 962 return; 963 } 964 #else 965 if (kv->kv_singlepage) { 966 struct uvm_km_free_page *fp = v; 967 968 mtx_enter(&uvm_km_pages.mtx); 969 fp->next = uvm_km_pages.freelist; 970 uvm_km_pages.freelist = fp; 971 if (uvm_km_pages.freelistlen++ > 16) 972 wakeup(&uvm_km_pages.km_proc); 973 mtx_leave(&uvm_km_pages.mtx); 974 return; 975 } 976 #endif 977 978 if (kp->kp_pageable) { 979 pmap_remove(pmap_kernel(), sva, eva); 980 pmap_update(pmap_kernel()); 981 } else { 982 TAILQ_INIT(&pgl); 983 for (va = sva; va < eva; va += PAGE_SIZE) { 984 paddr_t pa; 985 986 if (!pmap_extract(pmap_kernel(), va, &pa)) 987 continue; 988 989 pg = PHYS_TO_VM_PAGE(pa); 990 if (pg == NULL) { 991 panic("km_free: unmanaged page 0x%lx\n", pa); 992 } 993 TAILQ_INSERT_TAIL(&pgl, pg, pageq); 994 } 995 pmap_kremove(sva, sz); 996 pmap_update(pmap_kernel()); 997 uvm_pglistfree(&pgl); 998 } 999 free_va: 1000 uvm_unmap(*kv->kv_map, sva, eva); 1001 if (kv->kv_wait) 1002 wakeup(*kv->kv_map); 1003 } 1004 1005 const struct kmem_va_mode kv_any = { 1006 .kv_map = &kernel_map, 1007 }; 1008 1009 const struct kmem_va_mode kv_intrsafe = { 1010 .kv_map = &kmem_map, 1011 }; 1012 1013 const struct kmem_va_mode kv_page = { 1014 .kv_singlepage = 1 1015 }; 1016 1017 const struct kmem_pa_mode kp_dirty = { 1018 .kp_constraint = &no_constraint 1019 }; 1020 1021 const struct kmem_pa_mode kp_dma = { 1022 .kp_constraint = &dma_constraint 1023 }; 1024 1025 const struct kmem_pa_mode kp_dma_contig = { 1026 .kp_constraint = &dma_constraint, 1027 .kp_maxseg = 1 1028 }; 1029 1030 const struct kmem_pa_mode kp_dma_zero = { 1031 .kp_constraint = &dma_constraint, 1032 .kp_zero = 1 1033 }; 1034 1035 const struct kmem_pa_mode kp_zero = { 1036 .kp_constraint = &no_constraint, 1037 .kp_zero = 1 1038 }; 1039 1040 const struct kmem_pa_mode kp_pageable = { 1041 .kp_object = &uvm.kernel_object, 1042 .kp_pageable = 1 1043 /* XXX - kp_nomem, maybe, but we'll need to fix km_free. */ 1044 }; 1045 1046 const struct kmem_pa_mode kp_none = { 1047 .kp_nomem = 1 1048 }; 1049 1050 const struct kmem_dyn_mode kd_waitok = { 1051 .kd_waitok = 1, 1052 .kd_prefer = UVM_UNKNOWN_OFFSET 1053 }; 1054 1055 const struct kmem_dyn_mode kd_nowait = { 1056 .kd_prefer = UVM_UNKNOWN_OFFSET 1057 }; 1058 1059 const struct kmem_dyn_mode kd_trylock = { 1060 .kd_trylock = 1, 1061 .kd_prefer = UVM_UNKNOWN_OFFSET 1062 }; 1063