/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

static struct vm_map kernel_map_store;
static struct vm_map clean_map_store;
static struct vm_map buffer_map_store;

struct vm_map *kernel_map = &kernel_map_store;
struct vm_map *clean_map = &clean_map_store;
struct vm_map *buffer_map = &buffer_map_store;

static __inline
int
KMVMCPU(int kmflags)
{
	if ((kmflags & KM_CPU_SPEC) == 0)
		return 0;
	return VM_ALLOC_CPU(KM_GETCPU(kmflags));
}

/*
 * Allocate pageable swap-backed anonymous memory
 */
void *
kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
{
	int error;
	vm_pindex_t npages;

	size = round_page(size);
	npages = size / PAGE_SIZE;

	if (kp->map == NULL)
		kp->map = kernel_map;
	kp->data = vm_map_min(kernel_map);
	kp->size = size;
	kp->object = vm_object_allocate(OBJT_DEFAULT, npages);

	error = vm_map_find(kp->map, kp->object, NULL, 0,
			    &kp->data, size,
			    PAGE_SIZE, TRUE,
			    VM_MAPTYPE_NORMAL, id,
			    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error) {
		kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
			size, error);
		kp->data = (vm_offset_t)0;
		kmem_free_swapbacked(kp);
		return NULL;
	}
	return ((void *)(intptr_t)kp->data);
}

void
kmem_free_swapbacked(kmem_anon_desc_t *kp)
{
	if (kp->data) {
		/*
		 * The object will be deallocated by kmem_free().
		 */
		kmem_free(kp->map, kp->data, kp->size);
		kp->data = (vm_offset_t)0;
	} else {
		/*
		 * Failure during allocation, object must be deallocated
		 * manually.
		 */
		vm_object_deallocate(kp->object);
	}
	kp->object = NULL;
}

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.  The caller must adjust the
 * map or enter VM pages itself.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     PAGE_SIZE, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
		   vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     align, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
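
/*
 * Usage sketch (illustrative only, excluded from compilation): how a
 * caller might pair kmem_alloc_swapbacked() with kmem_free_swapbacked().
 * The descriptor should be zeroed first so kp.map == NULL defaults to
 * kernel_map.  VM_SUBSYS_UNKNOWN is assumed here as a stand-in for the
 * caller's real subsystem id.
 */
#if 0
static void
example_swapbacked(vm_size_t bytes)
{
	kmem_anon_desc_t kp;
	void *base;

	bzero(&kp, sizeof(kp));		/* kp.map == NULL selects kernel_map */
	base = kmem_alloc_swapbacked(&kp, bytes, VM_SUBSYS_UNKNOWN);
	if (base == NULL)
		return;
	/* ... use the pageable, swap-backed buffer at base ... */
	kmem_free_swapbacked(&kp);	/* releases the KVA and the VM object */
}
#endif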

/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(kernel_object);
	vm_object_reference_locked(kernel_object);
	vm_map_insert(map, &count,
		      kernel_object, NULL,
		      addr, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, cow);
	vm_object_drop(kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_kernel_wiring().  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc()ing a new
	 *    page for it.
	 * 3) vm_map_kernel_wiring() calls vm_fault(); there is no page,
	 *    but there is a pager, so we call pager_data_request().  But
	 *    the kmsg zone is empty, so we must kmem_alloc().
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc() is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_kernel_wiring() will wire the pages.
	 */
	vm_object_hold(kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY | KMVMCPU(kmflags));
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(kernel_object);

	/*
	 * And finally, mark the data as pageable or non-pageable (unwiring
	 * or wiring the pages), according to the passed-in kmflags.
	 *
	 * NOTE: vm_map_kernel_wiring() handles any kstack guard.
	 */
	vm_map_kernel_wiring(map, addr, addr + size, kmflags);

	return (addr);
}
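
/*
 * Illustrative sketch (not compiled): two typical ways of driving
 * kmem_alloc3().  A plain wired allocation passes kmflags == 0; passing
 * KM_STACK leaves the first page unpopulated as a guard (gstart ==
 * PAGE_SIZE above) and maps the entry MAP_IS_KSTACK.  VM_SUBSYS_UNKNOWN
 * and VM_SUBSYS_KSTACK are assumed here as subsystem tags.
 */
#if 0
static void
example_kmem_alloc3(void)
{
	vm_offset_t data;
	vm_offset_t stk;

	/* Zero-filled, wired kernel memory. */
	data = kmem_alloc3(kernel_map, 4 * PAGE_SIZE, VM_SUBSYS_UNKNOWN, 0);

	/* Kernel stack with a guard page at its base. */
	stk = kmem_alloc3(kernel_map, 4 * PAGE_SIZE, VM_SUBSYS_KSTACK,
			  KM_STACK);

	if (data)
		kmem_free(kernel_map, data, 4 * PAGE_SIZE);
	if (stk)
		kmem_free(kernel_map, stk, 4 * PAGE_SIZE);
}
#endif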

/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Submap to initialize with the carved-out range
 *	size		Size of range to find
 *	min, max	Returned endpoints of the submap
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, NULL,
			  (vm_offset_t) 0, min, size,
			  PAGE_SIZE, TRUE,
			  VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}

/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, NULL,
		      (vm_offset_t)0, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
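
/*
 * Illustrative sketch (not compiled): kmem_alloc_wait() and
 * kmem_free_wakeup() are meant to be used as a pair on a submap created
 * with kmem_suballoc().  The allocation side tsleep()s on the map when
 * the submap is full; the free side wakeup()s it.  VM_SUBSYS_UNKNOWN is
 * assumed as a placeholder subsystem id.
 */
#if 0
static void
example_submap_cycle(vm_map_t submap, vm_size_t bytes)
{
	vm_offset_t kva;

	/* Blocks until space frees up; 0 only if the request can never fit. */
	kva = kmem_alloc_wait(submap, bytes, VM_SUBSYS_UNKNOWN);
	if (kva == 0)
		return;
	/* ... enter pages and use [kva, kva + round_page(bytes)) ... */
	kmem_free_wakeup(submap, kva, bytes);	/* wakes other waiters */
}
#endif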

/*
 * Allocates a region from the kernel address map and physical pages
 * within the specified address range to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
		int flags, vm_paddr_t low,
		vm_paddr_t high, vm_memattr_t memattr)
{
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int count;

	size = round_page(size);
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     flags, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (0);
	}
	offset = addr - vm_map_min(kernel_map);
	vm_object_hold(kernel_object);
	vm_object_reference_locked(kernel_object);
	vm_map_insert(map, &count,
		      kernel_object, NULL,
		      offset, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	vm_object_drop(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc_contig(low, high, PAGE_SIZE, 0,
					 PAGE_SIZE, memattr);
		if (!m) {
			return (0);
		}
		vm_object_hold(kernel_object);
		vm_page_insert(m, kernel_object, OFF_TO_IDX(offset + i));
		vm_object_drop(kernel_object);
		if (flags & M_ZERO)
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
	}

	/* wire the pages */
	vm_map_kernel_wiring(map, addr, addr + size, 0);

	return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}
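
/*
 * Illustrative sketch (not compiled): kmem_alloc_attr() is useful when
 * the backing pages must satisfy a physical constraint, e.g. a device
 * that can only address memory below 4G.  The pages are wired but not
 * necessarily physically contiguous.  VM_SUBSYS_UNKNOWN and
 * VM_MEMATTR_DEFAULT are assumed placeholder values here.
 */
#if 0
static vm_offset_t
example_alloc_below_4g(vm_size_t bytes)
{
	/* Zeroed, wired pages constrained to physical range [0, 4G). */
	return kmem_alloc_attr(kernel_map, bytes, VM_SUBSYS_UNKNOWN,
			       M_ZERO, 0, 0xFFFFFFFFUL,
			       VM_MEMATTR_DEFAULT);
}
#endif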

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = kernel_map;
	vm_map_init(m, KvaStart, KvaEnd, kernel_pmap);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count,
				      NULL, NULL,
				      (vm_offset_t) 0, NULL,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
				      VM_PROT_ALL, VM_PROT_ALL, 0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, NULL,
			      addr, virtual_start,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, NULL,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
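
/*
 * Usage note: the two handlers above surface as read-only sysctls under
 * the vm tree and can be inspected from userland with sysctl(8), e.g.:
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */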