/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 * Convert a KM_CPU_SPEC allocation flag into the matching VM_ALLOC_CPU()
 * page-allocation flag, or return 0 if no cpu was specified.
 */
static __inline
int
KMVMCPU(int kmflags)
{
	if ((kmflags & KM_CPU_SPEC) == 0)
		return 0;
	return VM_ALLOC_CPU(KM_GETCPU(kmflags));
}

/*
 * Allocate pageable swap-backed anonymous memory
 */
void *
kmem_alloc_swapbacked(kmem_anon_desc_t *kp, vm_size_t size, vm_subsys_t id)
{
	int error;
	vm_pindex_t npages;

	size = round_page(size);
	npages = size / PAGE_SIZE;

	if (kp->map == NULL)
		kp->map = &kernel_map;
	kp->data = vm_map_min(&kernel_map);
	kp->size = size;
	kp->object = vm_object_allocate(OBJT_DEFAULT, npages);

	error = vm_map_find(kp->map, kp->object, NULL, 0,
			    &kp->data, size,
			    PAGE_SIZE, TRUE,
			    VM_MAPTYPE_NORMAL, id,
			    VM_PROT_ALL, VM_PROT_ALL, 0);
	if (error) {
		kprintf("kmem_alloc_swapbacked: %zd bytes failed %d\n",
			size, error);
		kp->data = (vm_offset_t)0;
		kmem_free_swapbacked(kp);
		return NULL;
	}
	return ((void *)(intptr_t)kp->data);
}

void
kmem_free_swapbacked(kmem_anon_desc_t *kp)
{
	if (kp->data) {
		/*
		 * The object will be deallocated by kmem_free().
		 */
		kmem_free(kp->map, kp->data, kp->size);
		kp->data = (vm_offset_t)0;
	} else {
		/*
		 * Failure during allocation, the object must be
		 * deallocated manually.
		 */
		vm_object_deallocate(kp->object);
	}
	kp->object = NULL;
}

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.  The caller must adjust the
 * map or enter VM pages itself.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     PAGE_SIZE, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_subsys_t id,
		   vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, NULL,
			     (vm_offset_t) 0, &addr, size,
			     align, TRUE,
			     VM_MAPTYPE_NORMAL, id,
			     VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
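
/*
 * Illustrative sketch only (not compiled): how a subsystem might use the
 * swap-backed allocator above.  The descriptor must start zeroed so that
 * kp->map defaults to kernel_map.  The names "my_buf_desc" and
 * "my_subsys_init", the 64KB size, and the VM_SUBSYS_EXAMPLE id tag are
 * hypothetical; real callers pass one of the kernel's vm_subsys_t values.
 */
#if 0
static kmem_anon_desc_t my_buf_desc;

static void
my_subsys_init(void)
{
	void *buf;

	bzero(&my_buf_desc, sizeof(my_buf_desc));	/* map defaults to kernel_map */
	buf = kmem_alloc_swapbacked(&my_buf_desc, 64 * 1024,
				    VM_SUBSYS_EXAMPLE);
	if (buf == NULL)
		panic("my_subsys_init: out of kernel virtual memory");
	/* ... use buf; the memory is pageable and swap-backed ... */
	kmem_free_swapbacked(&my_buf_desc);
}
#endif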
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, vm_subsys_t id, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      addr, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, cow);
	vm_object_drop(&kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc'ing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY | KMVMCPU(kmflags));
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, addr, addr + size, kmflags);

	return (addr);
}
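
/*
 * Illustrative sketch only (not compiled): allocating a wired, zero-filled
 * kernel buffer with kmem_alloc3() and releasing it with kmem_free().
 * The function name and the VM_SUBSYS_EXAMPLE id tag are hypothetical;
 * passing kmflags of 0 requests a plain wired allocation (no KM_STACK
 * guard page, normal map-entry reservation).
 */
#if 0
static void
example_wired_alloc(void)
{
	vm_offset_t addr;
	vm_size_t size = 4 * PAGE_SIZE;

	addr = kmem_alloc3(&kernel_map, size, VM_SUBSYS_EXAMPLE, 0);
	if (addr == 0)
		return;		/* no KVA space or no pages available */
	/* the memory is wired and zero-filled here */
	kmem_free(&kernel_map, addr, size);
}
#endif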
/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to populate with the newly created submap
 *	size		Size of range to find
 *	min, max	Returned endpoints of the submap
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, NULL,
			  (vm_offset_t) 0, min, size,
			  PAGE_SIZE, TRUE,
			  VM_MAPTYPE_UNSPECIFIED, VM_SUBSYS_SYSMAP,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}

/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size, vm_subsys_t id)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, NULL,
		      (vm_offset_t)0, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
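
/*
 * Illustrative sketch only (not compiled): carving a private submap out of
 * kernel_map at boot with kmem_suballoc(), then allocating from it with
 * kmem_alloc_wait() and returning memory with kmem_free_wakeup() (defined
 * below).  "example_map", the helper names, the 1MB size, and the
 * VM_SUBSYS_EXAMPLE id tag are all hypothetical.
 */
#if 0
static struct vm_map example_map;
static vm_offset_t example_min, example_max;

static void
example_submap_init(void)
{
	kmem_suballoc(&kernel_map, &example_map,
		      &example_min, &example_max, 1024 * 1024);
}

static vm_offset_t
example_submap_alloc(vm_size_t size)
{
	/* sleeps until space frees up, unless size can never fit */
	return kmem_alloc_wait(&example_map, size, VM_SUBSYS_EXAMPLE);
}

static void
example_submap_free(vm_offset_t addr, vm_size_t size)
{
	/* wakes up any threads sleeping in kmem_alloc_wait() */
	kmem_free_wakeup(&example_map, addr, size);
}
#endif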
/*
 * Allocates a region from the kernel address map, and physical pages
 * within the specified address range, to the kernel object.  Creates a
 * wired mapping from this region to these pages, and returns the
 * region's starting virtual address.  The allocated pages are not
 * necessarily physically contiguous.  If M_ZERO is specified through the
 * given flags, then the pages are zeroed before they are mapped.
 */
vm_offset_t
kmem_alloc_attr(vm_map_t map, vm_size_t size, vm_subsys_t id,
		int flags, vm_paddr_t low,
		vm_paddr_t high, vm_memattr_t memattr)
{
	vm_offset_t addr, i, offset;
	vm_page_t m;
	int count;

	size = round_page(size);
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     flags, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		return (0);
	}
	offset = addr - vm_map_min(&kernel_map);
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, NULL,
		      offset, NULL,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL, id,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	vm_object_drop(&kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc_contig(low, high, PAGE_SIZE, 0,
					 PAGE_SIZE, memattr);
		if (!m) {
			return (0);
		}
		vm_object_hold(&kernel_object);
		vm_page_insert(m, &kernel_object, OFF_TO_IDX(offset + i));
		vm_object_drop(&kernel_object);
		if (flags & M_ZERO)
			pmap_zero_page(VM_PAGE_TO_PHYS(m));
		m->valid = VM_PAGE_BITS_ALL;
	}
	vm_map_wire(map, addr, addr + size, 0);
	return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = &kernel_map;
	vm_map_init(m, KvaStart, KvaEnd, &kernel_pmap);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count,
				      NULL, NULL,
				      (vm_offset_t) 0, NULL,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
				      VM_PROT_ALL, VM_PROT_ALL, 0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, NULL,
			      addr, virtual_start,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count,
			      NULL, NULL,
			      (vm_offset_t) 0, NULL,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL, VM_SUBSYS_RESERVED,
			      VM_PROT_ALL, VM_PROT_ALL, 0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
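
/*
 * Illustrative sketch only (not compiled): using kmem_alloc_attr() to
 * obtain zeroed, wired pages constrained to physical addresses below 4GB,
 * e.g. for a device limited to 32-bit DMA.  The function name, the
 * 8-page size, and the VM_SUBSYS_EXAMPLE id tag are hypothetical.
 */
#if 0
static vm_offset_t
example_low_dma_alloc(void)
{
	/* pages land in [0, 4GB); mapping uses the default memory attribute */
	return kmem_alloc_attr(&kernel_map, 8 * PAGE_SIZE,
			       VM_SUBSYS_EXAMPLE, M_ZERO,
			       0, 0xffffffffULL, VM_MEMATTR_DEFAULT);
}
#endif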
/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
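
/*
 * Usage note (illustrative, userland): the two read-only sysctls above can
 * be queried from a shell:
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 *
 * or programmatically via sysctlbyname(3):
 *
 *	unsigned long ksize;
 *	size_t len = sizeof(ksize);
 *
 *	if (sysctlbyname("vm.kvm_size", &ksize, &len, NULL, 0) == 0)
 *		printf("KVM size: %lu bytes\n", ksize);
 */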