/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.10 2003/10/02 21:00:20 hmp Exp $
 */

/*
 * Kernel memory management.
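 *
 * This file implements the kernel's virtual memory allocators: bootstrap of
 * kernel_map (kmem_init, kmem_cpu_init), wired and pageable allocations
 * (kmem_alloc, kmem_alloc_pageable, kmem_alloc_nofault), the backend used by
 * the kernel malloc layer (kmem_malloc), and submap management
 * (kmem_suballoc, kmem_alloc_wait, kmem_free_wakeup).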
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
#if defined(USE_KMEM_MAP)
vm_map_t kmem_map=0;
#endif
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;

/*
 * kmem_alloc_pageable:
 *
 * Allocate pageable memory in the kernel's address map.
 * "map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 * Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t
kmem_alloc(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;
	int count;

	size = round_page(size);

	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_krelease(count);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, &count,
	    kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_krelease(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
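	/*
	 * Each pass of the loop below materializes one zero-filled page in
	 * kernel_object at the index that backs this mapping; for a two-page
	 * request that is OFF_TO_IDX(offset) and OFF_TO_IDX(offset + PAGE_SIZE).
	 */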
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 * kmem_free:
 *
 * Release a region of kernel virtual memory allocated
 * with kmem_alloc, and return the physical pages
 * associated with that region.
 *
 * This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 * Allocates a map to manage a subrange
 * of the kernel virtual address space.
 *
 * Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	vm_map_t parent;
	vm_offset_t *min, *max;
	vm_size_t size;
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}

/*
 * kmem_malloc:
 *
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 *
 * NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 * I have not verified that it actually does not block.
 */
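
/*
 * Illustrative sketch only (not part of the original file): a caller in the
 * kernel malloc layer might obtain wired, mapped backing storage roughly as
 * follows, passing the map required by the panic check below (kernel_map
 * here, or kmem_map when USE_KMEM_MAP is defined); "bytes" and the error
 * handling are hypothetical.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kernel_map, round_page(bytes), M_NOWAIT);
 *	if (va == 0)
 *		return (NULL);		(failed without sleeping)
 */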
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int count;

#if defined(USE_KMEM_MAP)
	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");
#else
	if (map != kernel_map && map != mb_map)
		panic("kmem_malloc: map != {kernel,mb}_map");
#endif

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_krelease(count);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
#if defined(USE_KMEM_MAP)
			panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
			    (long)size, (long)map->size);
#else
			panic("kmem_malloc(%ld): kernel_map too small: %ld total allocated",
			    (long)size, (long)map->size);
#endif
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, &count,
	    kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT is specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_USE_RESERVE is also specified, we can also allocate from
		 * the cache.  Neither of the latter two flags may be
		 * specified from an interrupt since interrupts are not
		 * allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
		    VM_ALLOC_INTERRUPT :
		    VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages we
		 * got aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				vm_map_unlock(map);
				VM_WAIT;
				vm_map_lock(map);
				goto retry;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size, &count);
			vm_map_unlock(map);
			vm_map_entry_krelease(count);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry so there will be a new
	 * entry exactly corresponding to this address range and it will have
	 * wired_count == 0.
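	 * For example, on a machine with 4K pages an 8K request yields
	 * exactly one new entry covering [addr, addr + 2 * PAGE_SIZE), still
	 * with wired_count == 0 at this point.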
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry, &count);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);
	vm_map_entry_krelease(count);

	return (addr);
}

/*
 * kmem_alloc_wait:
 *
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_krelease(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
	    NULL, (vm_offset_t) 0,
	    addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_krelease(count);
	return (addr);
}

/*
 * kmem_free_wakeup:
 *
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	int count;

	count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_krelease(count);
}

/*
 * kmem_init:
 *
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will
 * thus map the range between VM_MIN_KERNEL_ADDRESS and `start' as
 * allocated, and the range between `start' and `end' as free.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;
	int count;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	(void) vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
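
/*
 * Illustrative sketch only (not part of the original file): a consumer of a
 * kernel submap would typically pair the wait/wakeup routines above; for a
 * hypothetical submap "foo_map":
 *
 *	addr = kmem_alloc_wait(foo_map, size);	(may sleep until space frees)
 *	... use the memory ...
 *	kmem_free_wakeup(foo_map, addr, size);	(wakes any waiters)
 */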

/*
 * kmem_cpu_init:
 *
 * Load up extra vm_map_entry structures in each cpu's globaldata cache.
 * These allow us to expand the mapent zone for kernel_map.  Without them
 * we would get into a recursion deadlock trying to reserve map entries
 * (reserve->zalloc->kmem_alloc->reserve->...).
 */
void
kmem_cpu_init(void)
{
	vm_map_entry_reserve(MAP_RESERVE_COUNT * 2);
}
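
/*
 * Illustrative sketch only (not part of the original file): during early
 * boot the VM startup code would call kmem_init() once with the bootstrap
 * cutoff addresses, and kmem_cpu_init() once per cpu to prime the map entry
 * caches; the variable names here are hypothetical.
 *
 *	kmem_init(virtual_avail, virtual_end);
 *	kmem_cpu_init();
 */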