/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.17 2004/05/20 22:42:25 dillon Exp $
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;

/*
 * kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
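/*
 * Example (illustrative sketch only, excluded from the build): a typical
 * caller pattern for the pageable allocators above.  The function name and
 * the four-page size are hypothetical; real callers usually operate on a
 * submap rather than kernel_map itself.
 */
#if 0
static void
example_pageable_alloc(void)
{
	vm_offset_t va;
	vm_size_t size = 4 * PAGE_SIZE;

	/*
	 * Reserve pageable KVA.  No physical pages are allocated up
	 * front; they are faulted in on first touch.
	 */
	va = kmem_alloc_pageable(kernel_map, size);
	if (va == 0)
		return;			/* no KVA available */

	/* ... use the range ... */

	/* Release the range (and any faulted-in pages) when done. */
	kmem_free(kernel_map, va, size);
}
#endif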
/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;
	int count;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, &count,
		      kernel_object, offset, addr, addr + size,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, (vm_offset_t) addr, addr + size, kmflags);

	return (addr);
}

/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
	      vm_size_t size)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
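/*
 * Example (illustrative sketch only, excluded from the build): carving a
 * submap out of kernel_map with kmem_suballoc(), the way the exec_map and
 * clean_map style submaps are set up at boot.  The variable names and the
 * 16-page size are hypothetical.
 */
#if 0
static vm_map_t example_submap;
static vm_offset_t example_min, example_max;

static void
example_create_submap(void)
{
	/*
	 * kmem_suballoc() panics rather than returning failure, so no
	 * error check is needed here.  Allocations made later against
	 * example_submap are confined to [example_min, example_max).
	 */
	example_submap = kmem_suballoc(kernel_map, &example_min,
				       &example_max, 16 * PAGE_SIZE);
}
#endif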
/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: Please see kmem_slab_alloc() for a better explanation of the
 *	M_* flags.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int count;
	thread_t td;
	int wanted_reserve;

	if (map != kernel_map && map != mb_map)
		panic("kmem_malloc: map != {kernel,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & (M_RNOWAIT|M_NULLOK)) == 0 ||
		    (flags & (M_FAILSAFE|M_NULLOK)) == M_FAILSAFE
		) {
			panic("kmem_malloc(%ld): kernel_map too small: "
			      "%ld total allocated",
			      (long)size, (long)map->size);
		}
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, &count,
		      kmem_object, offset, addr, addr + size,
		      VM_PROT_ALL, VM_PROT_ALL, 0);

	td = curthread;
	wanted_reserve = 0;

	for (i = 0; i < size; i += PAGE_SIZE) {
		int vmflags;

		vmflags = VM_ALLOC_SYSTEM;	/* XXX M_USE_RESERVE? */
		if ((flags & (M_WAITOK|M_RNOWAIT)) == 0)
			printf("kmem_malloc: bad flags %08x (%p)\n", flags, ((int **)&map)[-1]);
		if (flags & M_USE_INTERRUPT_RESERVE)
			vmflags |= VM_ALLOC_INTERRUPT;
		if (flags & (M_FAILSAFE|M_WAITOK)) {
			if (td->td_preempted) {
				wanted_reserve = 1;
			} else {
				vmflags |= VM_ALLOC_NORMAL;
				wanted_reserve = 0;
			}
		}

		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i), vmflags);

		/*
		 * Ran out of space, free everything up and return.  Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 *
		 * If M_WAITOK or M_FAILSAFE is set we can yield or block.
		 */
		if (m == NULL) {
			if (flags & (M_FAILSAFE|M_WAITOK)) {
				if (wanted_reserve) {
					if (flags & M_FAILSAFE)
						printf("kmem_malloc: no memory, try failsafe\n");
					vm_map_unlock(map);
					lwkt_yield();
					vm_map_lock(map);
				} else {
					if (flags & M_FAILSAFE)
						printf("kmem_malloc: no memory, block even though we shouldn't\n");
					vm_map_unlock(map);
					vm_wait();
					vm_map_lock(map);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size, &count);
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry, &count);

	/*
	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
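/*
 * Example (illustrative sketch only, excluded from the build): how the M_*
 * flags change kmem_malloc()'s failure behavior.  The wrapper function is
 * hypothetical; the flag combinations are the ones handled above.
 */
#if 0
static vm_offset_t
example_kmem_malloc(vm_size_t size, int can_block)
{
	vm_offset_t va;

	if (can_block) {
		/*
		 * May yield or vm_wait() for pages; KVA exhaustion
		 * still panics because neither M_RNOWAIT nor M_NULLOK
		 * is set.
		 */
		va = kmem_malloc(kernel_map, size, M_WAITOK);
	} else {
		/*
		 * Interrupt-time caller: never block, and return 0
		 * instead of panicing when the map is exhausted.
		 */
		va = kmem_malloc(kernel_map, size, M_RNOWAIT | M_NULLOK);
	}
	return (va);
}
#endif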
/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (addr);
}

/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 *
 *	Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;
	int count;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	(void) vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			     VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
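/*
 * Example (illustrative sketch only, excluded from the build): the intended
 * pairing of kmem_alloc_wait() and kmem_free_wakeup() on a submap, as used
 * for exec_map style allocations.  The function name is hypothetical and
 * the submap is assumed to have been created with kmem_suballoc().
 */
#if 0
static void
example_wait_alloc(vm_map_t submap, vm_size_t size)
{
	vm_offset_t va;

	/*
	 * Sleeps on the map until space frees up; returns 0 only if
	 * the request can never fit in the submap.
	 */
	va = kmem_alloc_wait(submap, size);
	if (va == 0)
		return;

	/* ... use the pageable range ... */

	/* Free the range and wake any threads in kmem_alloc_wait(). */
	kmem_free_wakeup(submap, va, size);
}
#endif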