/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.21 2005/04/02 15:58:16 joerg Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map = 0;
vm_map_t exec_map = 0;
vm_map_t clean_map = 0;
vm_map_t buffer_map = 0;

/*
 * kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, TRUE,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, TRUE,
			     VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
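
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * allocates a pageable region from kernel_map and later releases it with
 * kmem_free().  The 4-page size is an arbitrary example value; the block
 * is guarded out of compilation because it is an example only.
 */
#if 0
static void
kmem_example_pageable(void)
{
	vm_offset_t va;

	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
	if (va == 0)
		return;		/* no kernel virtual space available */
	/* pages are not wired; they fault in on first access */
	kmem_free(kernel_map, va, 4 * PAGE_SIZE);
}
#endif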

/*
 * kmem_alloc3:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t offset;
	vm_offset_t i;
	int count;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, &count,
		      kernel_object, offset, addr, addr + size,
		      VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is a
	 *    pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_wire will wire the pages.
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */
	(void) vm_map_wire(map, (vm_offset_t) addr, addr + size, kmflags);

	return (addr);
}

/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
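
/*
 * Illustrative sketch, not part of the original file: wired allocations go
 * through kmem_alloc3().  Passing KM_KRESERVE would draw map entries via
 * vm_map_entry_kreserve() instead of the normal reserve; the sizes and the
 * kmflags value of 0 here are assumptions for the example.  Compiled out.
 */
#if 0
static void
kmem_example_wired(void)
{
	vm_offset_t va;

	/* wired, zero-filled memory; usable immediately on success */
	va = kmem_alloc3(kernel_map, 2 * PAGE_SIZE, 0);
	if (va == 0)
		return;		/* map space exhausted */
	kmem_free(kernel_map, va, 2 * PAGE_SIZE);
}
#endif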

/*
 * kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(vm_map_t parent, vm_offset_t *min, vm_offset_t *max,
	      vm_size_t size)
{
	int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, TRUE,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
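
/*
 * Illustrative sketch, not part of the original file: boot-time code carves
 * submaps such as exec_map and clean_map out of kernel_map with
 * kmem_suballoc().  The 16MB size is an assumed example value.  Compiled
 * out.
 */
#if 0
static void
kmem_example_submap(void)
{
	vm_offset_t minaddr, maxaddr;

	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				 16 * 1024 * 1024);
	/* later allocations can target exec_map instead of kernel_map */
}
#endif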

/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: Please see kmem_slab_alloc() for a better explanation of the
 *	M_* flags.
 */
vm_offset_t
kmem_malloc(vm_map_t map, vm_size_t size, int flags)
{
	vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	int count, vmflags, wanted_reserve;
	thread_t td;

	if (map != kernel_map)
		panic("kmem_malloc: map != kernel_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.  If we are unable to allocate space
	 * and M_NULLOK is not set, we panic.
	 */
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, 1, &addr)) {
		vm_map_unlock(map);
		vm_map_entry_release(count);
		if ((flags & M_NULLOK) == 0) {
			panic("kmem_malloc(%ld): kernel_map too small: "
			      "%ld total allocated",
			      (long)size, (long)map->size);
		}
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, &count,
		      kmem_object, offset, addr, addr + size,
		      VM_PROT_ALL, VM_PROT_ALL, 0);

	td = curthread;
	wanted_reserve = 0;

	vmflags = VM_ALLOC_SYSTEM;	/* XXX M_USE_RESERVE? */
	if ((flags & (M_WAITOK|M_RNOWAIT)) == 0)
		panic("kmem_malloc: bad flags %08x (%p)\n",
		      flags, ((int **)&map)[-1]);
	if (flags & M_USE_INTERRUPT_RESERVE)
		vmflags |= VM_ALLOC_INTERRUPT;

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Only allocate PQ_CACHE pages for M_WAITOK requests and
		 * then only if we are not preempting.
		 */
		if (flags & M_WAITOK) {
			if (td->td_preempted) {
				vmflags &= ~VM_ALLOC_NORMAL;
				wanted_reserve = 1;
			} else {
				vmflags |= VM_ALLOC_NORMAL;
				wanted_reserve = 0;
			}
		}

		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
				  vmflags);

		/*
		 * Ran out of space, free everything up and return.  Don't
		 * need to lock page queues here as we know that the pages
		 * we got aren't on any queues.
		 *
		 * If M_WAITOK is set we can yield or block.
		 */
		if (m == NULL) {
			if (flags & M_WAITOK) {
				if (wanted_reserve) {
					vm_map_unlock(map);
					lwkt_yield();
					vm_map_lock(map);
				} else {
					vm_map_unlock(map);
					vm_wait();
					vm_map_lock(map);
				}
				i -= PAGE_SIZE;	/* retry */
				continue;
			}
			/*
			 * Free the pages before removing the map entry.
			 * They are already marked busy.  Calling
			 * vm_map_delete before the pages have been freed or
			 * unbusied will cause a deadlock.
			 */
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
						   OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size, &count);
			vm_map_unlock(map);
			vm_map_entry_release(count);
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will
	 * never be able to extend the previous entry so there will be a new
	 * entry exactly corresponding to this address range and it will
	 * have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count != 0)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count = 1;

	vm_map_simplify_entry(map, entry, &count);

	/*
	 * Loop through the pages, entering them in the pmap.  (We cannot
	 * add them to the wired count without wrapping the
	 * vm_page_queue_lock in splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
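
/*
 * Illustrative sketch, not part of the original file: kmem_malloc() is
 * intended for the kernel malloc layer, but a direct call would look like
 * this.  M_RNOWAIT | M_NULLOK makes it return 0 on failure rather than
 * block or panic; M_WAITOK would instead allow it to sleep for pages.
 * The flag combination is an assumption for the example; compiled out.
 */
#if 0
static vm_offset_t
kmem_example_malloc(vm_size_t bytes)
{
	/* returns 0 if kernel_map or the free page pool is exhausted */
	return (kmem_malloc(kernel_map, bytes, M_RNOWAIT | M_NULLOK));
}
#endif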

/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in
 *	the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, 1, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	vm_map_entry_release(count);
	return (addr);
}

/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr),
			     round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text, data,
 *	bss, and all space allocated thus far (`bootstrap' data).  The new
 *	map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as
 *	free.
 *
 *	Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 */
void
kmem_init(vm_offset_t start, vm_offset_t end)
{
	vm_map_t m;
	int count;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	(void) vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			     VM_MIN_KERNEL_ADDRESS, start,
			     VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}
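
/*
 * Illustrative sketch, not part of the original file: kmem_alloc_wait()
 * and kmem_free_wakeup() are used as a pair on a submap; the free side
 * wakes sleepers in kmem_alloc_wait() via wakeup(map).  The use of
 * exec_map and the buffer length are assumptions for the example;
 * compiled out.
 */
#if 0
static void
kmem_example_wait(vm_size_t len)
{
	vm_offset_t va;

	len = round_page(len);
	va = kmem_alloc_wait(exec_map, len);	/* sleeps until space */
	if (va == 0)
		return;		/* larger than the entire submap */
	/* ... stage data in the buffer ... */
	kmem_free_wakeup(exec_map, va, len);	/* wake other waiters */
}
#endif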