/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.5 2003/07/26 22:10:02 rob Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;

/*
 * kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, (vm_offset_t) 0,
            &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}

/*
 * kmem_alloc_nofault:
 *
 *	Same as kmem_alloc_pageable, except that it creates a nofault entry.
 */

vm_offset_t
kmem_alloc_nofault(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        int result;

        size = round_page(size);
        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, (vm_offset_t) 0,
            &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT);
        if (result != KERN_SUCCESS) {
                return (0);
        }
        return (addr);
}
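
/*
 * Hypothetical usage sketch (compiled out unless KMEM_USAGE_EXAMPLES is
 * defined): how a caller might reserve pageable kernel virtual address
 * space with kmem_alloc_pageable().  The function name below and the
 * one-page size are illustrative assumptions, not part of this file's
 * interface; the use of kernel_map follows the comment above, which
 * requires kernel_map or one of its submaps.
 */
#ifdef KMEM_USAGE_EXAMPLES
static vm_offset_t
kmem_example_pageable(void)
{
        vm_offset_t va;

        /*
         * Reserve one page of pageable KVA.  No physical pages are
         * allocated here; they are faulted in on first access.
         */
        va = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
        if (va == 0)
                printf("kmem_example_pageable: no address space\n");
        return (va);
}
#endif	/* KMEM_USAGE_EXAMPLES */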

/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;
        vm_offset_t offset;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages. Assume that no
         * region of the kernel object is referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);

        /*
         * Guarantee that there are pages already in this object before
         * calling vm_map_pageable.  This is to prevent the following
         * scenario:
         *
         * 1) Threads have swapped out, so that there is a pager for the
         *    kernel_object.
         * 2) The kmsg zone is empty, and so we are kmem_allocing a new
         *    page for it.
         * 3) vm_map_pageable calls vm_fault; there is no page, but there
         *    is a pager, so we call pager_data_request.  But the kmsg zone
         *    is empty, so we must kmem_alloc.
         * 4) goto 1
         * 5) Even if the kmsg zone is not empty: when we get the data back
         *    from the pager, it will be (very stale) non-zero data.
         *    kmem_alloc is defined to return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate to
         * prevent a race with page-out.  vm_map_pageable will wire the
         * pages.
         */

        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
                    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
                if ((mem->flags & PG_ZERO) == 0)
                        vm_page_zero_fill(mem);
                mem->valid = VM_PAGE_BITS_ALL;
                vm_page_flag_clear(mem, PG_ZERO);
                vm_page_wakeup(mem);
        }

        /*
         * And finally, mark the data as non-pageable.
         */

        (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

        return (addr);
}

/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{
        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
        vm_map_t parent;
        vm_offset_t *min, *max;
        vm_size_t size;
{
        int ret;
        vm_map_t result;

        size = round_page(size);

        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
            min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
        if (ret != KERN_SUCCESS) {
                printf("kmem_suballoc: bad status return of %d.\n", ret);
                panic("kmem_suballoc");
        }
        *max = *min + size;
        pmap_reference(vm_map_pmap(parent));
        result = vm_map_create(vm_map_pmap(parent), *min, *max);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return (result);
}
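
/*
 * Hypothetical usage sketch (compiled out unless KMEM_USAGE_EXAMPLES is
 * defined): carving a submap out of kernel_map with kmem_suballoc(), in
 * the style of the boot-time setup of submaps such as exec_map.  The
 * names kmem_example_map/kmem_example_suballoc and the 16-page size are
 * illustrative assumptions only.
 */
#ifdef KMEM_USAGE_EXAMPLES
static vm_map_t kmem_example_map;

static void
kmem_example_suballoc(void)
{
        vm_offset_t minaddr, maxaddr;

        /*
         * kmem_suballoc() finds room in the parent map, returns the
         * chosen endpoints through min/max, and installs a submap over
         * that range.  Note that it panics rather than fail.
         */
        kmem_example_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
            16 * PAGE_SIZE);
}
#endif	/* KMEM_USAGE_EXAMPLES */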

/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (canwait == FALSE).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
        vm_map_t map;
        vm_size_t size;
        int flags;
{
        vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_offset_t addr;
        vm_page_t m;

        if (map != kmem_map && map != mb_map)
                panic("kmem_malloc: map != {kmem,mb}_map");

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the final
         * virtual address for the new memory, and thus will tell us the
         * offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
                vm_map_unlock(map);
                if (map == mb_map) {
                        mb_map_full = TRUE;
                        printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
                        return (0);
                }
                if ((flags & M_NOWAIT) == 0)
                        panic("kmem_malloc(%ld): kmem_map too small: %ld total allocated",
                            (long)size, (long)map->size);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);

        for (i = 0; i < size; i += PAGE_SIZE) {
                /*
                 * Note: if M_NOWAIT is specified alone, allocate from the
                 * interrupt-safe queues only (just the free list).  If
                 * M_USE_RESERVE is also specified, we can also allocate
                 * from the cache.  Neither of the latter two flags may be
                 * specified from an interrupt since interrupts are not
                 * allowed to mess with the cache queue.
                 */
retry:
                m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
                    ((flags & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) ?
                    VM_ALLOC_INTERRUPT :
                    VM_ALLOC_SYSTEM);

                /*
                 * Ran out of space, free everything up and return.  Don't
                 * need to lock page queues here as we know that the pages
                 * we got aren't on any queues.
                 */
                if (m == NULL) {
                        if ((flags & M_NOWAIT) == 0) {
                                vm_map_unlock(map);
                                VM_WAIT;
                                vm_map_lock(map);
                                goto retry;
                        }
                        /*
                         * Free the pages before removing the map entry.
                         * They are already marked busy.  Calling
                         * vm_map_delete before the pages have been freed or
                         * unbusied will cause a deadlock.
                         */
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object,
                                    OFF_TO_IDX(offset + i));
                                vm_page_free(m);
                        }
                        vm_map_delete(map, addr, addr + size);
                        vm_map_unlock(map);
                        return (0);
                }
                vm_page_flag_clear(m, PG_ZERO);
                m->valid = VM_PAGE_BITS_ALL;
        }

        /*
         * Mark map entry as non-pageable.  Assert: vm_map_insert() will
         * never be able to extend the previous entry so there will be a
         * new entry exactly corresponding to this address range and it
         * will have wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count != 0)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count = 1;

        vm_map_simplify_entry(map, entry);

        /*
         * Loop thru pages, entering them in the pmap.  (We cannot add
         * them to the wired count without wrapping the vm_page_queue_lock
         * in splimp...)
         */
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
                vm_page_wire(m);
                vm_page_wakeup(m);
                /*
                 * Because this is kernel_pmap, this call will not block.
                 */
                pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
                vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
        }
        vm_map_unlock(map);

        return (addr);
}
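
/*
 * Hypothetical usage sketch (compiled out unless KMEM_USAGE_EXAMPLES is
 * defined): a non-blocking wired allocation from kmem_map, as performed
 * on behalf of the higher-level allocator in kern/kern_malloc.c.  Per
 * the comment above, only malloc is expected to call this routine; the
 * sketch merely illustrates the contract that M_NOWAIT may yield 0.
 * The function name below is an illustrative assumption.
 */
#ifdef KMEM_USAGE_EXAMPLES
static vm_offset_t
kmem_example_malloc_nowait(vm_size_t size)
{
        vm_offset_t va;

        va = kmem_malloc(kmem_map, size, M_NOWAIT);
        if (va == 0) {
                /* kmem_map exhausted or no free pages; caller must cope. */
                return (0);
        }
        return (va);
}
#endif	/* KMEM_USAGE_EXAMPLES */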

/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory in
 *	the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;

        size = round_page(size);

        for (;;) {
                /*
                 * To make this work for more than one map, use the map's
                 * lock to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        return (0);
                }
                vm_map_unlock(map);
                tsleep(map, 0, "kmaw", 0);
        }
        vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
            VM_PROT_ALL, VM_PROT_ALL, 0);
        vm_map_unlock(map);
        return (addr);
}

/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        wakeup(map);
        vm_map_unlock(map);
}

/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as
 *	free.
 */

void
kmem_init(start, end)
        vm_offset_t start, end;
{
        vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        kernel_map->system_map = 1;
        (void) vm_map_insert(m, NULL, (vm_offset_t) 0,
            VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}
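
/*
 * Hypothetical usage sketch (compiled out unless KMEM_USAGE_EXAMPLES is
 * defined): the alloc-wait/free-wakeup rendezvous on a kernel submap.
 * kmem_alloc_wait() sleeps until space appears in the submap, and the
 * matching kmem_free_wakeup() wakes any such sleepers.  The function
 * name below is an illustrative assumption.
 */
#ifdef KMEM_USAGE_EXAMPLES
static void
kmem_example_wait_pair(vm_map_t submap, vm_size_t size)
{
        vm_offset_t va;

        /* May block until another thread releases space in the submap. */
        va = kmem_alloc_wait(submap, size);
        if (va == 0)
                return;         /* request larger than the whole submap */

        /* ... use the pageable region at va ... */

        /* Release the region and wake sleepers in kmem_alloc_wait(). */
        kmem_free_wakeup(submap, va, size);
}
#endif	/* KMEM_USAGE_EXAMPLES */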