/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_kern.c	7.8 (Berkeley) 02/19/92
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Kernel memory management.
 */

#include "param.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"
#include "vm_kern.h"

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	The map must be "kernel_map" (see the disabled sanity check below).
 */
vm_offset_t kmem_alloc_pageable(map, size)
        vm_map_t map;
        register vm_size_t size;
{
        vm_offset_t addr;
        register int result;

#if 0
        if (map != kernel_map)
                panic("kmem_alloc_pageable: not called with kernel_map");
#endif /* 0 */

        size = round_page(size);

        addr = vm_map_min(map);
        result = vm_map_find(map, NULL, (vm_offset_t) 0,
                             &addr, size, TRUE);
        if (result != KERN_SUCCESS) {
                return(0);
        }

        return(addr);
}
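
/*
 * Usage sketch (illustrative only; the caller and the three-page size
 * are assumptions, not taken from this file):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 3 * PAGE_SIZE);
 *	if (va == 0)
 *		panic("out of pageable kernel VA");
 */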

/*
 * Allocate wired-down memory in the kernel's address map
 * or a submap.
 */
vm_offset_t kmem_alloc(map, size)
        register vm_map_t map;
        register vm_size_t size;
{
        vm_offset_t addr;
        register int result;
        register vm_offset_t offset;
        extern vm_object_t kernel_object;
        vm_offset_t i;

        size = round_page(size);

        /*
         * Use the kernel object for wired-down kernel pages.
         * Assume that no region of the kernel object is
         * referenced more than once.
         */

        /*
         * Locate sufficient space in the map.  This will give us the
         * final virtual address for the new memory, and thus will tell
         * us the offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, 0, size, &addr)) {
                vm_map_unlock(map);
                return (0);
        }
        offset = addr - VM_MIN_KERNEL_ADDRESS;
        vm_object_reference(kernel_object);
        vm_map_insert(map, kernel_object, offset, addr, addr + size);
        vm_map_unlock(map);

        /*
         * Guarantee that there are pages already in this object
         * before calling vm_map_pageable.  This is to prevent the
         * following scenario:
         *
         *	1) Threads have swapped out, so that there is a
         *	   pager for the kernel_object.
         *	2) The kmsg zone is empty, and so we are kmem_allocing
         *	   a new page for it.
         *	3) vm_map_pageable calls vm_fault; there is no page,
         *	   but there is a pager, so we call
         *	   pager_data_request.  But the kmsg zone is empty,
         *	   so we must kmem_alloc.
         *	4) goto 1
         *	5) Even if the kmsg zone is not empty: when we get
         *	   the data back from the pager, it will be (very
         *	   stale) non-zero data.  kmem_alloc is defined to
         *	   return zero-filled memory.
         *
         * We're intentionally not activating the pages we allocate
         * to prevent a race with page-out.  vm_map_pageable will wire
         * the pages.
         */
        vm_object_lock(kernel_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_page_t mem;

                while ((mem = vm_page_alloc(kernel_object, offset + i))
                    == NULL) {
                        vm_object_unlock(kernel_object);
                        VM_WAIT;
                        vm_object_lock(kernel_object);
                }
                vm_page_zero_fill(mem);
                mem->busy = FALSE;
        }
        vm_object_unlock(kernel_object);

        /*
         * And finally, mark the data as non-pageable.
         */
        (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

        /*
         * Try to coalesce the map
         */
        vm_map_simplify(map, addr);

        return(addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void kmem_free(map, addr, size)
        vm_map_t map;
        register vm_offset_t addr;
        vm_size_t size;
{
        (void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	pageable	Can the region be paged
 */
vm_map_t kmem_suballoc(parent, min, max, size, pageable)
        register vm_map_t parent;
        vm_offset_t *min, *max;
        register vm_size_t size;
        boolean_t pageable;
{
        register int ret;
        vm_map_t result;

        size = round_page(size);

        *min = (vm_offset_t) vm_map_min(parent);
        ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
                          min, size, TRUE);
        if (ret != KERN_SUCCESS) {
                printf("kmem_suballoc: bad status return of %d.\n", ret);
                panic("kmem_suballoc");
        }
        *max = *min + size;
        pmap_reference(vm_map_pmap(parent));
        result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
        if (result == NULL)
                panic("kmem_suballoc: cannot create submap");
        if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
                panic("kmem_suballoc: unable to change range to submap");
        return(result);
}
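
/*
 * Usage sketch (illustrative; `pager_map', the bounds variables and
 * the 64-page size are assumptions): carve a pageable submap out of
 * the kernel map for a subsystem's private use.
 *
 *	vm_offset_t pager_sva, pager_eva;
 *	vm_map_t pager_map;
 *
 *	pager_map = kmem_suballoc(kernel_map, &pager_sva, &pager_eva,
 *	    64 * PAGE_SIZE, TRUE);
 */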

/*
 *	vm_move:
 *
 *	Move memory from source to destination map, possibly deallocating
 *	the source map reference to the memory.
 *
 *	Parameters are as follows:
 *
 *	src_map		Source address map
 *	src_addr	Address within source map
 *	dst_map		Destination address map
 *	num_bytes	Amount of data (in bytes) to copy/move
 *	src_dealloc	Should source be removed after copy?
 *
 *	Assumes the src and dst maps are not already locked.
 *
 *	Returns new destination address or 0 (if a failure occurs).
 */
vm_offset_t vm_move(src_map, src_addr, dst_map, num_bytes, src_dealloc)
        vm_map_t src_map;
        register vm_offset_t src_addr;
        register vm_map_t dst_map;
        vm_offset_t num_bytes;
        boolean_t src_dealloc;
{
        register vm_offset_t src_start;	/* Beginning of region */
        register vm_size_t src_size;	/* Size of rounded region */
        vm_offset_t dst_start;		/* destination address */
        register int result;

        /*
         * Page-align the source region
         */
        src_start = trunc_page(src_addr);
        src_size = round_page(src_addr + num_bytes) - src_start;

        /*
         * If there's no destination, we can be at most deallocating
         * the source range.
         */
        if (dst_map == NULL) {
                if (src_dealloc)
                        if (vm_deallocate(src_map, src_start, src_size)
                            != KERN_SUCCESS)
                                printf("vm_move: deallocate of source failed, dealloc_only clause\n");
                return(0);
        }

        /*
         * Allocate a place to put the copy
         */
        dst_start = (vm_offset_t) 0;
        if ((result = vm_allocate(dst_map, &dst_start, src_size, TRUE))
            == KERN_SUCCESS) {
                /*
                 * Perform the copy, asking for deallocation if desired
                 */
                result = vm_map_copy(dst_map, src_map, dst_start, src_size,
                                     src_start, FALSE, src_dealloc);
        }

        /*
         * Return the destination address corresponding to
         * the source address given (rather than the front
         * of the newly-allocated page).
         */
        if (result == KERN_SUCCESS)
                return(dst_start + (src_addr - src_start));
        return(0);
}
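
/*
 * Usage sketch (illustrative; `user_map', `uaddr' and `len' are
 * assumptions): move a user region into the kernel map, releasing the
 * source mapping once the copy succeeds.
 *
 *	vm_offset_t kva;
 *
 *	kva = vm_move(user_map, uaddr, kernel_map, len, TRUE);
 *	if (kva == 0)
 *		return (ENOMEM);
 */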

/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, canwait)
        register vm_map_t map;
        register vm_size_t size;
        boolean_t canwait;
{
        register vm_offset_t offset, i;
        vm_map_entry_t entry;
        vm_offset_t addr;
        vm_page_t m;
        extern vm_object_t kmem_object;

        if (map != kmem_map && map != mb_map)
                panic("kmem_malloc: map != {kmem,mb}_map");

        size = round_page(size);
        addr = vm_map_min(map);

        /*
         * Locate sufficient space in the map.  This will give us the
         * final virtual address for the new memory, and thus will tell
         * us the offset within the kernel map.
         */
        vm_map_lock(map);
        if (vm_map_findspace(map, 0, size, &addr)) {
                vm_map_unlock(map);
                if (canwait)		/* XXX should wait */
                        panic("kmem_malloc: %s too small",
                            map == kmem_map ? "kmem_map" : "mb_map");
                return (0);
        }
        offset = addr - vm_map_min(kmem_map);
        vm_object_reference(kmem_object);
        vm_map_insert(map, kmem_object, offset, addr, addr + size);

        /*
         * If we can wait, just mark the range as wired
         * (will fault pages as necessary).
         */
        if (canwait) {
                vm_map_unlock(map);
                (void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
                                       FALSE);
                vm_map_simplify(map, addr);
                return(addr);
        }

        /*
         * If we cannot wait then we must allocate all memory up front,
         * pulling it off the active queue to prevent pageout.
         */
        vm_object_lock(kmem_object);
        for (i = 0; i < size; i += PAGE_SIZE) {
                m = vm_page_alloc(kmem_object, offset + i);

                /*
                 * Ran out of space, free everything up and return.
                 * Don't need to lock page queues here as we know
                 * that the pages we got aren't on any queues.
                 */
                if (m == NULL) {
                        while (i != 0) {
                                i -= PAGE_SIZE;
                                m = vm_page_lookup(kmem_object, offset + i);
                                vm_page_free(m);
                        }
                        vm_object_unlock(kmem_object);
                        vm_map_delete(map, addr, addr + size);
                        vm_map_unlock(map);
                        return(0);
                }
#if 0
                vm_page_zero_fill(m);
#endif
                m->busy = FALSE;
        }
        vm_object_unlock(kmem_object);

        /*
         * Mark map entry as non-pageable.
         * Assert: vm_map_insert() will never be able to extend the previous
         * entry so there will be a new entry exactly corresponding to this
         * address range and it will have wired_count == 0.
         */
        if (!vm_map_lookup_entry(map, addr, &entry) ||
            entry->start != addr || entry->end != addr + size ||
            entry->wired_count)
                panic("kmem_malloc: entry not found or misaligned");
        entry->wired_count++;

        /*
         * Loop through pages, entering them in the pmap.
         * (We cannot add them to the wired count without
         * wrapping the vm_page_queue_lock in splimp...)
         */
        for (i = 0; i < size; i += PAGE_SIZE) {
                vm_object_lock(kmem_object);
                m = vm_page_lookup(kmem_object, offset + i);
                vm_object_unlock(kmem_object);
                pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
                           VM_PROT_DEFAULT, TRUE);
        }
        vm_map_unlock(map);

        vm_map_simplify(map, addr);
        return(addr);
}
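
/*
 * Usage sketch (illustrative; `npg' and `flags' are assumptions that
 * mirror how a higher-level allocator such as kern_malloc.c might
 * call this):
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, (vm_size_t)ctob(npg),
 *	    !(flags & M_NOWAIT));
 *	if (va == 0)
 *		return (NULL);
 */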

/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the
 *	submap has no room, the caller sleeps waiting for more memory
 *	in the submap.
 */
vm_offset_t kmem_alloc_wait(map, size)
        vm_map_t map;
        vm_size_t size;
{
        vm_offset_t addr;

        size = round_page(size);

        for (;;) {
                /*
                 * To make this work for more than one map,
                 * use the map's lock to lock out sleepers/wakers.
                 */
                vm_map_lock(map);
                if (vm_map_findspace(map, 0, size, &addr) == 0)
                        break;
                /* no space now; see if we can ever get space */
                if (vm_map_max(map) - vm_map_min(map) < size) {
                        vm_map_unlock(map);
                        return (0);
                }
                assert_wait((int)map, TRUE);
                vm_map_unlock(map);
                thread_block();
        }
        vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
        vm_map_unlock(map);
        return (addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any threads
 *	waiting for memory in that map.
 */
void kmem_free_wakeup(map, addr, size)
        vm_map_t map;
        vm_offset_t addr;
        vm_size_t size;
{
        vm_map_lock(map);
        (void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
        thread_wakeup((int)map);
        vm_map_unlock(map);
}

/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will
 * thus map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated,
 * and the range between `start' and `end' as free.
 */
void kmem_init(start, end)
        vm_offset_t start, end;
{
        register vm_map_t m;

        m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
        vm_map_lock(m);
        /* N.B.: cannot use kgdb to debug, starting with this assignment ... */
        kernel_map = m;
        (void) vm_map_insert(m, NULL, (vm_offset_t)0,
            VM_MIN_KERNEL_ADDRESS, start);
        /* ... and ending with the completion of the above `insert' */
        vm_map_unlock(m);
}
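
/*
 * Boot-time usage sketch (the `virtual_avail'/`virtual_end' names
 * follow the usual pmap bootstrap convention and are assumptions here):
 *
 *	extern vm_offset_t virtual_avail, virtual_end;
 *
 *	kmem_init(virtual_avail, virtual_end);
 */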