/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.29 2007/06/07 23:14:29 dillon Exp $
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that
	 * no region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE,
			     0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      cow);
	vm_object_drop(&kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire().  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_alloc()ing a new
	 *    page for it.
	 * 3) vm_map_wire() calls vm_fault(); there is no page, but there
	 *    is a pager, so we call pager_data_request().  But the kmsg
	 *    zone is empty, so we must kmem_alloc().
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc() is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate, to
	 * prevent a race with page-out.  vm_map_wire() will wire the
	 * pages.
	 */
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, (vm_offset_t)addr, addr + size, kmflags);

	return (addr);
}
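/*
 * Illustrative sketch, not part of the original file: a typical caller
 * of the wired-allocation path above.  kmem_alloc3() with kmflags 0
 * returns zero-filled, wired KVA, or 0 on failure, and the region is
 * released with kmem_free() (defined just below).  The function name
 * kmem_alloc_example and the four-page size are hypothetical.
 */
#if 0
static void
kmem_alloc_example(void)
{
	vm_offset_t va;
	vm_size_t size = 4 * PAGE_SIZE;

	va = kmem_alloc3(&kernel_map, size, 0);	/* wired, zero-filled */
	if (va == 0)
		return;				/* no KVA available */

	/* ... use the memory ... */

	kmem_free(&kernel_map, va, size);	/* release VA and pages */
}
#endif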
/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using
 * pmap_kenter() it must remove the pages using pmap_kremove[_quick]()
 * before freeing the underlying kmem, otherwise resident_count will be
 * mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to populate with the new submap's range
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}

/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}
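/*
 * Illustrative sketch, not part of the original file: kmem_alloc_wait()
 * is used as a pair with kmem_free_wakeup() (defined below) on a small
 * pageable submap.  The allocation blocks (tsleep on the map) until
 * space appears, and returns 0 only if the request can never fit in the
 * map; the matching free wakes any sleepers.  The helper name
 * kmem_wait_example is hypothetical.
 */
#if 0
static void
kmem_wait_example(vm_map_t submap, vm_size_t size)
{
	vm_offset_t va;

	va = kmem_alloc_wait(submap, size);	/* may sleep for space */
	if (va == 0)
		return;				/* can never be satisfied */

	/* ... use the pageable memory ... */

	kmem_free_wakeup(submap, va, size);	/* wakes other waiters */
}
#endif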
/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, virtual_start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
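/*
 * Illustrative note, not part of the original file: the two read-only
 * sysctls above can be inspected from userland, e.g.
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 *
 * or programmatically via sysctlbyname(3):
 *
 *	unsigned long val;
 *	size_t len = sizeof(val);
 *
 *	if (sysctlbyname("vm.kvm_size", &val, &len, NULL, 0) == 0)
 *		printf("KVM size: %lu bytes\n", val);
 */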