/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 * $DragonFly: src/sys/vm/vm_kern.c,v 1.29 2007/06/07 23:14:29 dillon Exp $
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 * Allocate pageable memory to the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable(), except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
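/*
 * Example (illustrative sketch only, not part of the original file): a
 * caller needing pageable KVA would typically allocate from kernel_map
 * and later release the range with kmem_free(), defined below.  The
 * 4-page size and the error handling are arbitrary.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_pageable(&kernel_map, 4 * PAGE_SIZE);
 *	if (kva == 0)
 *		... map was full, handle the failure ...
 *	... use the range ...
 *	kmem_free(&kernel_map, kva, 4 * PAGE_SIZE);
 */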
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_reference(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      cow);
	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_wire will wire the pages.
	 */
	lwkt_gettoken(&vm_token);
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);
	lwkt_reltoken(&vm_token);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, (vm_offset_t)addr, addr + size, kmflags);

	return (addr);
}

/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Map to initialize as the new submap
 *	min, max	Returned endpoints of the submap
 *	size		Size of range to find
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	lwkt_gettoken(&vm_token);
	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	lwkt_reltoken(&vm_token);
}
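/*
 * Example (illustrative sketch only): a subsystem such as the buffer
 * cache might carve its submap out of kernel_map at boot time roughly
 * like this; the 128MB size is arbitrary.
 *
 *	vm_offset_t bmin, bmax;
 *
 *	kmem_suballoc(&kernel_map, &buffer_map, &bmin, &bmax,
 *		      128 * 1024 * 1024);
 *
 * Afterwards buffer_map manages [bmin, bmax) as a submap of kernel_map,
 * so allocations against it do not contend for the parent map.
 */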
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, virtual_start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
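/*
 * Example (illustrative sketch only): kmem_alloc_wait() and
 * kmem_free_wakeup() above are meant to be used as a pair on a small
 * submap whose clients may block, e.g. transient per-request buffers.
 * The exec_map name and the ARG_MAX-sized buffer here are hypothetical.
 *
 *	vm_offset_t kva;
 *
 *	kva = kmem_alloc_wait(&exec_map, ARG_MAX);	(may tsleep)
 *	... use the buffer ...
 *	kmem_free_wakeup(&exec_map, kva, ARG_MAX);	(wakes sleepers)
 */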