/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_kern.c,v 1.61.2.2 2002/03/12 18:25:26 tegge Exp $
 */

/*
 * Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

struct vm_map kernel_map;
struct vm_map clean_map;
struct vm_map buffer_map;

/*
 * Allocate pageable memory in the kernel's address map.  "map" must
 * be kernel_map or a submap of kernel_map.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_pageable(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, PAGE_SIZE,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     0);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}

/*
 * Same as kmem_alloc_pageable, except that it creates a nofault entry.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_nofault(vm_map_t map, vm_size_t size, vm_size_t align)
{
	vm_offset_t addr;
	int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
			     &addr, size, align,
			     TRUE, VM_MAPTYPE_NORMAL,
			     VM_PROT_ALL, VM_PROT_ALL,
			     MAP_NOFAULT);
	if (result != KERN_SUCCESS)
		return (0);
	return (addr);
}
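/*
 * Illustrative usage sketch (not part of the original file): reserving a
 * pageable KVA range with kmem_alloc_pageable() and releasing it with
 * kmem_free() (defined later in this file).  The wrapper function and its
 * "bytes" parameter are hypothetical; only the kmem_* calls are real.
 * Guarded out so it is never compiled.
 */
#if 0
static vm_offset_t
example_pageable_alloc(vm_size_t bytes)
{
	vm_offset_t va;

	/* Returns 0, rather than panicking, when the map has no room. */
	va = kmem_alloc_pageable(&kernel_map, bytes);
	if (va == 0)
		return (0);

	/* ... use the range; pages fault in on demand ... */

	kmem_free(&kernel_map, va, bytes);
	return (va);
}
#endif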
/*
 * Allocate wired-down memory in the kernel's address map or a submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc3(vm_map_t map, vm_size_t size, int kmflags)
{
	vm_offset_t addr;
	vm_offset_t gstart;
	vm_offset_t i;
	int count;
	int cow;

	size = round_page(size);

	if (kmflags & KM_KRESERVE)
		count = vm_map_entry_kreserve(MAP_RESERVE_COUNT);
	else
		count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	if (kmflags & KM_STACK) {
		cow = MAP_IS_KSTACK;
		gstart = PAGE_SIZE;
	} else {
		cow = 0;
		gstart = 0;
	}

	/*
	 * Use the kernel object for wired-down kernel pages.  Assume that no
	 * region of the kernel object is referenced more than once.
	 *
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, PAGE_SIZE, 0, &addr)) {
		vm_map_unlock(map);
		if (kmflags & KM_KRESERVE)
			vm_map_entry_krelease(count);
		else
			vm_map_entry_release(count);
		return (0);
	}
	vm_object_hold(&kernel_object);
	vm_object_reference_locked(&kernel_object);
	vm_map_insert(map, &count,
		      &kernel_object, addr, addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      cow);
	vm_object_drop(&kernel_object);

	vm_map_unlock(map);
	if (kmflags & KM_KRESERVE)
		vm_map_entry_krelease(count);
	else
		vm_map_entry_release(count);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_wire.  This is to prevent the following scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new
	 *    page for it.
	 * 3) vm_map_wire calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone
	 *    is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_wire will wire the pages.
	 */
	vm_object_hold(&kernel_object);
	for (i = gstart; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(&kernel_object, OFF_TO_IDX(addr + i),
				   VM_ALLOC_FORCE_ZERO | VM_ALLOC_NORMAL |
				   VM_ALLOC_RETRY);
		vm_page_unqueue_nowakeup(mem);
		vm_page_wakeup(mem);
	}
	vm_object_drop(&kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 *
	 * NOTE: vm_map_wire() handles any kstack guard.
	 */
	vm_map_wire(map, addr, addr + size, kmflags);

	return (addr);
}

/*
 * Release a region of kernel virtual memory allocated with kmem_alloc,
 * and return the physical pages associated with that region.
 *
 * WARNING!  If the caller entered pages into the region using pmap_kenter()
 * it must remove the pages using pmap_kremove[_quick]() before freeing the
 * underlying kmem, otherwise resident_count will be mistabulated.
 *
 * No requirements.
 */
void
kmem_free(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 * Used to break a system map into smaller maps, usually to reduce
 * contention and to provide large KVA spaces for subsystems like the
 * buffer cache.
 *
 *	parent		Map to take range from
 *	result		Submap to be initialized over the range
 *	min, max	Returned endpoints of the submap
 *	size		Size of range to find
 *
 * No requirements.
 */
void
kmem_suballoc(vm_map_t parent, vm_map_t result,
	      vm_offset_t *min, vm_offset_t *max, vm_size_t size)
{
	int ret;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
			  min, size, PAGE_SIZE,
			  TRUE, VM_MAPTYPE_UNSPECIFIED,
			  VM_PROT_ALL, VM_PROT_ALL,
			  0);
	if (ret != KERN_SUCCESS) {
		kprintf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	vm_map_init(result, *min, *max, vm_map_pmap(parent));
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
}
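/*
 * Illustrative sketch (not in the original file): carving a private submap
 * out of kernel_map with kmem_suballoc() and drawing a wired, zero-filled
 * allocation from it with kmem_alloc3().  "example_map", "example_min",
 * "example_max" and the 1 MB size are hypothetical.  Guarded out so it is
 * never compiled.
 */
#if 0
static struct vm_map example_map;

static void
example_submap_setup(void)
{
	vm_offset_t example_min, example_max;
	vm_offset_t va;

	/* kmem_suballoc() panics on failure, so no error check here. */
	kmem_suballoc(&kernel_map, &example_map,
		      &example_min, &example_max, 1024 * 1024);

	/* Wired, zero-filled memory from the new submap. */
	va = kmem_alloc3(&example_map, PAGE_SIZE, 0);
	if (va)
		kmem_free(&example_map, va, PAGE_SIZE);
}
#endif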
/*
 * Allocates pageable memory from a sub-map of the kernel.  If the submap
 * has no room, the caller sleeps waiting for more memory in the submap.
 *
 * No requirements.
 */
vm_offset_t
kmem_alloc_wait(vm_map_t map, vm_size_t size)
{
	vm_offset_t addr;
	int count;

	size = round_page(size);

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's
		 * lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map),
				     size, PAGE_SIZE, 0, &addr) == 0) {
			break;
		}
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_entry_release(count);
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, 0, "kmaw", 0);
	}
	vm_map_insert(map, &count,
		      NULL, (vm_offset_t) 0,
		      addr, addr + size,
		      VM_MAPTYPE_NORMAL,
		      VM_PROT_ALL, VM_PROT_ALL,
		      0);
	vm_map_unlock(map);
	vm_map_entry_release(count);

	return (addr);
}

/*
 * Returns memory to a submap of the kernel, and wakes up any processes
 * waiting for memory in that map.
 *
 * No requirements.
 */
void
kmem_free_wakeup(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	int count;

	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	vm_map_lock(map);
	vm_map_delete(map, trunc_page(addr), round_page(addr + size), &count);
	wakeup(map);
	vm_map_unlock(map);
	vm_map_entry_release(count);
}

/*
 * Create the kernel_map for (KvaStart,KvaEnd) and insert mappings to
 * cover areas already allocated or reserved thus far.
 *
 * The areas (virtual_start, virtual_end) and (virtual2_start, virtual2_end)
 * are available so the cutouts are the areas around these ranges between
 * KvaStart and KvaEnd.
 *
 * Depend on the zalloc bootstrap cache to get our vm_map_entry_t.
 * Called from the low level boot code only.
 */
void
kmem_init(void)
{
	vm_offset_t addr;
	vm_map_t m;
	int count;

	m = vm_map_create(&kernel_map, &kernel_pmap, KvaStart, KvaEnd);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	m->system_map = 1;
	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
	addr = KvaStart;
	if (virtual2_start) {
		if (addr < virtual2_start) {
			vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
				      addr, virtual2_start,
				      VM_MAPTYPE_NORMAL,
				      VM_PROT_ALL, VM_PROT_ALL,
				      0);
		}
		addr = virtual2_end;
	}
	if (addr < virtual_start) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, virtual_start,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	addr = virtual_end;
	if (addr < KvaEnd) {
		vm_map_insert(m, &count, NULL, (vm_offset_t) 0,
			      addr, KvaEnd,
			      VM_MAPTYPE_NORMAL,
			      VM_PROT_ALL, VM_PROT_ALL,
			      0);
	}
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
	vm_map_entry_release(count);
}

/*
 * No requirements.
 */
static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = KvaSize;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_size, "LU", "Size of KVM");

/*
 * No requirements.
 */
static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = virtual_end - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, kvm_free, "LU", "Amount of KVM free");
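/*
 * Illustrative sketch (not in the original file): the kmem_alloc_wait() /
 * kmem_free_wakeup() pairing on a submap.  A thread that finds the submap
 * full sleeps inside kmem_alloc_wait(); the wakeup(map) in
 * kmem_free_wakeup() lets it retry.  "example_submap" and both helper
 * names are hypothetical.  Guarded out so it is never compiled.
 */
#if 0
static struct vm_map example_submap;

static vm_offset_t
example_wait_alloc(vm_size_t bytes)
{
	/*
	 * Blocks until space appears; returns 0 only if the request can
	 * never fit in the submap at all.
	 */
	return (kmem_alloc_wait(&example_submap, bytes));
}

static void
example_wait_free(vm_offset_t va, vm_size_t bytes)
{
	/* Free the range and wake any thread sleeping in kmem_alloc_wait(). */
	kmem_free_wakeup(&example_submap, va, bytes);
}
#endif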